Merge tag 'drm-misc-next-2022-02-23' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v5.18:

UAPI Changes:

Cross-subsystem Changes:
- Split out panel-lvds and lvds dt bindings.
- Put yes/no on/off disabled/enabled strings in linux/string_helpers.h
  and use it in drivers and tomoyo.
- Clarify that dma_fence_chain and dma_fence_array should never include
  each other.
- Flatten chains in syncobjs.
- Don't double-add in fbdev/defio when a page is already enlisted.
- Don't sort deferred-I/O pages by default in fbdev.

Core Changes:
- Fix missing pm_runtime_put_sync in bridge.
- Set modifier support to only the linear fb modifier if drivers don't
  advertise support.
- As a result, remove allow_fb_modifiers.
- Add missing clear for EDID Deep Color Modes in drm_reset_display_info.
- Assorted documentation updates.
- Warn once in drm_clflush if there is no arch support.
- Add missing select for dp helper in drm_panel_edp.
- Assorted small fixes.
- Improve fb-helper's clipping handling.
- Don't dump shmem mmaps in a core dump.
- Add accounting to the ttm resource manager, and use it in amdgpu.
- Allow querying the detected eDP panel through debugfs.
- Add helpers for xrgb8888 to 8-bit and 1-bit grayscale.
- Improve drm's buddy allocator.
- Add selftests for the buddy allocator.

Driver Changes:
- Add support for nomodeset to a lot of drm drivers.
- Use drm_module_*_driver in a lot of drm drivers.
- Assorted small fixes to bridge/lt9611, v3d, vc4, vmwgfx, mxsfb, nouveau,
  bridge/dw-hdmi, panfrost, lima, ingenic, sprd, bridge/anx7625,
  ti-sn65dsi86.
- Add bridge/it6505.
- Create DP and DVI-I connectors in ast.
- Assorted nouveau backlight fixes.
- Rework amdgpu reset handling.
- Add dt bindings for ingenic,jz4780-dw-hdmi.
- Support reading edid through the aux channel in ingenic.
- Add a drm driver for Solomon SSD130x OLED displays.
- Add simple support for sharp LQ140M1JW46.
- Add more panels to nt35560.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/686ec871-e77f-c230-22e5-9e3bb80f064a@linux.intel.com
commit 54f43c17d6
@@ -83,6 +83,9 @@ properties:
     type: boolean
     description: let the driver enable audio HDMI codec function or not.

+  aux-bus:
+    $ref: /schemas/display/dp-aux-bus.yaml#
+
   ports:
     $ref: /schemas/graph.yaml#/properties/ports

@@ -167,5 +170,19 @@ examples:
             };
           };
         };
+
+        aux-bus {
+            panel {
+                compatible = "innolux,n125hce-gn1";
+                power-supply = <&pp3300_disp_x>;
+                backlight = <&backlight_lcd0>;
+
+                port {
+                    panel_in: endpoint {
+                        remote-endpoint = <&anx7625_out>;
+                    };
+                };
+            };
+        };
     };
 };

@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ingenic,jz4780-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for Ingenic JZ4780 HDMI Transmitter
+
+maintainers:
+  - H. Nikolaus Schaller <hns@goldelico.com>
+
+description: |
+  The HDMI Transmitter in the Ingenic JZ4780 is a Synopsys DesignWare HDMI 1.4
+  TX controller IP with accompanying PHY IP.
+
+allOf:
+  - $ref: synopsys,dw-hdmi.yaml#
+
+properties:
+  compatible:
+    const: ingenic,jz4780-dw-hdmi
+
+  reg-io-width:
+    const: 4
+
+  clocks:
+    maxItems: 2
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Input from LCD controller output.
+
+      port@1:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Link to the HDMI connector.
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+  - ports
+  - reg-io-width
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/ingenic,jz4780-cgu.h>
+
+    hdmi: hdmi@10180000 {
+        compatible = "ingenic,jz4780-dw-hdmi";
+        reg = <0x10180000 0x8000>;
+        reg-io-width = <4>;
+        ddc-i2c-bus = <&i2c4>;
+        interrupt-parent = <&intc>;
+        interrupts = <3>;
+        clocks = <&cgu JZ4780_CLK_AHB0>, <&cgu JZ4780_CLK_HDMI>;
+        clock-names = "iahb", "isfr";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            hdmi_in: port@0 {
+                reg = <0>;
+                dw_hdmi_in: endpoint {
+                    remote-endpoint = <&jz4780_lcd_out>;
+                };
+            };
+            hdmi_out: port@1 {
+                reg = <1>;
+                dw_hdmi_out: endpoint {
+                    remote-endpoint = <&hdmi_con>;
+                };
+            };
+        };
+    };
+
+...

@@ -68,7 +68,7 @@ properties:
         - vesa-24
     description: |
       The color signals mapping order. See details in
-      Documentation/devicetree/bindings/display/panel/lvds.yaml
+      Documentation/devicetree/bindings/display/lvds.yaml

   port@1:
     $ref: /schemas/graph.yaml#/properties/port

@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/display/panel/lvds.yaml#
+$id: http://devicetree.org/schemas/display/lvds.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#

-title: LVDS Display Panel
+title: LVDS Display Common Properties

 maintainers:
   - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>

@@ -13,8 +13,8 @@ maintainers:
 description: |+
   LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple
   incompatible data link layers have been used over time to transmit image data
-  to LVDS panels. This bindings supports display panels compatible with the
-  following specifications.
+  to LVDS devices. This bindings supports devices compatible with the following
+  specifications.

   [JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February
   1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA)

@@ -26,18 +26,7 @@ description: |+
   Device compatible with those specifications have been marketed under the
   FPD-Link and FlatLink brands.

-allOf:
-  - $ref: panel-common.yaml#
-
 properties:
-  compatible:
-    contains:
-      const: panel-lvds
-    description:
-      Shall contain "panel-lvds" in addition to a mandatory panel-specific
-      compatible string defined in individual panel bindings. The "panel-lvds"
-      value shall never be used on its own.
-
   data-mapping:
     enum:
       - jeida-18

@@ -96,22 +85,6 @@ properties:
     If set, reverse the bit order described in the data mappings below on all
     data lanes, transmitting bits for slots 6 to 0 instead of 0 to 6.

-port: true
-ports: true
-
-required:
-  - compatible
-  - data-mapping
-  - width-mm
-  - height-mm
-  - panel-timing
-
-oneOf:
-  - required:
-      - port
-  - required:
-      - ports
-
 additionalProperties: true

 ...

@@ -11,13 +11,23 @@ maintainers:
   - Thierry Reding <thierry.reding@gmail.com>

 allOf:
-  - $ref: lvds.yaml#
+  - $ref: panel-common.yaml#
+  - $ref: /schemas/display/lvds.yaml/#
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: advantech,idk-1110wr
+
+  required:
+    - compatible

 properties:
   compatible:
     items:
       - const: advantech,idk-1110wr
-      - {} # panel-lvds, but not listed here to avoid false select
+      - const: panel-lvds

   data-mapping:
     const: jeida-24

@@ -35,6 +45,11 @@ additionalProperties: false

 required:
   - compatible
+  - data-mapping
+  - width-mm
+  - height-mm
+  - panel-timing
+  - port

 examples:
   - |+

@@ -11,15 +11,26 @@ maintainers:
   - Thierry Reding <thierry.reding@gmail.com>

 allOf:
-  - $ref: lvds.yaml#
+  - $ref: panel-common.yaml#
+  - $ref: /schemas/display/lvds.yaml/#
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: innolux,ee101ia-01d
+
+  required:
+    - compatible

 properties:
   compatible:
     items:
       - const: innolux,ee101ia-01d
-      - {} # panel-lvds, but not listed here to avoid false select
+      - const: panel-lvds

   backlight: true
   data-mapping: true
   enable-gpios: true
   power-supply: true
   width-mm: true

@@ -27,5 +38,13 @@ properties:
   panel-timing: true
   port: true

+required:
+  - compatible
+  - data-mapping
+  - width-mm
+  - height-mm
+  - panel-timing
+  - port
+
 additionalProperties: false
 ...

@@ -11,13 +11,23 @@ maintainers:
   - Thierry Reding <thierry.reding@gmail.com>

 allOf:
-  - $ref: lvds.yaml#
+  - $ref: panel-common.yaml#
+  - $ref: /schemas/display/lvds.yaml/#
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: mitsubishi,aa104xd12
+
+  required:
+    - compatible

 properties:
   compatible:
     items:
       - const: mitsubishi,aa104xd12
-      - {} # panel-lvds, but not listed here to avoid false select
+      - const: panel-lvds

   vcc-supply:
     description: Reference to the regulator powering the panel VCC pins.

@@ -39,6 +49,11 @@ additionalProperties: false
 required:
   - compatible
   - vcc-supply
+  - data-mapping
+  - width-mm
+  - height-mm
+  - panel-timing
+  - port

 examples:
   - |+

@@ -11,13 +11,23 @@ maintainers:
   - Thierry Reding <thierry.reding@gmail.com>

 allOf:
-  - $ref: lvds.yaml#
+  - $ref: panel-common.yaml#
+  - $ref: /schemas/display/lvds.yaml/#
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: mitsubishi,aa121td01
+
+  required:
+    - compatible

 properties:
   compatible:
     items:
       - const: mitsubishi,aa121td01
-      - {} # panel-lvds, but not listed here to avoid false select
+      - const: panel-lvds

   vcc-supply:
     description: Reference to the regulator powering the panel VCC pins.

@@ -39,6 +49,11 @@ additionalProperties: false
 required:
   - compatible
   - vcc-supply
+  - data-mapping
+  - width-mm
+  - height-mm
+  - panel-timing
+  - port

 examples:
   - |+

@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-lvds.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic LVDS Display Panel Device Tree Bindings
+
+maintainers:
+  - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+  - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+  - $ref: /schemas/display/lvds.yaml/#
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: panel-lvds
+
+  not:
+    properties:
+      compatible:
+        contains:
+          enum:
+            - advantech,idk-1110wr
+            - advantech,idk-2121wr
+            - innolux,ee101ia-01d
+            - mitsubishi,aa104xd12
+            - mitsubishi,aa121td01
+            - sgd,gktw70sdae4se
+
+  required:
+    - compatible
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - auo,b101ew05
+          - tbs,a711-panel
+
+      - const: panel-lvds
+
+unevaluatedProperties: false
+
+required:
+  - compatible
+  - data-mapping
+  - width-mm
+  - height-mm
+  - panel-timing
+  - port
+
+...

@@ -284,6 +284,8 @@ properties:
       - sharp,lq101k1ly04
         # Sharp 12.3" (2400x1600 pixels) TFT LCD panel
       - sharp,lq123p1jx31
+        # Sharp 14" (1920x1080 pixels) TFT LCD panel
+      - sharp,lq140m1jw46
         # Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
       - sharp,ls020b1dd01d
         # Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel

@@ -11,13 +11,23 @@ maintainers:
   - Thierry Reding <thierry.reding@gmail.com>

 allOf:
-  - $ref: lvds.yaml#
+  - $ref: panel-common.yaml#
+  - $ref: /schemas/display/lvds.yaml/#
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: sgd,gktw70sdae4se
+
+  required:
+    - compatible

 properties:
   compatible:
     items:
       - const: sgd,gktw70sdae4se
-      - {} # panel-lvds, but not listed here to avoid false select
+      - const: panel-lvds

   data-mapping:
     const: jeida-18

@@ -35,6 +45,11 @@ additionalProperties: false

 required:
   - compatible
   - port
+  - data-mapping
+  - width-mm
+  - height-mm
+  - panel-timing

 examples:
   - |+

@@ -8,6 +8,7 @@ title: Solomon SSD1307 OLED Controller Framebuffer

 maintainers:
   - Maxime Ripard <mripard@kernel.org>
+  - Javier Martinez Canillas <javierm@redhat.com>

 properties:
   compatible:

@@ -159,6 +159,21 @@ allOf:
         power-domains:
           maxItems: 1
         sram-supply: false
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: rockchip,rk3568-mali
+    then:
+      properties:
+        clocks:
+          minItems: 2
+        clock-names:
+          items:
+            - const: gpu
+            - const: bus
+      required:
+        - clock-names

 examples:
   - |

MAINTAINERS
@@ -6082,7 +6082,8 @@ L: dri-devel@lists.freedesktop.org
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 S:	Maintained
 F:	drivers/gpu/drm/panel/panel-lvds.c
-F:	Documentation/devicetree/bindings/display/panel/lvds.yaml
+F:	Documentation/devicetree/bindings/display/lvds.yaml
+F:	Documentation/devicetree/bindings/display/panel/panel-lvds.yaml

 DRM DRIVER FOR MANTIX MLAF057WE51 PANELS
 M:	Guido Günther <agx@sigxcpu.org>

@@ -6131,6 +6132,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
 F:	Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
 F:	drivers/gpu/drm/panel/panel-novatek-nt35510.c

+DRM DRIVER FOR NOVATEK NT35560 PANELS
+M:	Linus Walleij <linus.walleij@linaro.org>
+S:	Maintained
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+F:	Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
+F:	drivers/gpu/drm/panel/panel-novatek-nt35560.c
+
 DRM DRIVER FOR NOVATEK NT36672A PANELS
 M:	Sumit Semwal <sumit.semwal@linaro.org>
 S:	Maintained

@@ -6167,6 +6175,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
 F:	Documentation/devicetree/bindings/display/repaper.txt
 F:	drivers/gpu/drm/tiny/repaper.c

+DRM DRIVER FOR SOLOMON SSD130X OLED DISPLAYS
+M:	Javier Martinez Canillas <javierm@redhat.com>
+S:	Maintained
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+F:	Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
+F:	drivers/gpu/drm/solomon/ssd130x*
+
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:	Dave Airlie <airlied@redhat.com>
 M:	Gerd Hoffmann <kraxel@redhat.com>

@@ -6255,12 +6270,6 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
 F:	Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
 F:	drivers/gpu/drm/tiny/st7735r.c

-DRM DRIVER FOR SONY ACX424AKP PANELS
-M:	Linus Walleij <linus.walleij@linaro.org>
-S:	Maintained
-T:	git git://anongit.freedesktop.org/drm/drm-misc
-F:	drivers/gpu/drm/panel/panel-sony-acx424akp.c
-
 DRM DRIVER FOR ST-ERICSSON MCDE
 M:	Linus Walleij <linus.walleij@linaro.org>
 S:	Maintained

@@ -176,6 +176,20 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,

	array->base.error = PENDING_ERROR;

+	/*
+	 * dma_fence_array objects should never contain any other fence
+	 * containers or otherwise we run into recursion and potential kernel
+	 * stack overflow on operations on the dma_fence_array.
+	 *
+	 * The correct way of handling this is to flatten out the array by the
+	 * caller instead.
+	 *
+	 * Enforce this here by checking that we don't create a dma_fence_array
+	 * with any container inside.
+	 */
+	while (num_fences--)
+		WARN_ON(dma_fence_is_container(fences[num_fences]));
+
	return array;
 }
 EXPORT_SYMBOL(dma_fence_array_create);

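The new check codifies the flattening rule from the changelog ("dma_fence_chain and dma_fence_array should never include each other"). A minimal sketch of caller-side flattening under that rule — collect_flat() and its inputs are hypothetical; dma_fence_array_create() and dma_fence_get() are the real APIs:

	#include <linux/dma-fence.h>
	#include <linux/dma-fence-array.h>
	#include <linux/slab.h>

	/* Hypothetical helper: build one flat dma_fence_array from plain
	 * fences the caller has already unwrapped, instead of nesting one
	 * container inside another.
	 */
	static struct dma_fence_array *collect_flat(struct dma_fence **fences,
						    unsigned int count,
						    u64 context,
						    unsigned int seqno)
	{
		struct dma_fence **array;
		unsigned int i;

		/* The array takes ownership of this pointer table and of one
		 * reference per fence, so hand it a private copy with the
		 * references already held.
		 */
		array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
		if (!array)
			return NULL;

		for (i = 0; i < count; i++)
			array[i] = dma_fence_get(fences[i]);

		/* signal_on_any = false: signals once all fences signal */
		return dma_fence_array_create(count, array, context, seqno,
					      false);
	}
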
@@ -148,8 +148,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)

	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
-		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
-		struct dma_fence *f = chain ? chain->fence : fence;
+		struct dma_fence *f = dma_fence_chain_contained(fence);

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {

@@ -165,8 +164,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
 static bool dma_fence_chain_signaled(struct dma_fence *fence)
 {
	dma_fence_chain_for_each(fence, fence) {
-		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
-		struct dma_fence *f = chain ? chain->fence : fence;
+		struct dma_fence *f = dma_fence_chain_contained(fence);

		if (!dma_fence_is_signaled(f)) {
			dma_fence_put(fence);

@@ -254,5 +252,14 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);
+
+	/*
+	 * Chaining dma_fence_chain container together is only allowed through
+	 * the prev fence and not through the contained fence.
+	 *
+	 * The correct way of handling this is to flatten out the fence
+	 * structure into a dma_fence_array by the caller instead.
+	 */
+	WARN_ON(dma_fence_is_chain(fence));
 }
 EXPORT_SYMBOL(dma_fence_chain_init);

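Both replacements above collapse the same two-line pattern into one helper. Based on the code it replaces, the helper plausibly amounts to the following (the real definition lives in the dma-fence-chain header):

	/* Return the fence a chain node wraps, or the fence itself when it
	 * is not part of a chain -- exactly the pattern removed above.
	 */
	static inline struct dma_fence *
	dma_fence_chain_contained(struct dma_fence *fence)
	{
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);

		return chain ? chain->fence : fence;
	}
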
@@ -256,6 +256,11 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)

	dma_resv_assert_held(obj);

+	/* Drivers should not add containers here, instead add each fence
+	 * individually.
+	 */
+	WARN_ON(dma_fence_is_container(fence));
+
	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

@@ -323,12 +328,8 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 }
 EXPORT_SYMBOL(dma_resv_add_excl_fence);

-/**
- * dma_resv_iter_restart_unlocked - restart the unlocked iterator
- * @cursor: The dma_resv_iter object to restart
- *
- * Restart the unlocked iteration by initializing the cursor object.
- */
+/* Restart the iterator by initializing all the necessary fields, but not the
+ * relation to the dma_resv object. */
 static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
 {
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);

@@ -344,14 +345,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
	cursor->is_restarted = true;
 }

-/**
- * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
- * @cursor: cursor to record the current position
- *
- * Return all the fences in the dma_resv object which are not yet signaled.
- * The returned fence has an extra local reference so will stay alive.
- * If a concurrent modify is detected the whole iteration is started over again.
- */
+/* Walk to the next not signaled fence and grab a reference to it */
 static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
 {
	struct dma_resv *obj = cursor->obj;

@@ -387,6 +381,12 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
  * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
  * @cursor: the cursor with the current position
  *
+ * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
+ *
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_first() whenver possible.
+ *
  * Returns the first fence from an unlocked dma_resv obj.
  */
 struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)

@@ -406,6 +406,10 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
  * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
  * @cursor: the cursor with the current position
  *
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_next() whenver possible.
+ *
  * Returns the next fence from an unlocked dma_resv obj.
  */
 struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)

@@ -431,6 +435,8 @@ EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
  * dma_resv_iter_first - first fence from a locked dma_resv object
  * @cursor: cursor to record the current position
  *
+ * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
+ *
  * Return the first fence in the dma_resv object while holding the
  * &dma_resv.lock.
  */

|
|||
select DRM_DP_HELPER
|
||||
select DRM_LIB_RANDOM
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_BUDDY
|
||||
select DRM_EXPORT_FOR_TESTS if m
|
||||
default n
|
||||
help
|
||||
|
@ -403,6 +404,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
|
|||
|
||||
source "drivers/gpu/drm/gud/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/solomon/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/sprd/Kconfig"
|
||||
|
||||
config DRM_HYPERV
|
||||
|
|
|
@@ -132,4 +132,5 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
 obj-y			+= xlnx/
 obj-y			+= gud/
 obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y			+= solomon/
 obj-$(CONFIG_DRM_SPRD)	+= sprd/

@@ -815,6 +815,8 @@ struct ip_discovery_top;
 #define AMDGPU_RESET_MAGIC_NUM 64
 #define AMDGPU_MAX_DF_PERFMONS 4
 #define AMDGPU_PRODUCT_NAME_LEN 64
+struct amdgpu_reset_domain;
+
 struct amdgpu_device {
	struct device			*dev;
	struct pci_dev			*pdev;

@@ -1050,9 +1052,7 @@ struct amdgpu_device {
	bool				in_s4;
	bool				in_s0ix;

-	atomic_t			in_gpu_reset;
	enum pp_mp1_state		mp1_state;
-	struct rw_semaphore		reset_sem;
	struct amdgpu_doorbell_index	doorbell_index;

	struct mutex			notifier_lock;

@@ -1100,6 +1100,8 @@ struct amdgpu_device {
	struct list_head		ras_list;

	struct ip_discovery_top		*ip_top;
+
+	struct amdgpu_reset_domain	*reset_domain;
 };

 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)

@@ -1293,6 +1295,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job* job);
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+			      struct amdgpu_job *job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);

@@ -1479,8 +1483,6 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
	return adev->gmc.tmz_enabled;
 }

-static inline int amdgpu_in_reset(struct amdgpu_device *adev)
-{
-	return atomic_read(&adev->in_gpu_reset);
-}
+int amdgpu_in_reset(struct amdgpu_device *adev);

 #endif

@@ -312,7 +312,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
-	used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

@@ -37,6 +37,8 @@
 #include "amdgpu_fw_attestation.h"
 #include "amdgpu_umr.h"

+#include "amdgpu_reset.h"
+
 #if defined(CONFIG_DEBUG_FS)

 /**

@@ -1284,7 +1286,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
	}

	/* Avoid accidently unparking the sched thread during GPU reset */
-	r = down_write_killable(&adev->reset_sem);
+	r = down_write_killable(&adev->reset_domain->sem);
	if (r)
		return r;

@@ -1313,7 +1315,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
		kthread_unpark(ring->sched.thread);
	}

-	up_write(&adev->reset_sem);
+	up_write(&adev->reset_domain->sem);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

@@ -1522,7 +1524,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
		return -ENOMEM;

	/* Avoid accidently unparking the sched thread during GPU reset */
-	r = down_read_killable(&adev->reset_sem);
+	r = down_read_killable(&adev->reset_domain->sem);
	if (r)
		goto pro_end;

@@ -1565,7 +1567,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

-	up_read(&adev->reset_sem);
+	up_read(&adev->reset_domain->sem);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

@@ -426,10 +426,10 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
	 * the lock.
	 */
	if (in_task()) {
-		if (down_read_trylock(&adev->reset_sem))
-			up_read(&adev->reset_sem);
+		if (down_read_trylock(&adev->reset_domain->sem))
+			up_read(&adev->reset_domain->sem);
		else
-			lockdep_assert_held(&adev->reset_sem);
+			lockdep_assert_held(&adev->reset_domain->sem);
	}
 #endif
	return false;

@@ -455,9 +455,9 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
-		    down_read_trylock(&adev->reset_sem)) {
+		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
-			up_read(&adev->reset_sem);
+			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}

@@ -540,9 +540,9 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
-		    down_read_trylock(&adev->reset_sem)) {
+		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
-			up_read(&adev->reset_sem);
+			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}

@@ -2331,6 +2331,49 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
	return r;
 }

+static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
+{
+	long timeout;
+	int r, i;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		/* No need to setup the GPU scheduler for rings that don't need it */
+		if (!ring || ring->no_scheduler)
+			continue;
+
+		switch (ring->funcs->type) {
+		case AMDGPU_RING_TYPE_GFX:
+			timeout = adev->gfx_timeout;
+			break;
+		case AMDGPU_RING_TYPE_COMPUTE:
+			timeout = adev->compute_timeout;
+			break;
+		case AMDGPU_RING_TYPE_SDMA:
+			timeout = adev->sdma_timeout;
+			break;
+		default:
+			timeout = adev->video_timeout;
+			break;
+		}
+
+		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+				   ring->num_hw_submission, amdgpu_job_hang_limit,
+				   timeout, adev->reset_domain->wq,
+				   ring->sched_score, ring->name,
+				   adev->dev);
+		if (r) {
+			DRM_ERROR("Failed to create scheduler on ring %s.\n",
+				  ring->name);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+
 /**
  * amdgpu_device_ip_init - run init for hardware IPs
  *

@@ -2442,8 +2485,28 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
	if (r)
		goto init_failed;

-	if (adev->gmc.xgmi.num_physical_nodes > 1)
-		amdgpu_xgmi_add_device(adev);
+	/**
+	 * In case of XGMI grab extra reference for reset domain for this device
+	 */
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		if (amdgpu_xgmi_add_device(adev) == 0) {
+			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+			if (!hive->reset_domain ||
+			    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+				r = -ENOENT;
+				goto init_failed;
+			}
+
+			/* Drop the early temporary reset domain we created for device */
+			amdgpu_reset_put_reset_domain(adev->reset_domain);
+			adev->reset_domain = hive->reset_domain;
+		}
+	}
+
+	r = amdgpu_device_init_schedulers(adev);
+	if (r)
+		goto init_failed;

	/* Don't init kfd if whole hive need to be reset during init */
	if (!adev->gmc.xgmi.pending_reset)

@@ -3543,8 +3606,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
-	atomic_set(&adev->in_gpu_reset, 0);
-	init_rwsem(&adev->reset_sem);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);
	mutex_init(&adev->pm.stable_pstate_ctx_lock);

@@ -3630,6 +3691,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
		return r;
	}

+	/*
+	 * Reset domain needs to be present early, before XGMI hive discovered
+	 * (if any) and intitialized to use reset sem and in_gpu reset flag
+	 * early on during init.
+	 */
+	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE ,"amdgpu-reset-dev");
+	if (!adev->reset_domain)
+		return -ENOMEM;
+
	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)

@@ -4006,6 +4076,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
	if (adev->mman.discovery_bin)
		amdgpu_discovery_fini(adev);

+	amdgpu_reset_put_reset_domain(adev->reset_domain);
+	adev->reset_domain = NULL;
+
	kfree(adev->pci_state);

 }

@@ -4817,17 +4890,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
	return r;
 }

-static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
-				    struct amdgpu_hive_info *hive)
+static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
 {
-	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
-		return false;
-
-	if (hive) {
-		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
-	} else {
-		down_write(&adev->reset_sem);
-	}
-
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:

@@ -4840,56 +4904,12 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}
-
-	return true;
 }

-static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
 {
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
-	atomic_set(&adev->in_gpu_reset, 0);
-	up_write(&adev->reset_sem);
-}
-
-/*
- * to lockup a list of amdgpu devices in a hive safely, if not a hive
- * with multiple nodes, it will be similar as amdgpu_device_lock_adev.
- *
- * unlock won't require roll back.
- */
-static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
-{
-	struct amdgpu_device *tmp_adev = NULL;
-
-	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
-		if (!hive) {
-			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
-			return -ENODEV;
-		}
-		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-			if (!amdgpu_device_lock_adev(tmp_adev, hive))
-				goto roll_back;
-		}
-	} else if (!amdgpu_device_lock_adev(adev, hive))
-		return -EAGAIN;
-
-	return 0;
-roll_back:
-	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
-		/*
-		 * if the lockup iteration break in the middle of a hive,
-		 * it may means there may has a race issue,
-		 * or a hive device locked up independently.
-		 * we may be in trouble and may not, so will try to roll back
-		 * the lock and give out a warnning.
-		 */
-		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
-		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-			amdgpu_device_unlock_adev(tmp_adev);
-		}
-	}
-	return -EAGAIN;
 }

 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)

@@ -5023,7 +5043,7 @@ static void amdgpu_device_recheck_guilty_jobs(
 }

 /**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ * amdgpu_device_gpu_recover_imp - reset the asic and recover scheduler
  *
  * @adev: amdgpu_device pointer
  * @job: which job trigger hang

@@ -5033,7 +5053,7 @@ static void amdgpu_device_recheck_guilty_jobs(
  * Returns 0 for success or an error on failure.
  */

-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
 {
	struct list_head device_list, *device_list_handle = NULL;

@@ -5067,26 +5087,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	dev_info(adev->dev, "GPU %s begin!\n",
		need_emergency_restart ? "jobs stop":"reset");

-	/*
-	 * Here we trylock to avoid chain of resets executing from
-	 * either trigger by jobs on different adevs in XGMI hive or jobs on
-	 * different schedulers for same device while this TO handler is running.
-	 * We always reset all schedulers for device and all devices for XGMI
-	 * hive so that should take care of them too.
-	 */
	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
-	if (hive) {
-		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
-			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
-				job ? job->base.id : -1, hive->hive_id);
-			amdgpu_put_xgmi_hive(hive);
-			if (job && job->vm)
-				drm_sched_increase_karma(&job->base);
-			return 0;
-		}
+	if (hive)
		mutex_lock(&hive->hive_lock);
-	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;

@@ -5094,22 +5098,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	reset_context.hive = hive;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

-	/*
-	 * lock the device before we try to operate the linked list
-	 * if didn't get the device lock, don't touch the linked list since
-	 * others may iterating it.
-	 */
-	r = amdgpu_device_lock_hive_adev(adev, hive);
-	if (r) {
-		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
-					job ? job->base.id : -1);
-
-		/* even we skipped this reset, still need to set the job to guilty */
-		if (job && job->vm)
-			drm_sched_increase_karma(&job->base);
-		goto skip_recovery;
-	}
-
	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list

@@ -5127,8 +5115,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
		device_list_handle = &device_list;
	}

+	/* We need to lock reset domain only once both for XGMI and single device */
+	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+				    reset_list);
+	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+
	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+
+		amdgpu_device_set_mp1_state(tmp_adev);
+
		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset started.

@@ -5280,21 +5276,55 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
-		amdgpu_device_unlock_adev(tmp_adev);
+
+		amdgpu_device_unset_mp1_state(tmp_adev);
	}

-skip_recovery:
+	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+				    reset_list);
+	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+
	if (hive) {
-		atomic_set(&hive->in_reset, 0);
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

-	if (r && r != -EAGAIN)
+	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
 }

+struct amdgpu_recover_work_struct {
+	struct work_struct base;
+	struct amdgpu_device *adev;
+	struct amdgpu_job *job;
+	int ret;
+};
+
+static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
+{
+	struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
+
+	recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
+}
+/*
+ * Serialize gpu recover into reset domain single threaded wq
+ */
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+				struct amdgpu_job *job)
+{
+	struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
+
+	INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
+
+	if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
+		return -EAGAIN;
+
+	flush_work(&work.base);
+
+	return work.ret;
+}
+
 /**
  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
  *

@@ -5482,20 +5512,6 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
	return 0;
 }

-static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
-{
-	int i;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
-
-		if (!ring || !ring->sched.thread)
-			continue;
-
-		cancel_delayed_work_sync(&ring->sched.work_tdr);
-	}
-}
-
 /**
  * amdgpu_pci_error_detected - Called when a PCI error is detected.
  * @pdev: PCI device struct

@@ -5526,14 +5542,11 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
-		 * Cancel and wait for all TDRs in progress if failing to
-		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
-		 *
-		 * Locking adev->reset_sem will prevent any external access
+		 * Locking adev->reset_domain->sem will prevent any external access
		 * to GPU during PCI error recovery
		 */
-		while (!amdgpu_device_lock_adev(adev, NULL))
-			amdgpu_cancel_all_tdr(adev);
+		amdgpu_device_lock_reset_domain(adev->reset_domain);
+		amdgpu_device_set_mp1_state(adev);

		/*
		 * Block any work scheduling as we do for regular GPU reset

@@ -5640,7 +5653,8 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
-		amdgpu_device_unlock_adev(adev);
+		amdgpu_device_unset_mp1_state(adev);
+		amdgpu_device_unlock_reset_domain(adev->reset_domain);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;

@@ -5677,7 +5691,8 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
		drm_sched_start(&ring->sched, true);
	}

-	amdgpu_device_unlock_adev(adev);
+	amdgpu_device_unset_mp1_state(adev);
+	amdgpu_device_unlock_reset_domain(adev->reset_domain);
 }

 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)

@@ -5754,6 +5769,11 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
	amdgpu_asic_invalidate_hdp(adev, ring);
 }

+int amdgpu_in_reset(struct amdgpu_device *adev)
+{
+	return atomic_read(&adev->reset_domain->in_gpu_reset);
+}
+
 /**
  * amdgpu_device_halt() - bring hardware to some kind of halt state
  *

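The new amdgpu_device_gpu_recover() wrapper above funnels every recovery request through the reset domain's workqueue; if that workqueue is single-threaded, queueing is what serializes concurrent TDR handlers. A minimal sketch of what amdgpu_reset_domain_schedule() plausibly reduces to (the helper lives in amdgpu_reset.h; treat this body as an assumption, not a quote of the patch):

	/* Sketch: schedule recovery onto the reset domain's workqueue.
	 * queue_work() returns false when the work is already pending,
	 * which is how a second, concurrent recovery request collapses
	 * into the first (the caller above turns that into -EAGAIN).
	 */
	static inline bool
	amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
				     struct work_struct *work)
	{
		return queue_work(domain->wq, work);
	}
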
@@ -961,7 +961,7 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

-	if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {

@@ -1148,7 +1148,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
	if (ret)
		return ret;

-	if (!dev->mode_config.allow_fb_modifiers) {
+	if (dev->mode_config.fb_modifiers_not_supported) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);

@@ -1156,7 +1156,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
			return ret;
	}

-	if (dev->mode_config.allow_fb_modifiers &&
+	if (!dev->mode_config.fb_modifiers_not_supported &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		ret = convert_tiling_flags_to_modifier(rfb);
		if (ret) {

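All three hunks above flip the same predicate: the old opt-in allow_fb_modifiers becomes an opt-out flag, matching the changelog item "Set modifier support to only the linear fb modifier if drivers don't advertise support." A hedged fragment of how a driver without modifier support would now express that — the predicate and function are hypothetical, only the mode_config field comes from the patch:

	#include <drm/drm_device.h>

	/* Hypothetical driver-init fragment: opt out of format modifiers
	 * when the hardware cannot describe its tiling as a modifier;
	 * everything else gets at least DRM_FORMAT_MOD_LINEAR by default.
	 */
	static void example_init_mode_config(struct drm_device *ddev,
					     bool has_modifiers)
	{
		if (!has_modifiers)
			ddev->mode_config.fb_modifiers_not_supported = true;
	}
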
@@ -446,24 +446,18 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
  * for the requested ring.
  *
  * @ring: ring to init the fence driver on
- * @num_hw_submission: number of entries on the hardware queue
- * @sched_score: optional score atomic shared with other schedulers
  *
  * Init the fence driver for the requested ring (all asics).
  * Helper function for amdgpu_fence_driver_init().
  */
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-				  unsigned num_hw_submission,
-				  atomic_t *sched_score)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 {
	struct amdgpu_device *adev = ring->adev;
-	long timeout;
	int r;

	if (!adev)
		return -EINVAL;

-	if (!is_power_of_2(num_hw_submission))
+	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;

@@ -474,41 +468,14 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

-	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
+	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
-	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
+	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);

	if (!ring->fence_drv.fences)
		return -ENOMEM;

-	/* No need to setup the GPU scheduler for rings that don't need it */
-	if (ring->no_scheduler)
-		return 0;
-
-	switch (ring->funcs->type) {
-	case AMDGPU_RING_TYPE_GFX:
-		timeout = adev->gfx_timeout;
-		break;
-	case AMDGPU_RING_TYPE_COMPUTE:
-		timeout = adev->compute_timeout;
-		break;
-	case AMDGPU_RING_TYPE_SDMA:
-		timeout = adev->sdma_timeout;
-		break;
-	default:
-		timeout = adev->video_timeout;
-		break;
-	}
-
-	r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
-			   num_hw_submission, amdgpu_job_hang_limit,
-			   timeout, NULL, sched_score, ring->name);
-	if (r) {
-		DRM_ERROR("Failed to create scheduler on ring %s.\n",
-			  ring->name);
-		return r;
-	}
-
	return 0;
 }

@@ -60,7 +60,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
-	return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
+	return sysfs_emit(buf, "%llu\n", man->size);
 }

 /**

@@ -77,8 +77,9 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
 {
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct ttm_resource_manager *man = &adev->mman.gtt_mgr.manager;

-	return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr));
+	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
 }

 static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,

@@ -130,20 +131,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
	struct amdgpu_gtt_node *node;
	int r;

-	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
-	    atomic64_add_return(num_pages, &mgr->used) > man->size) {
-		atomic64_sub(num_pages, &mgr->used);
-		return -ENOSPC;
-	}
-
	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
-	if (!node) {
-		r = -ENOMEM;
-		goto err_out;
-	}
+	if (!node)
+		return -ENOMEM;

	node->tbo = tbo;
	ttm_resource_init(tbo, place, &node->base.base);
+	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
+	    ttm_resource_manager_usage(man) > man->size) {
+		r = -ENOSPC;
+		goto err_free;
+	}

	if (place->lpfn) {
		spin_lock(&mgr->lock);

@@ -169,11 +167,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 err_free:
	ttm_resource_fini(man, &node->base.base);
	kfree(node);
-
-err_out:
-	if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
-		atomic64_sub(num_pages, &mgr->used);
-
	return r;
 }

@@ -196,25 +189,10 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
		drm_mm_remove_node(&node->base.mm_nodes[0]);
	spin_unlock(&mgr->lock);

-	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
-		atomic64_sub(res->num_pages, &mgr->used);
-
	ttm_resource_fini(man, res);
	kfree(node);
 }

-/**
- * amdgpu_gtt_mgr_usage - return usage of GTT domain
- *
- * @mgr: amdgpu_gtt_mgr pointer
- *
- * Return how many bytes are used in the GTT domain
- */
-uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
-{
-	return atomic64_read(&mgr->used) * PAGE_SIZE;
-}
-
 /**
  * amdgpu_gtt_mgr_recover - re-init gart
  *

@@ -255,9 +233,6 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);
-
-	drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n",
-		   man->size, atomic64_read(&mgr->used));
 }

 static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {

@@ -283,14 +258,12 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
	man->use_tt = true;
	man->func = &amdgpu_gtt_mgr_func;

-	ttm_resource_manager_init(man, &adev->mman.bdev,
-				  gtt_size >> PAGE_SHIFT);
+	ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);

	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
	drm_mm_init(&mgr->mm, start, size);
	spin_lock_init(&mgr->lock);
-	atomic64_set(&mgr->used, 0);

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
	ttm_resource_manager_set_used(man, true);

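Every per-manager atomic removed above is replaced by one counter that TTM itself maintains — this is the "add accounting to ttm resource manager" item from the changelog. A sketch of the accessor these hunks call, assuming the 5.18 TTM layout where the counter lives in the manager and is guarded by the device's LRU lock (an assumption about ttm_resource.c internals, shown for orientation):

	/* Read the bytes currently allocated from a resource manager.
	 * The counter is updated by ttm_resource_init()/ttm_resource_fini(),
	 * so drivers no longer have to track usage themselves. Note that
	 * man->size and the returned usage are now both in bytes, which is
	 * why the sysfs hunks above drop their PAGE_SIZE scaling.
	 */
	uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
	{
		uint64_t usage;

		spin_lock(&man->bdev->lru_lock);
		usage = man->usage;
		spin_unlock(&man->bdev->lru_lock);

		return usage;
	}
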
@@ -64,10 +64,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
-		r = amdgpu_device_gpu_recover(ring->adev, job);
+		r = amdgpu_device_gpu_recover_imp(ring->adev, job);
		if (r)
			DRM_ERROR("GPU Recovery Failed: %d\n", r);
-
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))

@@ -604,13 +604,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
-		ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
-		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
+		ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

@@ -642,14 +642,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *gtt_man =
-			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+			&adev->mman.gtt_mgr.manager;
+		struct ttm_resource_manager *vram_man =
+			&adev->mman.vram_mgr.manager;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
-			amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+			ttm_resource_manager_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =

@@ -667,8 +670,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
-		mem.gtt.heap_usage =
-			amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
+		mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,

@@ -451,7 +451,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
-		if (size < (man->size << PAGE_SHIFT))
+		if (size < man->size)
 			return true;
 		else
 			goto fail;
@@ -460,7 +460,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-		if (size < (man->size << PAGE_SHIFT))
+		if (size < man->size)
 			return true;
 		else
 			goto fail;
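
A note on units: ttm_resource_manager sizes are now kept in bytes rather than pages, which is why the << PAGE_SHIFT disappears from these checks. An illustrative sketch only (the helper name is made up):

	static bool example_fits_in_gtt(struct amdgpu_device *adev, u64 size)
	{
		struct ttm_resource_manager *man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		return size < man->size; /* both sides are byte counts now */
	}
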
@@ -25,12 +25,6 @@
 
 #include "amdgpu.h"
 
-static inline struct amdgpu_preempt_mgr *
-to_preempt_mgr(struct ttm_resource_manager *man)
-{
-	return container_of(man, struct amdgpu_preempt_mgr, manager);
-}
-
 /**
  * DOC: mem_info_preempt_used
  *
@@ -45,10 +39,9 @@ static ssize_t mem_info_preempt_used_show(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
-	struct ttm_resource_manager *man;
+	struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
 
-	man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT);
-	return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man));
+	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
 }
 
 static DEVICE_ATTR_RO(mem_info_preempt_used);
@@ -68,16 +61,12 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
 			     const struct ttm_place *place,
 			     struct ttm_resource **res)
 {
-	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
-
 	*res = kzalloc(sizeof(**res), GFP_KERNEL);
 	if (!*res)
 		return -ENOMEM;
 
 	ttm_resource_init(tbo, place, *res);
 	(*res)->start = AMDGPU_BO_INVALID_OFFSET;
-
-	atomic64_add((*res)->num_pages, &mgr->used);
 	return 0;
 }
 
@@ -92,49 +81,13 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
 				   struct ttm_resource *res)
 {
-	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
-
-	atomic64_sub(res->num_pages, &mgr->used);
 	ttm_resource_fini(man, res);
 	kfree(res);
 }
 
-/**
- * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain
- *
- * @man: TTM memory type manager
- *
- * Return how many bytes are used in the GTT domain
- */
-uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man)
-{
-	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
-	s64 result = atomic64_read(&mgr->used);
-
-	return (result > 0 ? result : 0) * PAGE_SIZE;
-}
-
-/**
- * amdgpu_preempt_mgr_debug - dump VRAM table
- *
- * @man: TTM memory type manager
- * @printer: DRM printer to use
- *
- * Dump the table content using printk.
- */
-static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man,
-				     struct drm_printer *printer)
-{
-	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
-
-	drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n",
-		   man->size, (u64)atomic64_read(&mgr->used));
-}
-
 static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
 	.alloc = amdgpu_preempt_mgr_new,
 	.free = amdgpu_preempt_mgr_del,
-	.debug = amdgpu_preempt_mgr_debug
 };
 
 /**
@@ -146,8 +99,7 @@ static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
  */
 int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
-	struct ttm_resource_manager *man = &mgr->manager;
+	struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
 	int ret;
 
 	man->use_tt = true;
@@ -155,16 +107,13 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
 
 	ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30));
 
-	atomic64_set(&mgr->used, 0);
-
 	ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
 	if (ret) {
 		DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
 		return ret;
 	}
 
-	ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT,
-			       &mgr->manager);
+	ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, man);
 	ttm_resource_manager_set_used(man, true);
 	return 0;
 }
@@ -179,8 +128,7 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
  */
 void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
 {
-	struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
-	struct ttm_resource_manager *man = &mgr->manager;
+	struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
 	int ret;
 
 	ttm_resource_manager_set_used(man, false);
@@ -31,6 +31,8 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 
+#include "amdgpu_reset.h"
+
 #define EEPROM_I2C_MADDR_VEGA20         0x0
 #define EEPROM_I2C_MADDR_ARCTURUS       0x40000
 #define EEPROM_I2C_MADDR_ARCTURUS_D342  0x0
@@ -193,12 +195,12 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
 	__encode_table_header_to_buf(&control->tbl_hdr, buf);
 
 	/* i2c may be unstable in gpu reset */
-	down_read(&adev->reset_sem);
+	down_read(&adev->reset_domain->sem);
 	res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
 				  control->i2c_address +
 				  control->ras_header_offset,
 				  buf, RAS_TABLE_HEADER_SIZE);
-	up_read(&adev->reset_sem);
+	up_read(&adev->reset_domain->sem);
 
 	if (res < 0) {
 		DRM_ERROR("Failed to write EEPROM table header:%d", res);
@@ -390,13 +392,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
 	int res;
 
 	/* i2c may be unstable in gpu reset */
-	down_read(&adev->reset_sem);
+	down_read(&adev->reset_domain->sem);
 	buf_size = num * RAS_TABLE_RECORD_SIZE;
 	res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
 				  control->i2c_address +
 				  RAS_INDEX_TO_OFFSET(control, fri),
 				  buf, buf_size);
-	up_read(&adev->reset_sem);
+	up_read(&adev->reset_domain->sem);
 	if (res < 0) {
 		DRM_ERROR("Writing %d EEPROM table records error:%d",
 			  num, res);
@@ -550,12 +552,12 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
 		goto Out;
 	}
 
-	down_read(&adev->reset_sem);
+	down_read(&adev->reset_domain->sem);
 	res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
 				 control->i2c_address +
 				 control->ras_record_offset,
 				 buf, buf_size);
-	up_read(&adev->reset_sem);
+	up_read(&adev->reset_domain->sem);
 	if (res < 0) {
 		DRM_ERROR("EEPROM failed reading records:%d\n",
 			  res);
@@ -645,13 +647,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
 	int res;
 
 	/* i2c may be unstable in gpu reset */
-	down_read(&adev->reset_sem);
+	down_read(&adev->reset_domain->sem);
 	buf_size = num * RAS_TABLE_RECORD_SIZE;
 	res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
 				 control->i2c_address +
 				 RAS_INDEX_TO_OFFSET(control, fri),
 				 buf, buf_size);
-	up_read(&adev->reset_sem);
+	up_read(&adev->reset_domain->sem);
 	if (res < 0) {
 		DRM_ERROR("Reading %d EEPROM table records error:%d",
 			  num, res);
@@ -96,3 +96,59 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
 	return reset_handler->restore_hwcontext(adev->reset_cntl,
 						reset_context);
 }
+
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref)
+{
+	struct amdgpu_reset_domain *reset_domain = container_of(ref,
+								struct amdgpu_reset_domain,
+								refcount);
+	if (reset_domain->wq)
+		destroy_workqueue(reset_domain->wq);
+
+	kvfree(reset_domain);
+}
+
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+							     char *wq_name)
+{
+	struct amdgpu_reset_domain *reset_domain;
+
+	reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
+	if (!reset_domain) {
+		DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
+		return NULL;
+	}
+
+	reset_domain->type = type;
+	kref_init(&reset_domain->refcount);
+
+	reset_domain->wq = create_singlethread_workqueue(wq_name);
+	if (!reset_domain->wq) {
+		DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
+		amdgpu_reset_put_reset_domain(reset_domain);
+		return NULL;
+
+	}
+
+	atomic_set(&reset_domain->in_gpu_reset, 0);
+	init_rwsem(&reset_domain->sem);
+
+	return reset_domain;
+}
+
+void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
+{
+	atomic_set(&reset_domain->in_gpu_reset, 1);
+	down_write(&reset_domain->sem);
+}
+
+
+void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
+{
+	atomic_set(&reset_domain->in_gpu_reset, 0);
+	up_write(&reset_domain->sem);
+}
+
+
+
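
For reference: the new reset domain is refcounted, owns a single-threaded workqueue, and its rw_semaphore serializes GPU reset against concurrent hardware access. A minimal sketch of the life cycle, with a hypothetical caller:

	static int example_init_reset_domain(struct amdgpu_device *adev)
	{
		adev->reset_domain =
			amdgpu_reset_create_reset_domain(SINGLE_DEVICE,
							 "amdgpu-reset-dev");
		if (!adev->reset_domain)
			return -ENOMEM;
		return 0;
	}

	static void example_fini_reset_domain(struct amdgpu_device *adev)
	{
		/* dropping the last reference destroys the workqueue */
		amdgpu_reset_put_reset_domain(adev->reset_domain);
	}
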
@@ -70,6 +70,21 @@ struct amdgpu_reset_control {
 	void (*async_reset)(struct work_struct *work);
 };
 
+
+enum amdgpu_reset_domain_type {
+	SINGLE_DEVICE,
+	XGMI_HIVE
+};
+
+struct amdgpu_reset_domain {
+	struct kref refcount;
+	struct workqueue_struct *wq;
+	enum amdgpu_reset_domain_type type;
+	struct rw_semaphore sem;
+	atomic_t in_gpu_reset;
+};
+
+
 int amdgpu_reset_init(struct amdgpu_device *adev);
 int amdgpu_reset_fini(struct amdgpu_device *adev);
 
@@ -82,4 +97,29 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
 int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
 			     struct amdgpu_reset_handler *handler);
 
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+							     char *wq_name);
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref);
+
+static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *domain)
+{
+	return kref_get_unless_zero(&domain->refcount) != 0;
+}
+
+static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
+{
+	kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+}
+
+static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
+						struct work_struct *work)
+{
+	return queue_work(domain->wq, work);
+}
+
+void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
+
+void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
+
 #endif
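
For reference: recovery work is queued through the domain's ordered workqueue, as the mxgpu FLR handlers further below do; because the workqueue is single-threaded, resets for all devices sharing a domain are serialized. A hedged sketch (the wrapper is hypothetical):

	static void example_trigger_recovery(struct amdgpu_device *adev,
					     struct work_struct *work)
	{
		/* queue_work() semantics: false means already queued */
		if (!amdgpu_reset_domain_schedule(adev->reset_domain, work))
			DRM_WARN("reset work already queued\n");
	}
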
@@ -191,8 +191,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		ring->adev = adev;
 		ring->idx = adev->num_rings++;
 		adev->rings[ring->idx] = ring;
-		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
-						  sched_score);
+		ring->num_hw_submission = sched_hw_submission;
+		ring->sched_score = sched_score;
+		r = amdgpu_fence_driver_init_ring(ring);
 		if (r)
 			return r;
 	}
@@ -114,9 +114,7 @@ struct amdgpu_fence_driver {
 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-				  unsigned num_hw_submission,
-				  atomic_t *sched_score);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
@@ -251,6 +249,8 @@ struct amdgpu_ring {
 	bool			has_compute_vm_bug;
 	bool			no_scheduler;
 	int			hw_prio;
+	unsigned		num_hw_submission;
+	atomic_t		*sched_score;
 };
 
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
@@ -261,10 +261,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 
 	dma_resv_for_each_fence(&cursor, resv, true, f) {
 		dma_fence_chain_for_each(f, f) {
-			struct dma_fence_chain *chain = to_dma_fence_chain(f);
+			struct dma_fence *tmp = dma_fence_chain_contained(f);
 
-			if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
-						   chain->fence : f)) {
+			if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
 				r = amdgpu_sync_fence(sync, f);
 				dma_fence_put(f);
 				if (r)
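
For reference: dma_fence_chain_contained() is the helper from <linux/dma-fence-chain.h> that replaces the open-coded chain test above. Its effect is roughly:

	static struct dma_fence *example_contained(struct dma_fence *fence)
	{
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);

		/* the fence wrapped by a chain node, or the fence itself */
		return chain ? chain->fence : fence;
	}
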
@@ -1941,7 +1941,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		size = adev->gmc.real_vram_size;
 	else
 		size = adev->gmc.visible_vram_size;
-	man->size = size >> PAGE_SHIFT;
+	man->size = size;
 	adev->mman.buffer_funcs_enabled = enable;
 }
 
@@ -44,7 +44,6 @@ struct amdgpu_vram_mgr {
 	spinlock_t lock;
 	struct list_head reservations_pending;
 	struct list_head reserved_pages;
-	atomic64_t usage;
 	atomic64_t vis_usage;
 };
 
@@ -52,12 +51,6 @@ struct amdgpu_gtt_mgr {
 	struct ttm_resource_manager manager;
 	struct drm_mm mm;
 	spinlock_t lock;
-	atomic64_t used;
-};
-
-struct amdgpu_preempt_mgr {
-	struct ttm_resource_manager manager;
-	atomic64_t used;
 };
 
 struct amdgpu_mman {
@@ -76,7 +69,7 @@ struct amdgpu_mman {
 
 	struct amdgpu_vram_mgr vram_mgr;
 	struct amdgpu_gtt_mgr gtt_mgr;
-	struct amdgpu_preempt_mgr preempt_mgr;
+	struct ttm_resource_manager preempt_mgr;
 
 	uint64_t stolen_vga_size;
 	struct amdgpu_bo *stolen_vga_memory;
@@ -118,7 +111,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
 
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
 void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
 
 uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
@@ -133,7 +125,6 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 void amdgpu_vram_mgr_free_sgt(struct device *dev,
 			      enum dma_data_direction dir,
 			      struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
 uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
 int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 				  uint64_t start, uint64_t size);
@@ -575,8 +575,10 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 	vf2pf_info->driver_cert = 0;
 	vf2pf_info->os_info.all = 0;
 
-	vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
-	vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
+	vf2pf_info->fb_usage =
+		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+	vf2pf_info->fb_vis_usage =
+		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
 	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
 	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
 
|
|||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||
struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
|
||||
|
||||
return sysfs_emit(buf, "%llu\n",
|
||||
amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
|
||||
return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -253,7 +253,9 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
|
|||
|
||||
vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
|
||||
atomic64_add(vis_usage, &mgr->vis_usage);
|
||||
atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
|
||||
spin_lock(&man->bdev->lru_lock);
|
||||
man->usage += rsv->mm_node.size << PAGE_SHIFT;
|
||||
spin_unlock(&man->bdev->lru_lock);
|
||||
list_move(&rsv->node, &mgr->reserved_pages);
|
||||
}
|
||||
}
|
||||
|
@ -378,19 +380,13 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
|||
|
||||
lpfn = place->lpfn;
|
||||
if (!lpfn)
|
||||
lpfn = man->size;
|
||||
lpfn = man->size >> PAGE_SHIFT;
|
||||
|
||||
max_bytes = adev->gmc.mc_vram_size;
|
||||
if (tbo->type != ttm_bo_type_kernel)
|
||||
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
|
||||
|
||||
/* bail out quickly if there's likely not enough VRAM for this BO */
|
||||
mem_bytes = tbo->base.size;
|
||||
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
|
||||
r = -ENOSPC;
|
||||
goto error_sub;
|
||||
}
|
||||
|
||||
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
|
||||
pages_per_node = ~0ul;
|
||||
num_nodes = 1;
|
||||
|
@ -408,13 +404,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
|||
|
||||
node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
|
||||
GFP_KERNEL | __GFP_ZERO);
|
||||
if (!node) {
|
||||
r = -ENOMEM;
|
||||
goto error_sub;
|
||||
}
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
ttm_resource_init(tbo, place, &node->base);
|
||||
|
||||
/* bail out quickly if there's likely not enough VRAM for this BO */
|
||||
if (ttm_resource_manager_usage(man) > max_bytes) {
|
||||
r = -ENOSPC;
|
||||
goto error_fini;
|
||||
}
|
||||
|
||||
mode = DRM_MM_INSERT_BEST;
|
||||
if (place->flags & TTM_PL_FLAG_TOPDOWN)
|
||||
mode = DRM_MM_INSERT_HIGH;
|
||||
|
@ -472,11 +472,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
|||
while (i--)
|
||||
drm_mm_remove_node(&node->mm_nodes[i]);
|
||||
spin_unlock(&mgr->lock);
|
||||
error_fini:
|
||||
ttm_resource_fini(man, &node->base);
|
||||
kvfree(node);
|
||||
|
||||
error_sub:
|
||||
atomic64_sub(mem_bytes, &mgr->usage);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -494,7 +493,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
|
|||
struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
|
||||
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
|
||||
struct amdgpu_device *adev = to_amdgpu_device(mgr);
|
||||
uint64_t usage = 0, vis_usage = 0;
|
||||
uint64_t vis_usage = 0;
|
||||
unsigned i, pages;
|
||||
|
||||
spin_lock(&mgr->lock);
|
||||
|
@ -503,13 +502,11 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
|
|||
struct drm_mm_node *mm = &node->mm_nodes[i];
|
||||
|
||||
drm_mm_remove_node(mm);
|
||||
usage += mm->size << PAGE_SHIFT;
|
||||
vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
|
||||
}
|
||||
amdgpu_vram_mgr_do_reserve(man);
|
||||
spin_unlock(&mgr->lock);
|
||||
|
||||
atomic64_sub(usage, &mgr->usage);
|
||||
atomic64_sub(vis_usage, &mgr->vis_usage);
|
||||
|
||||
ttm_resource_fini(man, res);
|
||||
|
@ -627,18 +624,6 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
|
|||
kfree(sgt);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vram_mgr_usage - how many bytes are used in this domain
|
||||
*
|
||||
* @mgr: amdgpu_vram_mgr pointer
|
||||
*
|
||||
* Returns how many bytes are used in this domain.
|
||||
*/
|
||||
uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
|
||||
{
|
||||
return atomic64_read(&mgr->usage);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
|
||||
*
|
||||
|
@ -664,13 +649,12 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
|
|||
{
|
||||
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
|
||||
|
||||
drm_printf(printer, " vis usage:%llu\n",
|
||||
amdgpu_vram_mgr_vis_usage(mgr));
|
||||
|
||||
spin_lock(&mgr->lock);
|
||||
drm_mm_print(&mgr->mm, printer);
|
||||
spin_unlock(&mgr->lock);
|
||||
|
||||
drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
|
||||
man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
|
||||
amdgpu_vram_mgr_vis_usage(mgr) >> 20);
|
||||
}
|
||||
|
||||
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
|
||||
|
@ -692,11 +676,11 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
|
|||
struct ttm_resource_manager *man = &mgr->manager;
|
||||
|
||||
ttm_resource_manager_init(man, &adev->mman.bdev,
|
||||
adev->gmc.real_vram_size >> PAGE_SHIFT);
|
||||
adev->gmc.real_vram_size);
|
||||
|
||||
man->func = &amdgpu_vram_mgr_func;
|
||||
|
||||
drm_mm_init(&mgr->mm, 0, man->size);
|
||||
drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
|
||||
spin_lock_init(&mgr->lock);
|
||||
INIT_LIST_HEAD(&mgr->reservations_pending);
|
||||
INIT_LIST_HEAD(&mgr->reserved_pages);
|
||||
|
|
|
@@ -32,6 +32,8 @@
 #include "wafl/wafl2_4_0_0_smn.h"
 #include "wafl/wafl2_4_0_0_sh_mask.h"
 
+#include "amdgpu_reset.h"
+
 #define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210
 #define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
 #define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
@@ -227,6 +229,9 @@ static void amdgpu_xgmi_hive_release(struct kobject *kobj)
 	struct amdgpu_hive_info *hive = container_of(
 		kobj, struct amdgpu_hive_info, kobj);
 
+	amdgpu_reset_put_reset_domain(hive->reset_domain);
+	hive->reset_domain = NULL;
+
 	mutex_destroy(&hive->hive_lock);
 	kfree(hive);
 }
@@ -398,15 +403,35 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 		goto pro_end;
 	}
 
+	/**
+	 * Avoid recreating reset domain when hive is reconstructed for the case
+	 * of reset the devices in the XGMI hive during probe for SRIOV
+	 * See https://www.spinics.net/lists/amd-gfx/msg58836.html
+	 */
+	if (adev->reset_domain->type != XGMI_HIVE) {
+		hive->reset_domain = amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
+		if (!hive->reset_domain) {
+			dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
+			ret = -ENOMEM;
+			kobject_put(&hive->kobj);
+			kfree(hive);
+			hive = NULL;
+			goto pro_end;
+		}
+	} else {
+		amdgpu_reset_get_reset_domain(adev->reset_domain);
+		hive->reset_domain = adev->reset_domain;
+	}
+
 	hive->hive_id = adev->gmc.xgmi.hive_id;
 	INIT_LIST_HEAD(&hive->device_list);
 	INIT_LIST_HEAD(&hive->node);
 	mutex_init(&hive->hive_lock);
-	atomic_set(&hive->in_reset, 0);
 	atomic_set(&hive->number_devices, 0);
 	task_barrier_init(&hive->tb);
 	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
 	hive->hi_req_gpu = NULL;
+
 	/*
 	 * hive pstate on boot is high in vega20 so we have to go to low
 	 * pstate on after boot.
@@ -33,7 +33,6 @@ struct amdgpu_hive_info {
 	struct list_head node;
 	atomic_t number_devices;
 	struct mutex hive_lock;
-	atomic_t in_reset;
 	int hi_req_count;
 	struct amdgpu_device *hi_req_gpu;
 	struct task_barrier tb;
@@ -42,6 +41,8 @@ struct amdgpu_hive_info {
 		AMDGPU_XGMI_PSTATE_MAX_VEGA20,
 		AMDGPU_XGMI_PSTATE_UNKNOWN
 	} pstate;
+
+	struct amdgpu_reset_domain *reset_domain;
 };
 
 struct amdgpu_pcs_ras_field {
@@ -25,6 +25,8 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/string_helpers.h>
+
 #include <asm/unaligned.h>
 
 #include <drm/drm_util.h>
@@ -740,7 +742,7 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 		break;
 	}
 	if (arg != ATOM_COND_ALWAYS)
-		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
+		SDEBUG("   taken: %s\n", str_yes_no(execute));
 	SDEBUG("   target: 0x%04X\n", target);
 	if (execute) {
 		if (ctx->last_jump == (ctx->start + target)) {
@@ -2798,6 +2798,8 @@ static int dce_v10_0_sw_init(void *handle)
 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
+	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
+
 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
@@ -2916,6 +2916,8 @@ static int dce_v11_0_sw_init(void *handle)
 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
+	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
+
 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
@@ -2674,6 +2674,7 @@ static int dce_v6_0_sw_init(void *handle)
 	adev_to_drm(adev)->mode_config.max_height = 16384;
 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
@@ -2695,6 +2695,8 @@ static int dce_v8_0_sw_init(void *handle)
 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
+	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
+
 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
@@ -48,6 +48,8 @@
 #include "athub_v2_0.h"
 #include "athub_v2_1.h"
 
+#include "amdgpu_reset.h"
+
 #if 0
 static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
 {
@@ -328,7 +330,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 */
 	if (adev->gfx.kiq.ring.sched.ready &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    down_read_trylock(&adev->reset_sem)) {
+	    down_read_trylock(&adev->reset_domain->sem)) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 		const unsigned eng = 17;
 		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
@@ -338,7 +340,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 				1 << vmid);
 
-		up_read(&adev->reset_sem);
+		up_read(&adev->reset_domain->sem);
 		return;
 	}
 
@@ -62,6 +62,8 @@
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
 
+#include "amdgpu_reset.h"
+
 /* add these here since we already include dce12 headers and these are for DCN */
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -787,13 +789,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 */
 	if (adev->gfx.kiq.ring.sched.ready &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    down_read_trylock(&adev->reset_sem)) {
+	    down_read_trylock(&adev->reset_domain->sem)) {
 		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 						   1 << vmid);
-		up_read(&adev->reset_sem);
+		up_read(&adev->reset_domain->sem);
 		return;
 	}
 
@@ -900,7 +902,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	if (amdgpu_in_reset(adev))
 		return -EIO;
 
-	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
+	if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
 		 * heavy-weight TLB flush (type 2), which flushes
 		 * both. Due to a race condition with concurrent
@@ -927,7 +929,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		if (r) {
 			amdgpu_ring_undo(ring);
 			spin_unlock(&adev->gfx.kiq.ring_lock);
-			up_read(&adev->reset_sem);
+			up_read(&adev->reset_domain->sem);
 			return -ETIME;
 		}
 
@@ -936,10 +938,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
 		if (r < 1) {
 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
-			up_read(&adev->reset_sem);
+			up_read(&adev->reset_domain->sem);
 			return -ETIME;
 		}
-		up_read(&adev->reset_sem);
+		up_read(&adev->reset_domain->sem);
 		return 0;
 	}
 
@@ -32,6 +32,8 @@
 #include "soc15_common.h"
 #include "mxgpu_ai.h"
 
+#include "amdgpu_reset.h"
+
 static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
 {
 	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
@@ -257,10 +259,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	 * otherwise the mailbox msg will be ruined/reseted by
 	 * the VF FLR.
 	 */
-	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
+	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
 		return;
 
-	down_write(&adev->reset_sem);
+	down_write(&adev->reset_domain->sem);
 
 	amdgpu_virt_fini_data_exchange(adev);
 
@@ -275,14 +277,14 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	} while (timeout > 1);
 
 flr_done:
-	atomic_set(&adev->in_gpu_reset, 0);
-	up_write(&adev->reset_sem);
+	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
+	up_write(&adev->reset_domain->sem);
 
 	/* Trigger recovery for world switch failure if no TDR */
 	if (amdgpu_device_should_recover_gpu(adev)
 		&& (!amdgpu_device_has_job_running(adev) ||
 			adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
-		amdgpu_device_gpu_recover(adev, NULL);
+		amdgpu_device_gpu_recover_imp(adev, NULL);
 }
 
 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -307,8 +309,11 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
 
 	switch (event) {
 	case IDH_FLR_NOTIFICATION:
-		if (amdgpu_sriov_runtime(adev))
-			schedule_work(&adev->virt.flr_work);
+		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
+			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+								&adev->virt.flr_work),
+				  "Failed to queue work! at %s",
+				  __func__);
 		break;
 	case IDH_QUERY_ALIVE:
 		xgpu_ai_mailbox_send_ack(adev);
@@ -31,6 +31,8 @@
 #include "soc15_common.h"
 #include "mxgpu_nv.h"
 
+#include "amdgpu_reset.h"
+
 static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
 {
 	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
@@ -281,10 +283,10 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 	 * otherwise the mailbox msg will be ruined/reseted by
 	 * the VF FLR.
 	 */
-	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
+	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
 		return;
 
-	down_write(&adev->reset_sem);
+	down_write(&adev->reset_domain->sem);
 
 	amdgpu_virt_fini_data_exchange(adev);
 
@@ -299,8 +301,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 	} while (timeout > 1);
 
 flr_done:
-	atomic_set(&adev->in_gpu_reset, 0);
-	up_write(&adev->reset_sem);
+	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
+	up_write(&adev->reset_domain->sem);
 
 	/* Trigger recovery for world switch failure if no TDR */
 	if (amdgpu_device_should_recover_gpu(adev)
@@ -309,7 +311,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
 		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
 		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
-		amdgpu_device_gpu_recover(adev, NULL);
+		amdgpu_device_gpu_recover_imp(adev, NULL);
 }
 
 static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -337,8 +339,11 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
 
 	switch (event) {
 	case IDH_FLR_NOTIFICATION:
-		if (amdgpu_sriov_runtime(adev))
-			schedule_work(&adev->virt.flr_work);
+		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
+			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+								&adev->virt.flr_work),
+				  "Failed to queue work! at %s",
+				  __func__);
 		break;
 	/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
 	 * it byfar since that polling thread will handle it,
@@ -42,6 +42,8 @@
 #include "smu/smu_7_1_3_d.h"
 #include "mxgpu_vi.h"
 
+#include "amdgpu_reset.h"
+
 /* VI golden setting */
 static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
@@ -521,7 +523,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
 
 	/* Trigger recovery due to world switch failure */
 	if (amdgpu_device_should_recover_gpu(adev))
-		amdgpu_device_gpu_recover(adev, NULL);
+		amdgpu_device_gpu_recover_imp(adev, NULL);
 }
 
 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -550,8 +552,11 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
 		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
 
 		/* only handle FLR_NOTIFY now */
-		if (!r)
-			schedule_work(&adev->virt.flr_work);
+		if (!r && !amdgpu_in_reset(adev))
+			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+								&adev->virt.flr_work),
+				  "Failed to queue work! at %s",
+				  __func__);
 	}
 
 	return 0;
@@ -7899,6 +7899,9 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 	if (res)
 		return res;
 
+	if (modifiers == NULL)
+		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
+
 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
 				       &dm_plane_funcs, formats, num_formats,
 				       modifiers, plane->type, NULL);
@@ -23,6 +23,7 @@
  *
  */
 
+#include <linux/string_helpers.h>
 #include <linux/uaccess.h>
 
 #include "dc.h"
@@ -49,11 +50,6 @@ struct dmub_debugfs_trace_entry {
 	uint32_t param1;
 };
 
-static inline const char *yesno(bool v)
-{
-	return v ? "yes" : "no";
-}
-
 /* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array
 *
 * Function takes in attributes passed to debugfs write entry
@@ -857,12 +853,12 @@ static int psr_capability_show(struct seq_file *m, void *data)
 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
 		return -ENODEV;
 
-	seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_caps.psr_version != 0));
+	seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_caps.psr_version != 0));
 	if (link->dpcd_caps.psr_caps.psr_version)
 		seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_caps.psr_version);
 	seq_puts(m, "\n");
 
-	seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled));
+	seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled));
 	if (link->psr_settings.psr_version)
 		seq_printf(m, " [0x%02x]", link->psr_settings.psr_version);
 	seq_puts(m, "\n");
@@ -1211,8 +1207,8 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
-	seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported));
-	seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported));
+	seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported));
+	seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported));
 
 	return ret;
 }
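
For reference: str_yes_no() and its siblings now live in <linux/string_helpers.h>, replacing per-driver yesno() helpers like the one removed above. A small sketch of the family (the function is illustrative):

	static void example_print_flags(struct seq_file *m, bool a, bool b)
	{
		seq_printf(m, "supported: %s\n", str_yes_no(a));          /* "yes"/"no" */
		seq_printf(m, "enabled: %s\n", str_enabled_disabled(b));  /* "enabled"/"disabled" */
	}
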
@@ -273,6 +273,9 @@ static int __init armada_drm_init(void)
 {
 	int ret;
 
+	if (drm_firmware_drivers_only())
+		return -ENODEV;
+
 	ret = platform_driver_register(&armada_lcd_platform_driver);
 	if (ret)
 		return ret;
@@ -20,6 +20,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
@@ -359,7 +360,7 @@ static struct platform_driver aspeed_gfx_platform_driver = {
 	},
 };
 
-module_platform_driver(aspeed_gfx_platform_driver);
+drm_module_platform_driver(aspeed_gfx_platform_driver);
 
 MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
 MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver");
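
For reference: drm_module_platform_driver() from <drm/drm_module.h> generates the module init/exit handlers and registers the driver only when DRM drivers are allowed to load, roughly equivalent to:

	static int __init example_init(void)
	{
		/* skip registration when booted with nomodeset */
		if (drm_firmware_drivers_only())
			return -ENODEV;
		return platform_driver_register(&aspeed_gfx_platform_driver);
	}
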
@@ -272,64 +272,6 @@ static bool ast_launch_m68k(struct drm_device *dev)
 	return true;
 }
 
-u8 ast_get_dp501_max_clk(struct drm_device *dev)
-{
-	struct ast_private *ast = to_ast_private(dev);
-	u32 boot_address, offset, data;
-	u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
-	u32 *plinkcap;
-
-	if (ast->config_mode == ast_use_p2a) {
-		boot_address = get_fw_base(ast);
-
-		/* validate FW version */
-		offset = AST_DP501_GBL_VERSION;
-		data = ast_mindwm(ast, boot_address + offset);
-		if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
-			return maxclk;
-
-		/* Read Link Capability */
-		offset = AST_DP501_LINKRATE;
-		plinkcap = (u32 *)linkcap;
-		*plinkcap = ast_mindwm(ast, boot_address + offset);
-		if (linkcap[2] == 0) {
-			linkrate = linkcap[0];
-			linklanes = linkcap[1];
-			data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
-			if (data > 0xff)
-				data = 0xff;
-			maxclk = (u8)data;
-		}
-	} else {
-		if (!ast->dp501_fw_buf)
-			return AST_DP501_DEFAULT_DCLK;	/* 1024x768 as default */
-
-		/* dummy read */
-		offset = 0x0000;
-		data = readl(ast->dp501_fw_buf + offset);
-
-		/* validate FW version */
-		offset = AST_DP501_GBL_VERSION;
-		data = readl(ast->dp501_fw_buf + offset);
-		if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
-			return maxclk;
-
-		/* Read Link Capability */
-		offset = AST_DP501_LINKRATE;
-		plinkcap = (u32 *)linkcap;
-		*plinkcap = readl(ast->dp501_fw_buf + offset);
-		if (linkcap[2] == 0) {
-			linkrate = linkcap[0];
-			linklanes = linkcap[1];
-			data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
-			if (data > 0xff)
-				data = 0xff;
-			maxclk = (u8)data;
-		}
-	}
-	return maxclk;
-}
-
 bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
 {
 	struct ast_private *ast = to_ast_private(dev);
@@ -69,7 +69,6 @@ enum ast_chip {
 enum ast_tx_chip {
 	AST_TX_NONE,
 	AST_TX_SIL164,
-	AST_TX_ITE66121,
 	AST_TX_DP501,
 };
 
@@ -130,15 +129,26 @@ struct ast_i2c_chan {
 	struct i2c_algo_bit_data bit;
 };
 
-struct ast_connector {
+struct ast_vga_connector {
 	struct drm_connector base;
 	struct ast_i2c_chan *i2c;
 };
 
-static inline struct ast_connector *
-to_ast_connector(struct drm_connector *connector)
+static inline struct ast_vga_connector *
+to_ast_vga_connector(struct drm_connector *connector)
 {
-	return container_of(connector, struct ast_connector, base);
+	return container_of(connector, struct ast_vga_connector, base);
 }
 
+struct ast_sil164_connector {
+	struct drm_connector base;
+	struct ast_i2c_chan *i2c;
+};
+
+static inline struct ast_sil164_connector *
+to_ast_sil164_connector(struct drm_connector *connector)
+{
+	return container_of(connector, struct ast_sil164_connector, base);
+}
+
 /*
@@ -161,8 +171,20 @@ struct ast_private {
 	struct drm_plane primary_plane;
 	struct ast_cursor_plane cursor_plane;
 	struct drm_crtc crtc;
-	struct drm_encoder encoder;
-	struct ast_connector connector;
+	union {
+		struct {
+			struct drm_encoder encoder;
+			struct ast_vga_connector vga_connector;
+		} vga;
+		struct {
+			struct drm_encoder encoder;
+			struct ast_sil164_connector sil164_connector;
+		} sil164;
+		struct {
+			struct drm_encoder encoder;
+			struct drm_connector connector;
+		} dp501;
+	} output;
 
 	bool support_wide_screen;
 	enum {
@@ -172,7 +194,6 @@ struct ast_private {
 	} config_mode;
 
 	enum ast_tx_chip tx_chip_type;
-	u8 dp501_maxclk;
 	u8 *dp501_fw_addr;
 	const struct firmware *dp501_fw;	/* dp501 fw */
 };
@ -40,6 +40,7 @@
|
|||
#include <drm/drm_gem_atomic_helper.h>
|
||||
#include <drm/drm_gem_framebuffer_helper.h>
|
||||
#include <drm/drm_gem_vram_helper.h>
|
||||
#include <drm/drm_managed.h>
|
||||
#include <drm/drm_plane_helper.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/drm_simple_kms_helper.h>
|
||||
|
@ -1005,6 +1006,71 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
|
|||
}
|
||||
}
|
||||
|
||||
static enum drm_mode_status
|
||||
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
|
||||
{
|
||||
struct ast_private *ast = to_ast_private(crtc->dev);
|
||||
enum drm_mode_status status;
|
||||
uint32_t jtemp;
|
||||
|
||||
if (ast->support_wide_screen) {
|
||||
if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
|
||||
return MODE_OK;
|
||||
|
||||
if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
|
||||
(ast->chip == AST2300) || (ast->chip == AST2400) ||
|
||||
(ast->chip == AST2500)) {
|
||||
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
|
||||
return MODE_OK;
|
||||
|
||||
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
|
||||
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
|
||||
if (jtemp & 0x01)
|
||||
return MODE_NOMODE;
|
||||
else
|
||||
return MODE_OK;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
status = MODE_NOMODE;
|
||||
|
||||
switch (mode->hdisplay) {
|
||||
case 640:
|
||||
if (mode->vdisplay == 480)
|
||||
status = MODE_OK;
|
||||
break;
|
||||
case 800:
|
||||
if (mode->vdisplay == 600)
|
||||
status = MODE_OK;
|
||||
break;
|
||||
case 1024:
|
||||
if (mode->vdisplay == 768)
|
||||
status = MODE_OK;
|
||||
break;
|
||||
case 1280:
|
||||
if (mode->vdisplay == 1024)
|
||||
status = MODE_OK;
|
||||
break;
|
||||
case 1600:
|
||||
if (mode->vdisplay == 1200)
|
||||
status = MODE_OK;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
|
@ -1107,6 +1173,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
|
|||
}
|
||||
|
||||
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
|
||||
.mode_valid = ast_crtc_helper_mode_valid,
|
||||
.atomic_check = ast_crtc_helper_atomic_check,
|
||||
.atomic_flush = ast_crtc_helper_atomic_flush,
|
||||
.atomic_enable = ast_crtc_helper_atomic_enable,
|
||||
|
@ -1187,128 +1254,37 @@ static int ast_crtc_init(struct drm_device *dev)
|
|||
}
|
||||
|
||||
/*
|
||||
* Encoder
|
||||
* VGA Connector
|
||||
*/
|
||||
|
||||
static int ast_encoder_init(struct drm_device *dev)
|
||||
static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct ast_private *ast = to_ast_private(dev);
|
||||
struct drm_encoder *encoder = &ast->encoder;
|
||||
int ret;
|
||||
struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
|
||||
struct edid *edid;
|
||||
int count;
|
||||
|
||||
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!ast_vga_connector->i2c)
|
||||
goto err_drm_connector_update_edid_property;
|
||||
|
||||
encoder->possible_crtcs = 1;
|
||||
edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
|
||||
if (!edid)
|
||||
goto err_drm_connector_update_edid_property;
|
||||
|
||||
count = drm_add_edid_modes(connector, edid);
|
||||
kfree(edid);
|
||||
|
||||
return count;
|
||||
|
||||
err_drm_connector_update_edid_property:
|
||||
drm_connector_update_edid_property(connector, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Connector
|
||||
*/
|
||||
|
||||
static int ast_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct ast_connector *ast_connector = to_ast_connector(connector);
|
||||
struct ast_private *ast = to_ast_private(connector->dev);
|
||||
struct edid *edid = NULL;
|
||||
bool flags = false;
|
||||
int ret;
|
||||
|
||||
if (ast->tx_chip_type == AST_TX_DP501) {
|
||||
ast->dp501_maxclk = 0xff;
|
||||
edid = kmalloc(128, GFP_KERNEL);
|
||||
if (!edid)
|
||||
return -ENOMEM;
|
||||
|
||||
flags = ast_dp501_read_edid(connector->dev, (u8 *)edid);
|
||||
if (flags)
|
||||
ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev);
|
||||
else
|
||||
kfree(edid);
|
||||
}
|
||||
if (!flags && ast_connector->i2c)
|
||||
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
|
||||
if (edid) {
|
||||
drm_connector_update_edid_property(&ast_connector->base, edid);
|
||||
ret = drm_add_edid_modes(connector, edid);
|
||||
kfree(edid);
|
||||
return ret;
|
||||
}
|
||||
drm_connector_update_edid_property(&ast_connector->base, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct ast_private *ast = to_ast_private(connector->dev);
|
||||
int flags = MODE_NOMODE;
|
||||
uint32_t jtemp;
|
||||
|
||||
if (ast->support_wide_screen) {
|
||||
if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
|
||||
return MODE_OK;
|
||||
if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
|
||||
return MODE_OK;
|
||||
|
||||
if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
|
||||
(ast->chip == AST2300) || (ast->chip == AST2400) ||
|
||||
(ast->chip == AST2500)) {
|
||||
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
|
||||
return MODE_OK;
|
||||
|
||||
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
|
||||
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
|
||||
if (jtemp & 0x01)
|
||||
return MODE_NOMODE;
|
||||
else
|
||||
return MODE_OK;
|
||||
}
|
||||
}
|
||||
}
|
||||
switch (mode->hdisplay) {
|
||||
case 640:
|
||||
if (mode->vdisplay == 480)
|
||||
flags = MODE_OK;
|
||||
break;
|
||||
case 800:
|
||||
if (mode->vdisplay == 600)
|
||||
flags = MODE_OK;
|
||||
break;
|
||||
case 1024:
|
||||
if (mode->vdisplay == 768)
|
||||
flags = MODE_OK;
|
||||
break;
|
||||
case 1280:
|
||||
if (mode->vdisplay == 1024)
|
||||
flags = MODE_OK;
|
||||
break;
|
||||
case 1600:
|
||||
if (mode->vdisplay == 1200)
|
||||
flags = MODE_OK;
|
||||
break;
|
||||
default:
|
||||
return flags;
|
||||
}
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
|
||||
.get_modes = ast_get_modes,
|
||||
.mode_valid = ast_mode_valid,
|
||||
static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
|
||||
.get_modes = ast_vga_connector_helper_get_modes,
|
||||
};
|
||||
|
||||
static const struct drm_connector_funcs ast_connector_funcs = {
|
||||
static const struct drm_connector_funcs ast_vga_connector_funcs = {
|
||||
.reset = drm_atomic_helper_connector_reset,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.destroy = drm_connector_cleanup,
|
||||
|
@ -1316,33 +1292,237 @@ static const struct drm_connector_funcs ast_connector_funcs = {
|
|||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
};
|
||||
|
||||
static int ast_connector_init(struct drm_device *dev)
|
||||
static int ast_vga_connector_init(struct drm_device *dev,
|
||||
struct ast_vga_connector *ast_vga_connector)
|
||||
{
|
||||
struct ast_private *ast = to_ast_private(dev);
|
||||
struct ast_connector *ast_connector = &ast->connector;
|
||||
struct drm_connector *connector = &ast_connector->base;
|
||||
struct drm_encoder *encoder = &ast->encoder;
|
||||
struct drm_connector *connector = &ast_vga_connector->base;
|
||||
int ret;
|
||||
|
||||
ast_connector->i2c = ast_i2c_create(dev);
|
||||
if (!ast_connector->i2c)
|
||||
ast_vga_connector->i2c = ast_i2c_create(dev);
|
||||
if (!ast_vga_connector->i2c)
|
||||
drm_err(dev, "failed to add ddc bus for connector\n");
|
||||
|
||||
if (ast_connector->i2c)
|
||||
drm_connector_init_with_ddc(dev, connector, &ast_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_VGA,
|
||||
&ast_connector->i2c->adapter);
|
||||
if (ast_vga_connector->i2c)
|
||||
ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_VGA,
|
||||
&ast_vga_connector->i2c->adapter);
|
||||
else
|
||||
drm_connector_init(dev, connector, &ast_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_VGA);
|
||||
ret = drm_connector_init(dev, connector, &ast_vga_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_VGA);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm_connector_helper_add(connector, &ast_connector_helper_funcs);
|
||||
drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
|
||||
|
||||
connector->interlace_allowed = 0;
|
||||
connector->doublescan_allowed = 0;
|
||||
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
|
||||
|
||||
drm_connector_attach_encoder(connector, encoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ast_vga_output_init(struct ast_private *ast)
|
||||
{
|
||||
struct drm_device *dev = &ast->base;
|
||||
struct drm_crtc *crtc = &ast->crtc;
|
||||
struct drm_encoder *encoder = &ast->output.vga.encoder;
|
||||
struct ast_vga_connector *ast_vga_connector = &ast->output.vga.vga_connector;
|
||||
struct drm_connector *connector = &ast_vga_connector->base;
|
||||
int ret;
|
||||
|
||||
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
|
||||
if (ret)
|
||||
return ret;
|
||||
encoder->possible_crtcs = drm_crtc_mask(crtc);
|
||||
|
||||
ret = ast_vga_connector_init(dev, ast_vga_connector);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_connector_attach_encoder(connector, encoder);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* SIL164 Connector
|
||||
*/
|
||||
|
||||
static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
|
||||
struct edid *edid;
|
||||
int count;
|
||||
|
||||
if (!ast_sil164_connector->i2c)
|
||||
goto err_drm_connector_update_edid_property;
|
||||
|
||||
edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
|
||||
if (!edid)
|
||||
goto err_drm_connector_update_edid_property;
|
||||
|
||||
count = drm_add_edid_modes(connector, edid);
|
||||
kfree(edid);
|
||||
|
||||
return count;
|
||||
|
||||
err_drm_connector_update_edid_property:
|
||||
drm_connector_update_edid_property(connector, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
|
||||
+	.get_modes = ast_sil164_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs ast_sil164_connector_funcs = {
+	.reset = drm_atomic_helper_connector_reset,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_sil164_connector_init(struct drm_device *dev,
+				     struct ast_sil164_connector *ast_sil164_connector)
+{
+	struct drm_connector *connector = &ast_sil164_connector->base;
+	int ret;
+
+	ast_sil164_connector->i2c = ast_i2c_create(dev);
+	if (!ast_sil164_connector->i2c)
+		drm_err(dev, "failed to add ddc bus for connector\n");
+
+	if (ast_sil164_connector->i2c)
+		ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
+						  DRM_MODE_CONNECTOR_DVII,
+						  &ast_sil164_connector->i2c->adapter);
+	else
+		ret = drm_connector_init(dev, connector, &ast_sil164_connector_funcs,
+					 DRM_MODE_CONNECTOR_DVII);
+	if (ret)
+		return ret;
+
+	drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	return 0;
+}
+
+static int ast_sil164_output_init(struct ast_private *ast)
+{
+	struct drm_device *dev = &ast->base;
+	struct drm_crtc *crtc = &ast->crtc;
+	struct drm_encoder *encoder = &ast->output.sil164.encoder;
+	struct ast_sil164_connector *ast_sil164_connector = &ast->output.sil164.sil164_connector;
+	struct drm_connector *connector = &ast_sil164_connector->base;
+	int ret;
+
+	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
+	if (ret)
+		return ret;
+	encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+	ret = ast_sil164_connector_init(dev, ast_sil164_connector);
+	if (ret)
+		return ret;
+
+	ret = drm_connector_attach_encoder(connector, encoder);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * DP501 Connector
+ */
+
+static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector)
+{
+	void *edid;
+	bool succ;
+	int count;
+
+	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	if (!edid)
+		goto err_drm_connector_update_edid_property;
+
+	succ = ast_dp501_read_edid(connector->dev, edid);
+	if (!succ)
+		goto err_kfree;
+
+	drm_connector_update_edid_property(connector, edid);
+	count = drm_add_edid_modes(connector, edid);
+	kfree(edid);
+
+	return count;
+
+err_kfree:
+	kfree(edid);
+err_drm_connector_update_edid_property:
+	drm_connector_update_edid_property(connector, NULL);
+	return 0;
+}
+
+static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
+	.get_modes = ast_dp501_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs ast_dp501_connector_funcs = {
+	.reset = drm_atomic_helper_connector_reset,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+	int ret;
+
+	ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
+				 DRM_MODE_CONNECTOR_DisplayPort);
+	if (ret)
+		return ret;
+
+	drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	return 0;
+}
+
+static int ast_dp501_output_init(struct ast_private *ast)
+{
+	struct drm_device *dev = &ast->base;
+	struct drm_crtc *crtc = &ast->crtc;
+	struct drm_encoder *encoder = &ast->output.dp501.encoder;
+	struct drm_connector *connector = &ast->output.dp501.connector;
+	int ret;
+
+	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
+	if (ret)
+		return ret;
+	encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+	ret = ast_dp501_connector_init(dev, connector);
+	if (ret)
+		return ret;
+
+	ret = drm_connector_attach_encoder(connector, encoder);
+	if (ret)
+		return ret;
+
+	return 0;
+}
@@ -1351,8 +1531,7 @@ static int ast_connector_init(struct drm_device *dev)
  * Mode config
  */
 
-static const struct drm_mode_config_helper_funcs
-ast_mode_config_helper_funcs = {
+static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
 	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
@@ -1404,8 +1583,20 @@ int ast_mode_config_init(struct ast_private *ast)
 		return ret;
 
 	ast_crtc_init(dev);
-	ast_encoder_init(dev);
-	ast_connector_init(dev);
+
+	switch (ast->tx_chip_type) {
+	case AST_TX_NONE:
+		ret = ast_vga_output_init(ast);
+		break;
+	case AST_TX_SIL164:
+		ret = ast_sil164_output_init(ast);
+		break;
+	case AST_TX_DP501:
+		ret = ast_dp501_output_init(ast);
+		break;
+	}
+	if (ret)
+		return ret;
 
 	drm_mode_config_reset(dev);
@@ -22,6 +22,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
@@ -833,7 +834,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
 		.of_match_table = atmel_hlcdc_dc_of_match,
 	},
 };
-module_platform_driver(atmel_hlcdc_dc_platform_driver);
+drm_module_platform_driver(atmel_hlcdc_dc_platform_driver);
 
 MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>");
 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
@@ -75,6 +75,14 @@ config DRM_DISPLAY_CONNECTOR
 	  on ARM-based platforms. Saying Y here when this driver is not needed
 	  will not cause any issue.
 
+config DRM_ITE_IT6505
+	tristate "ITE IT6505 DisplayPort bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	select EXTCON
+	help
+	  ITE IT6505 DisplayPort bridge chip driver.
+
 config DRM_LONTIUM_LT8912B
 	tristate "Lontium LT8912B DSI/HDMI bridge"
 	depends on OF
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
 obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
 obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
 obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
+obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
 obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
 obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
 obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
@@ -32,6 +32,8 @@ config DRM_ANALOGIX_ANX7625
 	tristate "Analogix Anx7625 MIPI to DP interface support"
 	depends on DRM
 	depends on OF
+	select DRM_DP_AUX_BUS
+	select DRM_DP_HELPER
 	select DRM_MIPI_DSI
 	help
 	  ANX7625 is an ultra-low power 4K mobile HD transmitter
@@ -24,6 +24,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
 #include <drm/dp/drm_dp_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_hdcp.h>
@@ -129,6 +130,23 @@ static int anx7625_reg_write(struct anx7625_data *ctx,
 	return ret;
 }
 
+static int anx7625_reg_block_write(struct anx7625_data *ctx,
+				   struct i2c_client *client,
+				   u8 reg_addr, u8 len, u8 *buf)
+{
+	int ret;
+	struct device *dev = &client->dev;
+
+	i2c_access_workaround(ctx, client);
+
+	ret = i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf);
+	if (ret < 0)
+		dev_err(dev, "write i2c block failed id=%x\n:%x",
+			client->addr, reg_addr);
+
+	return ret;
+}
+
 static int anx7625_write_or(struct anx7625_data *ctx,
 			    struct i2c_client *client,
 			    u8 offset, u8 mask)
@@ -214,25 +232,28 @@ static int wait_aux_op_finish(struct anx7625_data *ctx)
 	return 0;
 }
 
-static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
-				 u32 address, u8 len, u8 *buf)
+static int anx7625_aux_trans(struct anx7625_data *ctx, u8 op, u32 address,
+			     u8 len, u8 *buf)
 {
 	struct device *dev = &ctx->client->dev;
 	int ret;
 	u8 addrh, addrm, addrl;
 	u8 cmd;
+	bool is_write = !(op & DP_AUX_I2C_READ);
 
-	if (len > MAX_DPCD_BUFFER_SIZE) {
+	if (len > DP_AUX_MAX_PAYLOAD_BYTES) {
 		dev_err(dev, "exceed aux buffer len.\n");
 		return -EINVAL;
 	}
 
+	if (!len)
+		return len;
+
 	addrl = address & 0xFF;
 	addrm = (address >> 8) & 0xFF;
 	addrh = (address >> 16) & 0xFF;
 
-	cmd = DPCD_CMD(len, DPCD_READ);
-	cmd = ((len - 1) << 4) | 0x09;
+	cmd = DPCD_CMD(len, op);
 
 	/* Set command and length */
 	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -246,6 +267,9 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
 	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
 				 AP_AUX_ADDR_19_16, addrh);
 
+	if (is_write)
+		ret |= anx7625_reg_block_write(ctx, ctx->i2c.rx_p0_client,
+					       AP_AUX_BUFF_START, len, buf);
 	/* Enable aux access */
 	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
 				AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
@@ -255,14 +279,17 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
 		return -EIO;
 	}
 
 	usleep_range(2000, 2100);
 
 	ret = wait_aux_op_finish(ctx);
-	if (ret) {
+	if (ret < 0) {
 		dev_err(dev, "aux IO error: wait aux op finish.\n");
 		return ret;
 	}
 
+	/* Write done */
+	if (is_write)
+		return len;
 
 	/* Read done, read out dpcd data */
 	ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
 				     AP_AUX_BUFF_START, len, buf);
 	if (ret < 0) {
@@ -270,7 +297,7 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
 		return -EIO;
 	}
 
-	return 0;
+	return len;
 }
 
 static int anx7625_video_mute_control(struct anx7625_data *ctx,
@@ -845,7 +872,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
 	}
 
 	/* Read downstream capability */
-	anx7625_aux_dpcd_read(ctx, 0x68028, 1, &bcap);
+	anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
 	if (!(bcap & 0x01)) {
 		pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
 		return 0;
@@ -918,6 +945,7 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
 {
 	struct device *dev = &ctx->client->dev;
 	int ret;
+	u8 data;
 
 	DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n");
 
@@ -929,6 +957,11 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
 	ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f);
 
 	ret |= anx7625_video_mute_control(ctx, 1);
 
+	dev_dbg(dev, "notify downstream enter into standby\n");
+	/* Downstream monitor enter into standby mode */
+	data = 2;
+	ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
 	if (ret < 0)
 		DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
 
@@ -1076,7 +1109,8 @@ static int segments_edid_read(struct anx7625_data *ctx,
 static int sp_tx_edid_read(struct anx7625_data *ctx,
 			   u8 *pedid_blocks_buf)
 {
-	u8 offset, edid_pos;
+	u8 offset;
+	int edid_pos;
 	int count, blocks_num;
 	u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
 	u8 i, j;
@@ -1627,11 +1661,56 @@ static int anx7625_parse_dt(struct device *dev,
 	return 0;
 }
 
+static bool anx7625_of_panel_on_aux_bus(struct device *dev)
+{
+	struct device_node *bus, *panel;
+
+	bus = of_get_child_by_name(dev->of_node, "aux-bus");
+	if (!bus)
+		return false;
+
+	panel = of_get_child_by_name(bus, "panel");
+	of_node_put(bus);
+	if (!panel)
+		return false;
+	of_node_put(panel);
+
+	return true;
+}
+
 static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge)
 {
 	return container_of(bridge, struct anx7625_data, bridge);
 }
 
+static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+				    struct drm_dp_aux_msg *msg)
+{
+	struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
+	struct device *dev = &ctx->client->dev;
+	u8 request = msg->request & ~DP_AUX_I2C_MOT;
+	int ret = 0;
+
+	pm_runtime_get_sync(dev);
+	msg->reply = 0;
+	switch (request) {
+	case DP_AUX_NATIVE_WRITE:
+	case DP_AUX_I2C_WRITE:
+	case DP_AUX_NATIVE_READ:
+	case DP_AUX_I2C_READ:
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	if (!ret)
+		ret = anx7625_aux_trans(ctx, msg->request, msg->address,
+					msg->size, msg->buffer);
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	return ret;
+}
+
 static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
 {
 	struct device *dev = &ctx->client->dev;
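
The two hunks above are the heart of this conversion: anx7625_aux_trans() now handles both reads and writes, and anx7625_aux_transfer() exposes it through the generic struct drm_dp_aux interface. As a rough sketch of the pattern (the driver name and callback below are hypothetical stand-ins, not part of this series):

	#include <drm/dp/drm_dp_helper.h>

	struct my_bridge {
		struct device *dev;
		struct drm_dp_aux aux;	/* embedded, recovered via container_of() */
	};

	/* Hypothetical hook: forward one AUX message to the hardware. */
	static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
				       struct drm_dp_aux_msg *msg)
	{
		struct my_bridge *mb = container_of(aux, struct my_bridge, aux);

		/* ...program msg->request, msg->address, msg->size on mb... */
		msg->reply = 0;
		return msg->size;	/* bytes transferred on success */
	}

	static void my_bridge_init_aux(struct my_bridge *mb)
	{
		mb->aux.name = "my-bridge-aux";
		mb->aux.dev = mb->dev;
		mb->aux.transfer = my_aux_transfer;
		drm_dp_aux_init(&mb->aux);	/* at probe, before a DRM device exists */
		/* drm_dp_aux_register() then runs from the bridge attach hook */
	}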
@@ -2038,6 +2117,13 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
 		return -ENODEV;
 	}
 
+	ctx->aux.drm_dev = bridge->dev;
+	err = drm_dp_aux_register(&ctx->aux);
+	if (err) {
+		dev_err(dev, "failed to register aux channel: %d\n", err);
+		return err;
+	}
+
 	if (ctx->pdata.panel_bridge) {
 		err = drm_bridge_attach(bridge->encoder,
 					ctx->pdata.panel_bridge,
@@ -2051,6 +2137,13 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
 	return 0;
 }
 
+static void anx7625_bridge_detach(struct drm_bridge *bridge)
+{
+	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+
+	drm_dp_aux_unregister(&ctx->aux);
+}
+
 static enum drm_mode_status
 anx7625_bridge_mode_valid(struct drm_bridge *bridge,
 			  const struct drm_display_info *info,
@@ -2316,6 +2409,7 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
 
 static const struct drm_bridge_funcs anx7625_bridge_funcs = {
 	.attach = anx7625_bridge_attach,
+	.detach = anx7625_bridge_detach,
 	.mode_valid = anx7625_bridge_mode_valid,
 	.mode_set = anx7625_bridge_mode_set,
 	.atomic_check = anx7625_bridge_atomic_check,
@@ -2473,6 +2567,12 @@ static const struct dev_pm_ops anx7625_pm_ops = {
 			   anx7625_runtime_pm_resume, NULL)
 };
 
+static void anx7625_runtime_disable(void *data)
+{
+	pm_runtime_dont_use_autosuspend(data);
+	pm_runtime_disable(data);
+}
+
 static int anx7625_i2c_probe(struct i2c_client *client,
 			     const struct i2c_device_id *id)
 {
@@ -2487,7 +2587,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
 		return -ENODEV;
 	}
 
-	platform = kzalloc(sizeof(*platform), GFP_KERNEL);
+	platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
 	if (!platform) {
 		DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
 		return -ENOMEM;
@@ -2495,13 +2595,6 @@ static int anx7625_i2c_probe(struct i2c_client *client,
 
 	pdata = &platform->pdata;
 
-	ret = anx7625_parse_dt(dev, pdata);
-	if (ret) {
-		if (ret != -EPROBE_DEFER)
-			DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
-		goto free_platform;
-	}
-
 	platform->client = client;
 	i2c_set_clientdata(client, platform);
 
@@ -2524,7 +2617,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
 	if (!platform->hdcp_workqueue) {
 		dev_err(dev, "fail to create work queue\n");
 		ret = -ENOMEM;
-		goto free_platform;
+		return ret;
 	}
 
 	platform->pdata.intp_irq = client->irq;
@@ -2549,6 +2642,19 @@ static int anx7625_i2c_probe(struct i2c_client *client,
 		}
 	}
 
+	platform->aux.name = "anx7625-aux";
+	platform->aux.dev = dev;
+	platform->aux.transfer = anx7625_aux_transfer;
+	drm_dp_aux_init(&platform->aux);
+	devm_of_dp_aux_populate_ep_devices(&platform->aux);
+
+	ret = anx7625_parse_dt(dev, pdata);
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+		return ret;
+	}
+
 	if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
 		ret = -ENOMEM;
 		DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n");
@@ -2556,6 +2662,12 @@ static int anx7625_i2c_probe(struct i2c_client *client,
 	}
 
 	pm_runtime_enable(dev);
+	pm_runtime_set_autosuspend_delay(dev, 1000);
+	pm_runtime_use_autosuspend(dev);
+	pm_suspend_ignore_children(dev, true);
+	ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
+	if (ret)
+		return ret;
 
 	if (!platform->pdata.low_power_mode) {
 		anx7625_disable_pd_protocol(platform);
@@ -2568,7 +2680,8 @@ static int anx7625_i2c_probe(struct i2c_client *client,
 
 	platform->bridge.funcs = &anx7625_bridge_funcs;
 	platform->bridge.of_node = client->dev.of_node;
-	platform->bridge.ops = DRM_BRIDGE_OP_EDID;
+	if (!anx7625_of_panel_on_aux_bus(&client->dev))
+		platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
 	if (!platform->pdata.panel_bridge)
 		platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
 					DRM_BRIDGE_OP_DETECT;
@@ -2609,9 +2722,6 @@ static int anx7625_i2c_probe(struct i2c_client *client,
 	if (platform->hdcp_workqueue)
 		destroy_workqueue(platform->hdcp_workqueue);
 
-free_platform:
-	kfree(platform);
-
 	return ret;
 }
 
|
@ -2638,7 +2748,6 @@ static int anx7625_i2c_remove(struct i2c_client *client)
|
|||
if (platform->pdata.audio_en)
|
||||
anx7625_unregister_audio(platform);
|
||||
|
||||
kfree(platform);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -242,8 +242,6 @@
 
 #define AP_AUX_COMMAND 0x27 /* com+len */
 #define LENGTH_SHIFT 4
-#define DPCD_READ 0x09
-#define DPCD_WRITE 0x08
 #define DPCD_CMD(len, cmd) ((((len) - 1) << LENGTH_SHIFT) | (cmd))
 
 /* Bit 0&1: 3D video structure */
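
With DPCD_READ/DPCD_WRITE gone, DPCD_CMD() is fed the DP_AUX_* request codes directly; that works because DP_AUX_NATIVE_WRITE is 0x8 and DP_AUX_NATIVE_READ is 0x9, the exact values the removed macros duplicated. A worked expansion (illustrative only):

	/* DPCD_CMD(len, cmd) == ((((len) - 1) << 4) | (cmd)) */
	u8 rd = DPCD_CMD(16, DP_AUX_NATIVE_READ);	/* (15 << 4) | 0x9 == 0xf9 */
	u8 wr = DPCD_CMD(1, DP_AUX_NATIVE_WRITE);	/* (0 << 4) | 0x8 == 0x08 */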
@@ -474,6 +472,7 @@ struct anx7625_data {
 	u8 bridge_attached;
 	struct drm_connector *connector;
 	struct mipi_dsi_device *dsi;
+	struct drm_dp_aux aux;
 };
 
 #endif /* __ANX7625_H__ */
[diff for one file suppressed: too large]
@@ -1164,7 +1164,11 @@ static int lt9611_probe(struct i2c_client *client,
 
 	lt9611_enable_hpd_interrupts(lt9611);
 
-	return lt9611_audio_init(dev, lt9611);
+	ret = lt9611_audio_init(dev, lt9611);
+	if (ret)
+		goto err_remove_bridge;
+
+	return 0;
+
+err_remove_bridge:
+	drm_bridge_remove(&lt9611->bridge);
@@ -860,18 +860,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
 	memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
 	drm_mode_debug_printmodeline(adjusted_mode);
 
-	pm_runtime_get_sync(dev);
+	if (pm_runtime_resume_and_get(dev) < 0)
+		return;
 
 	if (clk_prepare_enable(dsi->lcdif_clk) < 0)
-		return;
+		goto runtime_put;
 	if (clk_prepare_enable(dsi->core_clk) < 0)
-		return;
+		goto runtime_put;
 
 	/* Step 1 from DSI reset-out instructions */
 	ret = reset_control_deassert(dsi->rst_pclk);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
-		return;
+		goto runtime_put;
 	}
 
 	/* Step 2 from DSI reset-out instructions */
@@ -881,13 +882,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
 	ret = reset_control_deassert(dsi->rst_esc);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
-		return;
+		goto runtime_put;
 	}
 	ret = reset_control_deassert(dsi->rst_byte);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
-		return;
+		goto runtime_put;
 	}
 
+	return;
+
+runtime_put:
+	pm_runtime_put_sync(dev);
 }
 
 static void
@@ -138,6 +138,17 @@ static int panel_bridge_get_modes(struct drm_bridge *bridge,
 	return drm_panel_get_modes(panel_bridge->panel, connector);
 }
 
+static void panel_bridge_debugfs_init(struct drm_bridge *bridge,
+				      struct dentry *root)
+{
+	struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+	struct drm_panel *panel = panel_bridge->panel;
+
+	root = debugfs_create_dir("panel", root);
+	if (panel->funcs->debugfs_init)
+		panel->funcs->debugfs_init(panel, root);
+}
+
 static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
 	.attach = panel_bridge_attach,
 	.detach = panel_bridge_detach,
@@ -150,6 +161,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
 	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
 	.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+	.debugfs_init = panel_bridge_debugfs_init,
 };
 
 /**
@@ -2551,8 +2551,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
 	if (!output_fmts)
 		return NULL;
 
-	/* If dw-hdmi is the only bridge, avoid negociating with ourselves */
-	if (list_is_singular(&bridge->encoder->bridge_chain)) {
+	/* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */
+	if (list_is_singular(&bridge->encoder->bridge_chain) ||
+	    list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) {
 		*num_output_fmts = 1;
 		output_fmts[0] = MEDIA_BUS_FMT_FIXED;
 
@@ -26,6 +26,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
 #include <drm/dp/drm_dp_aux_bus.h>
 #include <drm/dp/drm_dp_helper.h>
 #include <drm/drm_mipi_dsi.h>
@@ -174,7 +175,7 @@ struct ti_sn65dsi86 {
 	struct regmap *regmap;
 	struct drm_dp_aux aux;
 	struct drm_bridge bridge;
-	struct drm_connector connector;
+	struct drm_connector *connector;
 	struct device_node *host_node;
 	struct mipi_dsi_device *dsi;
 	struct clk *refclk;
@@ -646,54 +647,6 @@ static struct auxiliary_driver ti_sn_aux_driver = {
 	.id_table = ti_sn_aux_id_table,
 };
 
-/* -----------------------------------------------------------------------------
- * DRM Connector Operations
- */
-
-static struct ti_sn65dsi86 *
-connector_to_ti_sn65dsi86(struct drm_connector *connector)
-{
-	return container_of(connector, struct ti_sn65dsi86, connector);
-}
-
-static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
-{
-	struct ti_sn65dsi86 *pdata = connector_to_ti_sn65dsi86(connector);
-
-	return drm_bridge_get_modes(pdata->next_bridge, connector);
-}
-
-static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
-	.get_modes = ti_sn_bridge_connector_get_modes,
-};
-
-static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = drm_connector_cleanup,
-	.reset = drm_atomic_helper_connector_reset,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ti_sn_bridge_connector_init(struct ti_sn65dsi86 *pdata)
-{
-	int ret;
-
-	ret = drm_connector_init(pdata->bridge.dev, &pdata->connector,
-				 &ti_sn_bridge_connector_funcs,
-				 DRM_MODE_CONNECTOR_eDP);
-	if (ret) {
-		DRM_ERROR("Failed to initialize connector with drm\n");
-		return ret;
-	}
-
-	drm_connector_helper_add(&pdata->connector,
-				 &ti_sn_bridge_connector_helper_funcs);
-	drm_connector_attach_encoder(&pdata->connector, pdata->bridge.encoder);
-
-	return 0;
-}
-
 /*------------------------------------------------------------------------------
  * DRM Bridge
  */
@@ -757,10 +710,6 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
 		return ret;
 	}
 
-	ret = ti_sn_bridge_connector_init(pdata);
-	if (ret < 0)
-		goto err_conn_init;
-
 	/* We never want the next bridge to *also* create a connector: */
 	flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR;
 
@@ -768,13 +717,20 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
 	ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
 				&pdata->bridge, flags);
 	if (ret < 0)
-		goto err_dsi_host;
+		goto err_initted_aux;
+
+	pdata->connector = drm_bridge_connector_init(pdata->bridge.dev,
+						     pdata->bridge.encoder);
+	if (IS_ERR(pdata->connector)) {
+		ret = PTR_ERR(pdata->connector);
+		goto err_initted_aux;
+	}
+
+	drm_connector_attach_encoder(pdata->connector, pdata->bridge.encoder);
 
 	return 0;
 
-err_dsi_host:
-	drm_connector_cleanup(&pdata->connector);
-err_conn_init:
+err_initted_aux:
 	drm_dp_aux_unregister(&pdata->aux);
 	return ret;
 }
@@ -824,7 +780,7 @@ static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
 
 static unsigned int ti_sn_bridge_get_bpp(struct ti_sn65dsi86 *pdata)
 {
-	if (pdata->connector.display_info.bpc <= 6)
+	if (pdata->connector->display_info.bpc <= 6)
 		return 18;
 	else
 		return 24;
@@ -28,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/string_helpers.h>
 
 #include <drm/dp/drm_dp_helper.h>
 #include <drm/drm_print.h>
@@ -1322,7 +1323,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
 	bool branch_device = drm_dp_is_branch(dpcd);
 
 	seq_printf(m, "\tDP branch device present: %s\n",
-		   branch_device ? "yes" : "no");
+		   str_yes_no(branch_device));
 
 	if (!branch_device)
 		return;
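
str_yes_no() comes from the new <linux/string_helpers.h> and simply maps a bool onto a constant string, replacing the open-coded ternary. A minimal illustration:

	#include <linux/string_helpers.h>

	/* str_yes_no(true) == "yes", str_yes_no(false) == "no" */
	pr_info("branch device present: %s\n", str_yes_no(branch_device));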
@@ -216,6 +216,20 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector)
 	kfree(bridge_connector);
 }
 
+static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
+					      struct dentry *root)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+	struct drm_encoder *encoder = bridge_connector->encoder;
+	struct drm_bridge *bridge;
+
+	list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) {
+		if (bridge->funcs->debugfs_init)
+			bridge->funcs->debugfs_init(bridge, root);
+	}
+}
+
 static const struct drm_connector_funcs drm_bridge_connector_funcs = {
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = drm_bridge_connector_detect,
@@ -223,6 +237,7 @@ static const struct drm_connector_funcs drm_bridge_connector_funcs = {
 	.destroy = drm_bridge_connector_destroy,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.debugfs_init = drm_bridge_connector_debugfs_init,
 };
 
 /* -----------------------------------------------------------------------------
@@ -211,7 +211,7 @@ static int split_block(struct drm_buddy *mm,
 }
 
 static struct drm_buddy_block *
-get_buddy(struct drm_buddy_block *block)
+__get_buddy(struct drm_buddy_block *block)
 {
 	struct drm_buddy_block *parent;
 
@@ -225,6 +225,23 @@ get_buddy(struct drm_buddy_block *block)
 	return parent->left;
 }
 
+/**
+ * drm_get_buddy - get buddy address
+ *
+ * @block: DRM buddy block
+ *
+ * Returns the corresponding buddy block for @block, or NULL
+ * if this is a root block and can't be merged further.
+ * Requires some kind of locking to protect against
+ * any concurrent allocate and free operations.
+ */
+struct drm_buddy_block *
+drm_get_buddy(struct drm_buddy_block *block)
+{
+	return __get_buddy(block);
+}
+EXPORT_SYMBOL(drm_get_buddy);
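
For orientation: a block's buddy is the sibling of the same order it was split from, so once both halves are free the pair can be merged back. A hedged usage sketch (the caller provides the locking, as the comment requires):

	struct drm_buddy_block *buddy;

	/* under the allocator's lock */
	buddy = drm_get_buddy(block);	/* NULL for a root block */
	if (buddy && drm_buddy_block_is_free(buddy))
		pr_debug("block and buddy are both free, mergeable\n");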
 
 static void __drm_buddy_free(struct drm_buddy *mm,
 			     struct drm_buddy_block *block)
 {
@@ -233,7 +250,7 @@ static void __drm_buddy_free(struct drm_buddy *mm,
 	while ((parent = block->parent)) {
 		struct drm_buddy_block *buddy;
 
-		buddy = get_buddy(block);
+		buddy = __get_buddy(block);
 
 		if (!drm_buddy_block_is_free(buddy))
 			break;
@@ -282,63 +299,6 @@ void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
 }
 EXPORT_SYMBOL(drm_buddy_free_list);
 
-/**
- * drm_buddy_alloc_blocks - allocate power-of-two blocks
- *
- * @mm: DRM buddy manager to allocate from
- * @order: size of the allocation
- *
- * The order value here translates to:
- *
- * 0 = 2^0 * mm->chunk_size
- * 1 = 2^1 * mm->chunk_size
- * 2 = 2^2 * mm->chunk_size
- *
- * Returns:
- * allocated ptr to the &drm_buddy_block on success
- */
-struct drm_buddy_block *
-drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order)
-{
-	struct drm_buddy_block *block = NULL;
-	unsigned int i;
-	int err;
-
-	for (i = order; i <= mm->max_order; ++i) {
-		block = list_first_entry_or_null(&mm->free_list[i],
-						 struct drm_buddy_block,
-						 link);
-		if (block)
-			break;
-	}
-
-	if (!block)
-		return ERR_PTR(-ENOSPC);
-
-	BUG_ON(!drm_buddy_block_is_free(block));
-
-	while (i != order) {
-		err = split_block(mm, block);
-		if (unlikely(err))
-			goto out_free;
-
-		/* Go low */
-		block = block->left;
-		i--;
-	}
-
-	mark_allocated(block);
-	mm->avail -= drm_buddy_block_size(mm, block);
-	kmemleak_update_trace(block);
-	return block;
-
-out_free:
-	if (i != order)
-		__drm_buddy_free(mm, block);
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL(drm_buddy_alloc_blocks);
-
 static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
 {
 	return s1 <= e2 && e1 >= s2;
@@ -349,49 +309,157 @@ static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
 	return s1 <= s2 && e1 >= e2;
 }
 
-/**
- * drm_buddy_alloc_range - allocate range
- *
- * @mm: DRM buddy manager to allocate from
- * @blocks: output list head to add allocated blocks
- * @start: start of the allowed range for this block
- * @size: size of the allocation
- *
- * Intended for pre-allocating portions of the address space, for example to
- * reserve a block for the initial framebuffer or similar, hence the expectation
- * here is that drm_buddy_alloc_blocks() is still the main vehicle for
- * allocations, so if that's not the case then the drm_mm range allocator is
- * probably a much better fit, and so you should probably go use that instead.
- *
- * Note that it's safe to chain together multiple alloc_ranges
- * with the same blocks list
- *
- * Returns:
- * 0 on success, error code on failure.
- */
-int drm_buddy_alloc_range(struct drm_buddy *mm,
-			  struct list_head *blocks,
-			  u64 start, u64 size)
+static struct drm_buddy_block *
+alloc_range_bias(struct drm_buddy *mm,
+		 u64 start, u64 end,
+		 unsigned int order)
 {
 	struct drm_buddy_block *block;
 	struct drm_buddy_block *buddy;
 	LIST_HEAD(dfs);
 	int err;
 	int i;
 
+	end = end - 1;
+
 	for (i = 0; i < mm->n_roots; ++i)
 		list_add_tail(&mm->roots[i]->tmp_link, &dfs);
 
 	do {
 		u64 block_start;
 		u64 block_end;
 
 		block = list_first_entry_or_null(&dfs,
 						 struct drm_buddy_block,
						 tmp_link);
 		if (!block)
 			break;
 
 		list_del(&block->tmp_link);
 
+		if (drm_buddy_block_order(block) < order)
+			continue;
+
 		block_start = drm_buddy_block_offset(block);
 		block_end = block_start + drm_buddy_block_size(mm, block) - 1;
 
 		if (!overlaps(start, end, block_start, block_end))
 			continue;
 
 		if (drm_buddy_block_is_allocated(block))
 			continue;
 
+		if (contains(start, end, block_start, block_end) &&
+		    order == drm_buddy_block_order(block)) {
+			/*
+			 * Find the free block within the range.
+			 */
+			if (drm_buddy_block_is_free(block))
+				return block;
+
+			continue;
+		}
+
 		if (!drm_buddy_block_is_split(block)) {
 			err = split_block(mm, block);
 			if (unlikely(err))
 				goto err_undo;
 		}
 
 		list_add(&block->right->tmp_link, &dfs);
 		list_add(&block->left->tmp_link, &dfs);
 	} while (1);
 
+	return ERR_PTR(-ENOSPC);
+
+err_undo:
+	/*
+	 * We really don't want to leave around a bunch of split blocks, since
+	 * bigger is better, so make sure we merge everything back before we
+	 * free the allocated blocks.
+	 */
+	buddy = __get_buddy(block);
+	if (buddy &&
+	    (drm_buddy_block_is_free(block) &&
+	     drm_buddy_block_is_free(buddy)))
+		__drm_buddy_free(mm, block);
+	return ERR_PTR(err);
+}
+
+static struct drm_buddy_block *
+get_maxblock(struct list_head *head)
+{
+	struct drm_buddy_block *max_block = NULL, *node;
+
+	max_block = list_first_entry_or_null(head,
+					     struct drm_buddy_block,
+					     link);
+	if (!max_block)
+		return NULL;
+
+	list_for_each_entry(node, head, link) {
+		if (drm_buddy_block_offset(node) >
+		    drm_buddy_block_offset(max_block))
+			max_block = node;
+	}
+
+	return max_block;
+}
+
+static struct drm_buddy_block *
+alloc_from_freelist(struct drm_buddy *mm,
+		    unsigned int order,
+		    unsigned long flags)
+{
+	struct drm_buddy_block *block = NULL;
+	unsigned int i;
+	int err;
+
+	for (i = order; i <= mm->max_order; ++i) {
+		if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+			block = get_maxblock(&mm->free_list[i]);
+			if (block)
+				break;
+		} else {
+			block = list_first_entry_or_null(&mm->free_list[i],
+							 struct drm_buddy_block,
+							 link);
+			if (block)
+				break;
+		}
+	}
+
+	if (!block)
+		return ERR_PTR(-ENOSPC);
+
+	BUG_ON(!drm_buddy_block_is_free(block));
+
+	while (i != order) {
+		err = split_block(mm, block);
+		if (unlikely(err))
+			goto err_undo;
+
+		block = block->right;
+		i--;
+	}
+	return block;
+
+err_undo:
+	if (i != order)
+		__drm_buddy_free(mm, block);
+	return ERR_PTR(err);
+}
+
+static int __alloc_range(struct drm_buddy *mm,
+			 struct list_head *dfs,
+			 u64 start, u64 size,
+			 struct list_head *blocks)
 {
 	struct drm_buddy_block *block;
 	struct drm_buddy_block *buddy;
 	LIST_HEAD(allocated);
-	LIST_HEAD(dfs);
 	u64 end;
 	int err;
-	int i;
-
-	if (size < mm->chunk_size)
-		return -EINVAL;
-
-	if (!IS_ALIGNED(size | start, mm->chunk_size))
-		return -EINVAL;
-
-	if (range_overflows(start, size, mm->size))
-		return -EINVAL;
-
-	for (i = 0; i < mm->n_roots; ++i)
-		list_add_tail(&mm->roots[i]->tmp_link, &dfs);
 
 	end = start + size - 1;
@@ -399,7 +467,7 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
 		u64 block_start;
 		u64 block_end;
 
-		block = list_first_entry_or_null(&dfs,
+		block = list_first_entry_or_null(dfs,
 						 struct drm_buddy_block,
 						 tmp_link);
 		if (!block)
@@ -436,8 +504,8 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
 			goto err_undo;
 		}
 
-		list_add(&block->right->tmp_link, &dfs);
-		list_add(&block->left->tmp_link, &dfs);
+		list_add(&block->right->tmp_link, dfs);
+		list_add(&block->left->tmp_link, dfs);
 	} while (1);
 
 	list_splice_tail(&allocated, blocks);
@@ -449,7 +517,7 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
 	 * bigger is better, so make sure we merge everything back before we
 	 * free the allocated blocks.
 	 */
-	buddy = get_buddy(block);
+	buddy = __get_buddy(block);
 	if (buddy &&
 	    (drm_buddy_block_is_free(block) &&
 	     drm_buddy_block_is_free(buddy)))
@@ -459,7 +527,189 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
 	drm_buddy_free_list(mm, &allocated);
 	return err;
 }
-EXPORT_SYMBOL(drm_buddy_alloc_range);
+
+static int __drm_buddy_alloc_range(struct drm_buddy *mm,
+				   u64 start,
+				   u64 size,
+				   struct list_head *blocks)
+{
+	LIST_HEAD(dfs);
+	int i;
+
+	for (i = 0; i < mm->n_roots; ++i)
+		list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+	return __alloc_range(mm, &dfs, start, size, blocks);
+}
+
+/**
+ * drm_buddy_block_trim - free unused pages
+ *
+ * @mm: DRM buddy manager
+ * @new_size: original size requested
+ * @blocks: Input and output list of allocated blocks.
+ * MUST contain single block as input to be trimmed.
+ * On success will contain the newly allocated blocks
+ * making up the @new_size. Blocks always appear in
+ * ascending order
+ *
+ * For contiguous allocation, we round up the size to the nearest
+ * power of two value, drivers consume *actual* size, so remaining
+ * portions are unused and can be optionally freed with this function
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_block_trim(struct drm_buddy *mm,
+			 u64 new_size,
+			 struct list_head *blocks)
+{
+	struct drm_buddy_block *parent;
+	struct drm_buddy_block *block;
+	LIST_HEAD(dfs);
+	u64 new_start;
+	int err;
+
+	if (!list_is_singular(blocks))
+		return -EINVAL;
+
+	block = list_first_entry(blocks,
+				 struct drm_buddy_block,
+				 link);
+
+	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
+		return -EINVAL;
+
+	if (new_size > drm_buddy_block_size(mm, block))
+		return -EINVAL;
+
+	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
+		return -EINVAL;
+
+	if (new_size == drm_buddy_block_size(mm, block))
+		return 0;
+
+	list_del(&block->link);
+	mark_free(mm, block);
+	mm->avail += drm_buddy_block_size(mm, block);
+
+	/* Prevent recursively freeing this node */
+	parent = block->parent;
+	block->parent = NULL;
+
+	new_start = drm_buddy_block_offset(block);
+	list_add(&block->tmp_link, &dfs);
+	err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
+	if (err) {
+		mark_allocated(block);
+		mm->avail -= drm_buddy_block_size(mm, block);
+		list_add(&block->link, blocks);
+	}
+
+	block->parent = parent;
+	return err;
+}
+EXPORT_SYMBOL(drm_buddy_block_trim);
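
The intended calling pattern, per the comment above: round a contiguous allocation up to a power of two, then hand back the tail. A minimal sketch, assuming mm was set up with drm_buddy_init() and a 4 KiB chunk size:

	LIST_HEAD(blocks);
	u64 size = SZ_2M + SZ_4K;	/* size actually needed */
	int err;

	/* one power-of-two block (4 MiB) covers the request... */
	err = drm_buddy_alloc_blocks(&mm, 0, mm.size,
				     roundup_pow_of_two(size),
				     roundup_pow_of_two(size), &blocks, 0);
	/* ...then trimming releases the unused tail beyond 2 MiB + 4 KiB */
	if (!err)
		err = drm_buddy_block_trim(&mm, size, &blocks);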
+
+/**
+ * drm_buddy_alloc_blocks - allocate power-of-two blocks
+ *
+ * @mm: DRM buddy manager to allocate from
+ * @start: start of the allowed range for this block
+ * @end: end of the allowed range for this block
+ * @size: size of the allocation
+ * @min_page_size: alignment of the allocation
+ * @blocks: output list head to add allocated blocks
+ * @flags: DRM_BUDDY_*_ALLOCATION flags
+ *
+ * alloc_range_bias() called on range limitations, which traverses
+ * the tree and returns the desired block.
+ *
+ * alloc_from_freelist() called when *no* range restrictions
+ * are enforced, which picks the block from the freelist.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_alloc_blocks(struct drm_buddy *mm,
+			   u64 start, u64 end, u64 size,
+			   u64 min_page_size,
+			   struct list_head *blocks,
+			   unsigned long flags)
+{
+	struct drm_buddy_block *block = NULL;
+	unsigned int min_order, order;
+	unsigned long pages;
+	LIST_HEAD(allocated);
+	int err;
+
+	if (size < mm->chunk_size)
+		return -EINVAL;
+
+	if (min_page_size < mm->chunk_size)
+		return -EINVAL;
+
+	if (!is_power_of_2(min_page_size))
+		return -EINVAL;
+
+	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
+		return -EINVAL;
+
+	if (end > mm->size)
+		return -EINVAL;
+
+	if (range_overflows(start, size, mm->size))
+		return -EINVAL;
+
+	/* Actual range allocation */
+	if (start + size == end)
+		return __drm_buddy_alloc_range(mm, start, size, blocks);
+
+	pages = size >> ilog2(mm->chunk_size);
+	order = fls(pages) - 1;
+	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+
+	do {
+		order = min(order, (unsigned int)fls(pages) - 1);
+		BUG_ON(order > mm->max_order);
+		BUG_ON(order < min_order);
+
+		do {
+			if (flags & DRM_BUDDY_RANGE_ALLOCATION)
+				/* Allocate traversing within the range */
+				block = alloc_range_bias(mm, start, end, order);
+			else
+				/* Allocate from freelist */
+				block = alloc_from_freelist(mm, order, flags);
+
+			if (!IS_ERR(block))
+				break;
+
+			if (order-- == min_order) {
+				err = -ENOSPC;
+				goto err_free;
+			}
+		} while (1);
+
+		mark_allocated(block);
+		mm->avail -= drm_buddy_block_size(mm, block);
+		kmemleak_update_trace(block);
+		list_add_tail(&block->link, &allocated);
+
+		pages -= BIT(order);
+
+		if (!pages)
+			break;
+	} while (1);
+
+	list_splice_tail(&allocated, blocks);
+	return 0;
+
+err_free:
+	drm_buddy_free_list(mm, &allocated);
+	return err;
+}
+EXPORT_SYMBOL(drm_buddy_alloc_blocks);
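
Putting the two paths together: with no flags the allocator pops blocks off the free lists (or the highest-offset ones with DRM_BUDDY_TOPDOWN_ALLOCATION), while DRM_BUDDY_RANGE_ALLOCATION walks the tree and only returns blocks inside [start, end). A hedged example, assuming a manager initialized with drm_buddy_init(&mm, SZ_64M, SZ_4K):

	LIST_HEAD(blocks);
	int err;

	/* 8 MiB anywhere, in blocks of at least 64 KiB */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_64M, SZ_8M, SZ_64K,
				     &blocks, 0);

	/* 1 MiB constrained to the first 16 MiB of the address space */
	if (!err)
		err = drm_buddy_alloc_blocks(&mm, 0, SZ_16M, SZ_1M, SZ_4K,
					     &blocks,
					     DRM_BUDDY_RANGE_ALLOCATION);

	drm_buddy_free_list(&mm, &blocks);	/* give everything back */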
 
 /**
  * drm_buddy_block_print - print block information
@@ -112,8 +112,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 		kunmap_atomic(page_virtual);
 	}
 #else
-	pr_err("Architecture has no drm_cache.c support\n");
-	WARN_ON_ONCE(1);
+	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
@@ -143,8 +142,7 @@ drm_clflush_sg(struct sg_table *st)
 	if (wbinvd_on_all_cpus())
 		pr_err("Timed out waiting for cache flush\n");
 #else
-	pr_err("Architecture has no drm_cache.c support\n");
-	WARN_ON_ONCE(1);
+	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_sg);
@@ -177,8 +175,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 	if (wbinvd_on_all_cpus())
 		pr_err("Timed out waiting for cache flush\n");
 #else
-	pr_err("Architecture has no drm_cache.c support\n");
-	WARN_ON_ONCE(1);
+	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_virt_range);
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/string_helpers.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_client.h>
@@ -241,7 +242,7 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
 		connector = connectors[i];
 		enabled[i] = drm_connector_enabled(connector, true);
 		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
-			      connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+			      connector->display_info.non_desktop ? "non desktop" : str_yes_no(enabled[i]));
 
 		any_enabled |= enabled[i];
 	}
@@ -436,6 +436,9 @@ void drm_debugfs_connector_add(struct drm_connector *connector)
 	/* vrr range */
 	debugfs_create_file("vrr_range", S_IRUGO, root, connector,
 			    &vrr_range_fops);
+
+	if (connector->funcs->debugfs_init)
+		connector->funcs->debugfs_init(connector, root);
 }
 
 void drm_debugfs_connector_remove(struct drm_connector *connector)
|
@ -5340,6 +5340,9 @@ drm_reset_display_info(struct drm_connector *connector)
|
|||
info->rgb_quant_range_selectable = false;
|
||||
memset(&info->hdmi, 0, sizeof(info->hdmi));
|
||||
|
||||
info->edid_hdmi_rgb444_dc_modes = 0;
|
||||
info->edid_hdmi_ycbcr444_dc_modes = 0;
|
||||
|
||||
info->non_desktop = 0;
|
||||
memset(&info->monitor_range, 0, sizeof(info->monitor_range));
|
||||
|
||||
|
|
|
@@ -680,6 +680,31 @@ static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
 	schedule_work(&helper->damage_work);
 }
 
+/* Convert memory region into area of scanlines and pixels per scanline */
+static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
+					       struct drm_rect *clip)
+{
+	off_t end = off + len;
+	u32 x1 = 0;
+	u32 y1 = off / info->fix.line_length;
+	u32 x2 = info->var.xres;
+	u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
+
+	if ((y2 - y1) == 1) {
+		/*
+		 * We've only written to a single scanline. Try to reduce
+		 * the number of horizontal pixels that need an update.
+		 */
+		off_t bit_off = (off % info->fix.line_length) * 8;
+		off_t bit_end = (end % info->fix.line_length) * 8;
+
+		x1 = bit_off / info->var.bits_per_pixel;
+		x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
+	}
+
+	drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1);
+}
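
The clip math is easiest to check with numbers; the values below are assumed for illustration:

	/*
	 * 1024x768 XRGB8888: line_length = 4096, bits_per_pixel = 32.
	 * A 64-byte write at off = 8300 gives end = 8364.
	 *
	 *   y1      = 8300 / 4096              = 2
	 *   y2      = DIV_ROUND_UP(8364, 4096) = 3	-> one scanline
	 *   bit_off = (8300 % 4096) * 8        = 864
	 *   bit_end = (8364 % 4096) * 8        = 1376
	 *   x1      = 864 / 32                 = 27
	 *   x2      = DIV_ROUND_UP(1376, 32)   = 43
	 *
	 * drm_rect_init(clip, 27, 2, 16, 1): exactly the 16 touched pixels.
	 */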
+
 /**
  * drm_fb_helper_deferred_io() - fbdev deferred_io callback function
  * @info: fb_info struct pointer
@@ -693,23 +718,23 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
 {
 	unsigned long start, end, min, max;
 	struct page *page;
-	u32 y1, y2;
+	struct drm_rect damage_area;
 
 	min = ULONG_MAX;
 	max = 0;
 	list_for_each_entry(page, pagelist, lru) {
 		start = page->index << PAGE_SHIFT;
-		end = start + PAGE_SIZE - 1;
+		end = start + PAGE_SIZE;
 		min = min(min, start);
 		max = max(max, end);
 	}
+	if (min >= max)
+		return;
 
-	if (min < max) {
-		y1 = min / info->fix.line_length;
-		y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
-			   info->var.yres);
-		drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1);
-	}
+	drm_fb_helper_memory_range_to_clip(info, min, max - min, &damage_area);
+	drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
+			     drm_rect_width(&damage_area),
+			     drm_rect_height(&damage_area));
 }
 EXPORT_SYMBOL(drm_fb_helper_deferred_io);
 
@@ -741,11 +766,18 @@ EXPORT_SYMBOL(drm_fb_helper_sys_read);
 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
 				size_t count, loff_t *ppos)
 {
+	loff_t pos = *ppos;
 	ssize_t ret;
+	struct drm_rect damage_area;
 
 	ret = fb_sys_write(info, buf, count, ppos);
-	if (ret > 0)
-		drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres);
+	if (ret <= 0)
+		return ret;
+
+	drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
+	drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
+			     drm_rect_width(&damage_area),
+			     drm_rect_height(&damage_area));
 
 	return ret;
 }
@@ -2224,6 +2256,7 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
 	loff_t pos = *ppos;
 	size_t total_size;
 	ssize_t ret;
+	struct drm_rect damage_area;
 	int err = 0;
 
 	if (info->screen_size)
@@ -2252,13 +2285,19 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
 	else
 		ret = fb_write_screen_buffer(info, buf, count, pos);
 
-	if (ret > 0)
-		*ppos += ret;
+	if (ret < 0)
+		return ret; /* return last error, if any */
+	else if (!ret)
+		return err; /* return previous error, if any */
 
-	if (ret > 0)
-		drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual);
+	*ppos += ret;
 
-	return ret ? ret : err;
+	drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
+	drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
+			     drm_rect_width(&damage_area),
+			     drm_rect_height(&damage_area));
+
+	return ret;
 }
 
 static void drm_fbdev_fb_fillrect(struct fb_info *info,
@@ -2346,6 +2385,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	fbi->fbops = &drm_fbdev_fb_ops;
 	fbi->screen_size = sizes->surface_height * fb->pitches[0];
 	fbi->fix.smem_len = fbi->screen_size;
+	fbi->flags = FBINFO_DEFAULT;
 
 	drm_fb_helper_fill_info(fbi, fb_helper, sizes);
 
@@ -2353,19 +2393,21 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 		fbi->screen_buffer = vzalloc(fbi->screen_size);
 		if (!fbi->screen_buffer)
 			return -ENOMEM;
+		fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
 
 		fbi->fbdefio = &drm_fbdev_defio;
-
 		fb_deferred_io_init(fbi);
 	} else {
 		/* buffer is mapped for HW framebuffer */
 		ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
 		if (ret)
 			return ret;
-		if (map.is_iomem)
+		if (map.is_iomem) {
 			fbi->screen_base = map.vaddr_iomem;
-		else
+		} else {
 			fbi->screen_buffer = map.vaddr;
+			fbi->flags |= FBINFO_VIRTFB;
+		}
 
 		/*
 		 * Shamelessly leak the physical address to user-space. As
@@ -12,9 +12,11 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 
+#include <drm/drm_device.h>
 #include <drm/drm_format_helper.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
 #include <drm/drm_rect.h>
 
 static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp)
@@ -464,6 +466,21 @@ void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst,
 }
 EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
 
+static void drm_fb_xrgb8888_to_gray8_line(u8 *dst, const u32 *src, unsigned int pixels)
+{
+	unsigned int x;
+
+	for (x = 0; x < pixels; x++) {
+		u8 r = (*src & 0x00ff0000) >> 16;
+		u8 g = (*src & 0x0000ff00) >> 8;
+		u8 b = *src & 0x000000ff;
+
+		/* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+		*dst++ = (3 * r + 6 * g + b) / 10;
+		src++;
+	}
+}
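
The integer weights are a cheap approximation of the BT.601 luma transform the comment cites: 3/10, 6/10 and 1/10 stand in for 0.299, 0.587 and 0.114. For an assumed pixel value:

	/*
	 * *src = 0x00ff8040: r = 255, g = 128, b = 64
	 *
	 *   exact BT.601: 0.299 * 255 + 0.587 * 128 + 0.114 * 64 ~= 158.7
	 *   this helper:  (3 * 255 + 6 * 128 + 64) / 10 = 1597 / 10 = 159
	 */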
+
 /**
  * drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
  * @dst: 8-bit grayscale destination buffer
@@ -484,8 +501,9 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
 void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
 			      const struct drm_framebuffer *fb, const struct drm_rect *clip)
 {
-	unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
-	unsigned int x, y;
+	unsigned int linepixels = clip->x2 - clip->x1;
+	unsigned int len = linepixels * sizeof(u32);
+	unsigned int y;
 	void *buf;
 	u8 *dst8;
 	u32 *src32;
@@ -508,16 +526,7 @@ void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vad
 	for (y = clip->y1; y < clip->y2; y++) {
 		dst8 = dst;
 		src32 = memcpy(buf, vaddr, len);
-		for (x = clip->x1; x < clip->x2; x++) {
-			u8 r = (*src32 & 0x00ff0000) >> 16;
-			u8 g = (*src32 & 0x0000ff00) >> 8;
-			u8 b = *src32 & 0x000000ff;
-
-			/* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
-			*dst8++ = (3 * r + 6 * g + b) / 10;
-			src32++;
-		}
-
+		drm_fb_xrgb8888_to_gray8_line(dst8, src32, linepixels);
 		vaddr += fb->pitches[0];
 		dst += dst_pitch;
 	}
@@ -584,3 +593,111 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
 	return -EINVAL;
 }
 EXPORT_SYMBOL(drm_fb_blit_toio);
+
+static void drm_fb_gray8_to_mono_reversed_line(u8 *dst, const u8 *src, unsigned int pixels,
+					       unsigned int start_offset, unsigned int end_len)
+{
+	unsigned int xb, i;
+
+	for (xb = 0; xb < pixels; xb++) {
+		unsigned int start = 0, end = 8;
+		u8 byte = 0x00;
+
+		if (xb == 0 && start_offset)
+			start = start_offset;
+
+		if (xb == pixels - 1 && end_len)
+			end = end_len;
+
+		for (i = start; i < end; i++) {
+			unsigned int x = xb * 8 + i;
+
+			byte >>= 1;
+			if (src[x] >> 7)
+				byte |= BIT(7);
+		}
+		*dst++ = byte;
+	}
+}
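
Each output byte packs eight gray8 pixels, thresholded on their top bit and filled LSB-first (hence "reversed"). A worked example with assumed input:

	/*
	 * src = { 0x00, 0xff, 0x00, 0xff, 0x80, 0x7f, 0x00, 0xff }
	 *
	 * Pixel i lands in bit i and is set when src[i] >> 7 is 1,
	 * i.e. the gray value is >= 0x80:
	 *
	 *   bits 1, 3, 4 and 7 set -> byte = 0x9a
	 */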
+
+/**
+ * drm_fb_xrgb8888_to_mono_reversed - Convert XRGB8888 to reversed monochrome
+ * @dst: reversed monochrome destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * DRM doesn't have native monochrome support.
+ * Such drivers can announce the commonly supported XR24 format to userspace
+ * and use this function to convert to the native format.
+ *
+ * This function uses drm_fb_xrgb8888_to_gray8() to convert to grayscale and
+ * then the result is converted from grayscale to reversed monohrome.
+ */
+void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *vaddr,
+				      const struct drm_framebuffer *fb, const struct drm_rect *clip)
+{
+	unsigned int linepixels = drm_rect_width(clip);
+	unsigned int lines = clip->y2 - clip->y1;
+	unsigned int cpp = fb->format->cpp[0];
+	unsigned int len_src32 = linepixels * cpp;
+	struct drm_device *dev = fb->dev;
+	unsigned int start_offset, end_len;
+	unsigned int y;
+	u8 *mono = dst, *gray8;
+	u32 *src32;
+
+	if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
+		return;
+
+	/*
+	 * The reversed mono destination buffer contains 1 bit per pixel
+	 * and destination scanlines have to be in multiple of 8 pixels.
+	 */
+	if (!dst_pitch)
+		dst_pitch = DIV_ROUND_UP(linepixels, 8);
+
+	drm_WARN_ONCE(dev, dst_pitch % 8 != 0, "dst_pitch is not a multiple of 8\n");
+
+	/*
+	 * The cma memory is write-combined so reads are uncached.
+	 * Speed up by fetching one line at a time.
+	 *
+	 * Also, format conversion from XR24 to reversed monochrome
+	 * are done line-by-line but are converted to 8-bit grayscale
+	 * as an intermediate step.
+	 *
+	 * Allocate a buffer to be used for both copying from the cma
+	 * memory and to store the intermediate grayscale line pixels.
+	 */
+	src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL);
+	if (!src32)
+		return;
+
+	gray8 = (u8 *)src32 + len_src32;
+
+	/*
+	 * For damage handling, it is possible that only parts of the source
+	 * buffer is copied and this could lead to start and end pixels that
+	 * are not aligned to multiple of 8.
+	 *
+	 * Calculate if the start and end pixels are not aligned and set the
+	 * offsets for the reversed mono line conversion function to adjust.
+	 */
+	start_offset = clip->x1 % 8;
+	end_len = clip->x2 % 8;
+
+	vaddr += clip_offset(clip, fb->pitches[0], cpp);
+	for (y = 0; y < lines; y++) {
+		src32 = memcpy(src32, vaddr, len_src32);
+		drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
+		drm_fb_gray8_to_mono_reversed_line(mono, gray8, dst_pitch,
+						   start_offset, end_len);
+		vaddr += fb->pitches[0];
+		mono += dst_pitch;
+	}
+
+	kfree(src32);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono_reversed);
@@ -309,7 +309,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
 	}
 
 	if (r->flags & DRM_MODE_FB_MODIFIERS &&
-	    !dev->mode_config.allow_fb_modifiers) {
+	    dev->mode_config.fb_modifiers_not_supported) {
 		DRM_DEBUG_KMS("driver does not support fb modifiers\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -594,7 +594,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
 	r->pixel_format = fb->format->format;
 
 	r->flags = 0;
-	if (dev->mode_config.allow_fb_modifiers)
+	if (!dev->mode_config.fb_modifiers_not_supported)
 		r->flags |= DRM_MODE_FB_MODIFIERS;
 
 	for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
@@ -607,7 +607,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
 	for (i = 0; i < fb->format->num_planes; i++) {
 		r->pitches[i] = fb->pitches[i];
 		r->offsets[i] = fb->offsets[i];
-		if (dev->mode_config.allow_fb_modifiers)
+		if (!dev->mode_config.fb_modifiers_not_supported)
 			r->modifier[i] = fb->modifier;
 	}
 
@@ -25,20 +25,21 @@
  *
  */
 
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+#include <linux/file.h>
+#include <linux/fs.h>
 #include <linux/iosys-map.h>
 #include <linux/mem_encrypt.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
 #include <linux/pagevec.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
 
 #include <drm/drm.h>
 #include <drm/drm_device.h>
@@ -1145,7 +1146,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
 			  drm_vma_node_start(&obj->vma_node));
 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
 	drm_printf_indent(p, indent, "imported=%s\n",
-			  obj->import_attach ? "yes" : "no");
+			  str_yes_no(obj->import_attach));
 
 	if (obj->funcs->print_info)
 		obj->funcs->print_info(p, indent, obj);
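
str_yes_no() is one of the <linux/string_helpers.h> additions from this series; it maps a boolean to a constant string and replaces the open-coded ternary. A small sketch of the family (str_on_off() is assumed to come from the same header):

	#include <linux/string_helpers.h>

	pr_info("imported=%s\n", str_yes_no(obj->import_attach)); /* "yes" / "no" */
	pr_info("display=%s\n", str_on_off(enabled));		   /* "on" / "off" */
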
@@ -46,6 +46,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
 	.vmap = drm_gem_shmem_object_vmap,
 	.vunmap = drm_gem_shmem_object_vunmap,
 	.mmap = drm_gem_shmem_object_mmap,
+	.vm_ops = &drm_gem_shmem_vm_ops,
 };
 
 static struct drm_gem_shmem_object *
@@ -588,11 +589,12 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 	drm_gem_vm_close(vma);
 }
 
-static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
 	.fault = drm_gem_shmem_fault,
 	.open = drm_gem_shmem_vm_open,
 	.close = drm_gem_shmem_vm_close,
 };
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
 
 /**
  * drm_gem_shmem_mmap - Memory-map a shmem GEM object
@@ -624,11 +626,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 		return ret;
 	}
 
-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
+	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	if (shmem->map_wc)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	vma->vm_ops = &drm_gem_shmem_vm_ops;
 
 	return 0;
 }
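
VM_DONTDUMP is what implements the shmem change here: core-dump writers skip VMAs carrying the flag, so potentially huge GEM mappings no longer end up in user core files. Drivers that route mmap through drm_gem_shmem_mmap() get this for free; a driver with its own fault-based mapping could apply the same pattern (names below are illustrative, not a real API):

	static int example_gem_mmap(struct drm_gem_object *obj,
				    struct vm_area_struct *vma)
	{
		/* fault-populated, fixed size, excluded from core dumps */
		vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_ops = &example_gem_vm_ops;	/* hypothetical vm_ops */
		return 0;
	}
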
@@ -297,7 +297,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
 		req->value = 64;
 		break;
 	case DRM_CAP_ADDFB2_MODIFIERS:
-		req->value = dev->mode_config.allow_fb_modifiers;
+		req->value = !dev->mode_config.fb_modifiers_not_supported;
 		break;
 	case DRM_CAP_CRTC_IN_VBLANK_EVENT:
 		req->value = 1;
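
Userspace sees no behavioural change from the rename: DRM_CAP_ADDFB2_MODIFIERS still reports whether ADDFB2 accepts modifiers. A sketch with libdrm's drmGetCap():

	#include <stdint.h>
	#include <xf86drm.h>

	uint64_t cap = 0;

	if (drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &cap) == 0 && cap) {
		/* safe to pass DRM_MODE_FB_MODIFIERS to ADDFB2 */
	}
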
@@ -880,7 +880,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
  * @dst: mode to overwrite
  * @src: mode to copy
  *
- * Copy an existing mode into another mode, preserving the object id and
+ * Copy an existing mode into another mode, preserving the
  * list head of the destination mode.
  */
 void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
@@ -237,6 +237,9 @@ static int __drm_universal_plane_init(struct drm_device *dev,
 				      const char *name, va_list ap)
 {
 	struct drm_mode_config *config = &dev->mode_config;
+	static const uint64_t default_modifiers[] = {
+		DRM_FORMAT_MOD_LINEAR,
+	};
 	unsigned int format_modifier_count = 0;
 	int ret;
@@ -277,16 +280,16 @@ static int __drm_universal_plane_init(struct drm_device *dev,
 
 		while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID)
 			format_modifier_count++;
+	} else {
+		if (!dev->mode_config.fb_modifiers_not_supported) {
+			format_modifiers = default_modifiers;
+			format_modifier_count = ARRAY_SIZE(default_modifiers);
+		}
 	}
 
 	/* autoset the cap and check for consistency across all planes */
-	if (format_modifier_count) {
-		drm_WARN_ON(dev, !config->allow_fb_modifiers &&
-			    !list_empty(&config->plane_list));
-		config->allow_fb_modifiers = true;
-	} else {
-		drm_WARN_ON(dev, config->allow_fb_modifiers);
-	}
+	drm_WARN_ON(dev, config->fb_modifiers_not_supported &&
+		    format_modifier_count);
 
 	plane->modifier_count = format_modifier_count;
 	plane->modifiers = kmalloc_array(format_modifier_count,
@@ -341,7 +344,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
 		drm_object_attach_property(&plane->base, config->prop_src_h, 0);
 	}
 
-	if (config->allow_fb_modifiers)
+	if (format_modifier_count)
 		create_in_format_blob(dev, plane);
 
 	return 0;
@@ -368,8 +371,8 @@ static int __drm_universal_plane_init(struct drm_device *dev,
 * drm_universal_plane_init() to let the DRM managed resource infrastructure
 * take care of cleanup and deallocation.
 *
-* Drivers supporting modifiers must set @format_modifiers on all their planes,
-* even those that only support DRM_FORMAT_MOD_LINEAR.
+* Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set
+* @format_modifiers to NULL. The plane will advertise the linear modifier.
 *
 * Returns:
 * Zero on success, error code on failure.
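
With the default_modifiers fallback added above, a linear-only driver can simply pass NULL and the plane still advertises DRM_FORMAT_MOD_LINEAR through the IN_FORMATS blob. A sketch (the funcs and format array names are placeholders):

	ret = drm_universal_plane_init(dev, plane, 0x1 /* possible_crtcs */,
				       &example_plane_funcs,
				       example_formats,
				       ARRAY_SIZE(example_formats),
				       NULL /* modifiers: linear autofilled */,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
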
@@ -379,6 +379,7 @@ static void drm_privacy_screen_device_release(struct device *dev)
 * drm_privacy_screen_register - register a privacy-screen
 * @parent: parent-device for the privacy-screen
 * @ops: &struct drm_privacy_screen_ops pointer with ops for the privacy-screen
+* @data: Private data owned by the privacy screen provider
 *
 * Create and register a privacy-screen.
 *
@@ -853,12 +853,57 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 					&args->handle);
 }
 
+/*
+ * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be
+ * added as timeline fence to a chain again.
+ */
+static int drm_syncobj_flatten_chain(struct dma_fence **f)
+{
+	struct dma_fence_chain *chain = to_dma_fence_chain(*f);
+	struct dma_fence *tmp, **fences;
+	struct dma_fence_array *array;
+	unsigned int count;
+
+	if (!chain)
+		return 0;
+
+	count = 0;
+	dma_fence_chain_for_each(tmp, &chain->base)
+		++count;
+
+	fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
+	if (!fences)
+		return -ENOMEM;
+
+	count = 0;
+	dma_fence_chain_for_each(tmp, &chain->base)
+		fences[count++] = dma_fence_get(tmp);
+
+	array = dma_fence_array_create(count, fences,
+				       dma_fence_context_alloc(1),
+				       1, false);
+	if (!array)
+		goto free_fences;
+
+	dma_fence_put(*f);
+	*f = &array->base;
+	return 0;
+
+free_fences:
+	while (count--)
+		dma_fence_put(fences[count]);
+
+	kfree(fences);
+	return -ENOMEM;
+}
+
 static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
 					    struct drm_syncobj_transfer *args)
 {
 	struct drm_syncobj *timeline_syncobj = NULL;
-	struct dma_fence *fence;
 	struct dma_fence_chain *chain;
+	struct dma_fence *fence;
 	int ret;
 
 	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);

@@ -869,16 +914,22 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
 					     args->src_point, args->flags,
 					     &fence);
 	if (ret)
-		goto err;
+		goto err_put_timeline;
+
+	ret = drm_syncobj_flatten_chain(&fence);
+	if (ret)
+		goto err_free_fence;
 
 	chain = dma_fence_chain_alloc();
 	if (!chain) {
 		ret = -ENOMEM;
-		goto err1;
+		goto err_free_fence;
 	}
 
 	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
-err1:
+err_free_fence:
 	dma_fence_put(fence);
-err:
+err_put_timeline:
 	drm_syncobj_put(timeline_syncobj);
 
 	return ret;
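
The flattening above exists because a dma_fence_chain may not contain another chain; collapsing the chain's fences into one dma_fence_array yields a single flat fence that can safely become a new timeline point. For reference, the array constructor used here, with its assumed 5.18-era prototype:

	/* Signals when all fences signal (signal_on_any == false) and takes
	 * ownership of the fences array, which is why the error path above
	 * frees it manually only when creation fails. */
	struct dma_fence_array *dma_fence_array_create(int num_fences,
						       struct dma_fence **fences,
						       u64 context,
						       unsigned int seqno,
						       bool signal_on_any);
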
@@ -195,7 +195,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
 			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
 			     msecs_to_jiffies(500), NULL, NULL,
-			     dev_name(gpu->dev));
+			     dev_name(gpu->dev), gpu->dev);
 	if (ret)
 		return ret;
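
The new trailing argument is a struct device * that drm_sched_init() gained recently, so the scheduler can be tied to a device for debugging and error reporting. The assumed 5.18-era prototype, matching the call above:

	int drm_sched_init(struct drm_gpu_scheduler *sched,
			   const struct drm_sched_backend_ops *ops,
			   unsigned int hw_submission, unsigned int hang_limit,
			   long timeout, struct workqueue_struct *timeout_wq,
			   atomic_t *score, const char *name,
			   struct device *dev);
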
@@ -455,6 +455,9 @@ static int exynos_drm_init(void)
 {
 	int ret;
 
+	if (drm_firmware_drivers_only())
+		return -ENODEV;
+
 	ret = exynos_drm_register_devices();
 	if (ret)
 		return ret;
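
This is the stock pattern behind "add support for nomodeset to a lot of drm drivers": drm_firmware_drivers_only() returns true when the user booted with nomodeset, and the native driver then declines to load so the firmware framebuffer stays usable. The generic form, with a hypothetical driver name:

	static int __init example_drm_init(void)
	{
		/* honour nomodeset: leave the display to firmware drivers */
		if (drm_firmware_drivers_only())
			return -ENODEV;

		return platform_driver_register(&example_platform_driver);
	}
	module_init(example_drm_init);
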
@@ -24,6 +24,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_modeset_helper.h>
+#include <drm/drm_module.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 

@@ -368,7 +369,7 @@ static struct platform_driver fsl_dcu_drm_platform_driver = {
 	},
 };
 
-module_platform_driver(fsl_dcu_drm_platform_driver);
+drm_module_platform_driver(fsl_dcu_drm_platform_driver);
 
 MODULE_DESCRIPTION("Freescale DCU DRM Driver");
 MODULE_LICENSE("GPL");
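
drm_module_platform_driver() comes from the new <drm/drm_module.h> and folds the guard shown in the exynos and gma500 hunks into the registration macro itself, so converted drivers drop the boilerplate entirely. Roughly (a sketch of the idea, not the exact macro expansion):

	/* What drm_module_platform_driver(drv) amounts to: */
	static int __init drv_init(void)
	{
		if (drm_firmware_drivers_only())
			return -ENODEV;

		return platform_driver_register(&drv);
	}
	module_init(drv_init);
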
@@ -538,6 +538,9 @@ static struct pci_driver psb_pci_driver = {
 
 static int __init psb_init(void)
 {
+	if (drm_firmware_drivers_only())
+		return -ENODEV;
+
 	return pci_register_driver(&psb_pci_driver);
 }
 
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_HISI_HIBMC
 	tristate "DRM Support for Hisilicon Hibmc"
-	depends on DRM && PCI && ARM64
+	depends on DRM && PCI && (ARM64 || COMPILE_TEST)
 	select DRM_KMS_HELPER
 	select DRM_VRAM_HELPER
 	select DRM_TTM
@@ -23,6 +23,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>

@@ -307,7 +308,7 @@ static struct platform_driver kirin_drm_platform_driver = {
 	},
 };
 
-module_platform_driver(kirin_drm_platform_driver);
+drm_module_platform_driver(kirin_drm_platform_driver);
 
 MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
 MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
@@ -305,6 +305,9 @@ static int __init hyperv_init(void)
 {
 	int ret;
 
+	if (drm_firmware_drivers_only())
+		return -ENODEV;
+
 	ret = pci_register_driver(&hyperv_pci_driver);
 	if (ret != 0)
 		return ret;
@@ -36,13 +36,14 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
 	struct i915_ttm_buddy_resource *bman_res;
 	struct drm_buddy *mm = &bman->mm;
-	unsigned long n_pages;
-	unsigned int min_order;
+	unsigned long n_pages, lpfn;
 	u64 min_page_size;
 	u64 size;
 	int err;
 
-	GEM_BUG_ON(place->fpfn || place->lpfn);
+	lpfn = place->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
 
 	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
 	if (!bman_res)

@@ -52,6 +53,12 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	INIT_LIST_HEAD(&bman_res->blocks);
 	bman_res->mm = mm;
 
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+	if (place->fpfn || lpfn != man->size)
+		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
+
 	GEM_BUG_ON(!bman_res->base.num_pages);
 	size = bman_res->base.num_pages << PAGE_SHIFT;
 

@@ -60,10 +67,16 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 		min_page_size = bo->page_alignment << PAGE_SHIFT;
 
 	GEM_BUG_ON(min_page_size < mm->chunk_size);
-	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+		unsigned long pages;
+
 		size = roundup_pow_of_two(size);
-		min_order = ilog2(size) - ilog2(mm->chunk_size);
+		min_page_size = size;
+
+		pages = size >> ilog2(mm->chunk_size);
+		if (pages > lpfn)
+			lpfn = pages;
 	}
 
 	if (size > mm->size) {

@@ -73,34 +86,26 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 
 	n_pages = size >> ilog2(mm->chunk_size);
 
-	do {
-		struct drm_buddy_block *block;
-		unsigned int order;
+	mutex_lock(&bman->lock);
+	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
+				     (u64)lpfn << PAGE_SHIFT,
+				     (u64)n_pages << PAGE_SHIFT,
+				     min_page_size,
+				     &bman_res->blocks,
+				     bman_res->flags);
+	mutex_unlock(&bman->lock);
+	if (unlikely(err))
+		goto err_free_blocks;
 
-		order = fls(n_pages) - 1;
-		GEM_BUG_ON(order > mm->max_order);
-		GEM_BUG_ON(order < min_order);
+	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+		u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
 
-		do {
-			mutex_lock(&bman->lock);
-			block = drm_buddy_alloc_blocks(mm, order);
-			mutex_unlock(&bman->lock);
-			if (!IS_ERR(block))
-				break;
-
-			if (order-- == min_order) {
-				err = -ENOSPC;
-				goto err_free_blocks;
-			}
-		} while (1);
-
-		n_pages -= BIT(order);
-
-		list_add_tail(&block->link, &bman_res->blocks);
-
-		if (!n_pages)
-			break;
-	} while (1);
+		mutex_lock(&bman->lock);
+		drm_buddy_block_trim(mm,
+				     original_size,
+				     &bman_res->blocks);
+		mutex_unlock(&bman->lock);
+	}
 
 	*res = &bman_res->base;
 	return 0;

@@ -268,10 +273,17 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
 {
 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
 	struct drm_buddy *mm = &bman->mm;
+	unsigned long flags = 0;
 	int ret;
 
+	flags |= DRM_BUDDY_RANGE_ALLOCATION;
+
 	mutex_lock(&bman->lock);
-	ret = drm_buddy_alloc_range(mm, &bman->reserved, start, size);
+	ret = drm_buddy_alloc_blocks(mm, start,
+				     start + size,
+				     size, mm->chunk_size,
+				     &bman->reserved,
+				     flags);
 	mutex_unlock(&bman->lock);
 
 	return ret;
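
Both call sites now go through the one range-aware entry point: the old order-based drm_buddy_alloc_blocks(mm, order) and drm_buddy_alloc_range() are folded into a single function that takes an address window, a size, and a minimum page size, with behaviour tweaked via the DRM_BUDDY_*_ALLOCATION flags. A hedged usage sketch built from the calls above:

	LIST_HEAD(blocks);
	unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
	int err;

	/* carve 'size' bytes out of [start, end), chunk_size granularity */
	err = drm_buddy_alloc_blocks(mm, start, end, size,
				     mm->chunk_size, &blocks, flags);
	if (!err)
		drm_buddy_free_list(mm, &blocks);	/* release when done */
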
@@ -20,6 +20,7 @@ struct drm_buddy;
 *
 * @base: struct ttm_resource base class we extend
 * @blocks: the list of struct i915_buddy_block for this resource/allocation
+* @flags: DRM_BUDDY_*_ALLOCATION flags
 * @mm: the struct i915_buddy_mm for this resource
 *
 * Extends the struct ttm_resource to manage an address space allocation with

@@ -28,6 +29,7 @@ struct drm_buddy;
 struct i915_ttm_buddy_resource {
 	struct ttm_resource base;
 	struct list_head blocks;
+	unsigned long flags;
 	struct drm_buddy *mm;
 };
 
@@ -341,6 +341,9 @@ static struct platform_driver * const drivers[] = {
 
 static int __init imx_drm_init(void)
 {
+	if (drm_firmware_drivers_only())
+		return -ENODEV;
+
 	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
 }
 module_init(imx_drm_init);
@@ -65,8 +65,10 @@ struct ingenic_dma_hwdescs {
 struct jz_soc_info {
 	bool needs_dev_clk;
 	bool has_osd;
+	bool has_alpha;
 	bool map_noncoherent;
 	bool use_extended_hwdesc;
+	bool plane_f0_not_working;
 	unsigned int max_width, max_height;
 	const u32 *formats_f0, *formats_f1;
 	unsigned int num_formats_f0, num_formats_f1;

@@ -453,7 +455,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
 	if (!crtc)
 		return 0;
 
-	if (plane == &priv->f0)
+	if (priv->soc_info->plane_f0_not_working && plane == &priv->f0)
 		return -EINVAL;
 
 	crtc_state = drm_atomic_get_existing_crtc_state(state,

@@ -1055,6 +1057,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
 	long parent_rate;
 	unsigned int i, clone_mask = 0;
 	int ret, irq;
+	u32 osdc = 0;
 
 	soc_info = of_device_get_match_data(dev);
 	if (!soc_info) {

@@ -1312,7 +1315,10 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
 
 	/* Enable OSD if available */
 	if (soc_info->has_osd)
-		regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN);
+		osdc |= JZ_LCD_OSDC_OSDEN;
+	if (soc_info->has_alpha)
+		osdc |= JZ_LCD_OSDC_ALPHAEN;
+	regmap_write(priv->map, JZ_REG_LCD_OSDC, osdc);
 
 	mutex_init(&priv->clk_mutex);
 	priv->clock_nb.notifier_call = ingenic_drm_update_pixclk;

@@ -1511,7 +1517,9 @@ static const struct jz_soc_info jz4770_soc_info = {
 static const struct jz_soc_info jz4780_soc_info = {
 	.needs_dev_clk = true,
 	.has_osd = true,
+	.has_alpha = true,
 	.use_extended_hwdesc = true,
+	.plane_f0_not_working = true,	/* REVISIT */
 	.max_width = 4096,
 	.max_height = 2048,
 	.formats_f1 = jz4770_formats_f1,

@@ -1543,6 +1551,9 @@ static int ingenic_drm_init(void)
 {
 	int err;
 
+	if (drm_firmware_drivers_only())
+		return -ENODEV;
+
 	if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) {
 		err = platform_driver_register(ingenic_ipu_driver_ptr);
 		if (err)
@@ -18,6 +18,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 

@@ -628,7 +629,7 @@ static struct platform_driver kmb_platform_driver = {
 	},
 };
 
-module_platform_driver(kmb_platform_driver);
+drm_module_platform_driver(kmb_platform_driver);
 
 MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("Keembay Display driver");
Some files were not shown because too many files have changed in this diff.