Main pull request for drm for 4.10 kernel

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJYT3qqAAoJEAx081l5xIa+dLMP/2dqBybSAeWlPmAwVenIHRtS
 KFNktISezFSY/LBcIP2mHkFJmjTKBMZFxWnyEJL9NmFUD1cS2WMyNnC1282h/+rD
 +P8Bsmzmt/daV4UTFxVDpzlmVlavAyakNi6FnSQfAfmf+3PB1yzU3gn8ld9pU/if
 h7KEp9fDn9eYZreTRfCUloI2yoVpD9d0DG3uaGDN/N0kGUnCC6TZT5ig5j2JO016
 fYf/DqoYAk3ItWF9WK/uG7qJIGi37afCpQq+kbSSJk+p3HjJqu8JUe9jzqYdl7j9
 26TGSY5o9WLhZkxDgbcCIJzcFJhMmXgMdhjil9lqaHmnNG5FPFU7g8DK1CZqbel9
 m8+aRPn1EgxIahMgdl8NblW1pfO2Kco0tZmoP5vXx1uqhivd67h0hiQqp66WxOJd
 i2yMLncaCEv8M161CVEgtzuI5a7nCfaZv7J9ArzbkD/huBwu51IZgTs7Dz4njgvz
 VPB5FBTB/ZYteErUNoh6gjF0hLngWvvJSPvuzT+EFO7yypek0IJ28GTdbxYSP+jR
 13697s5Itigf/D3KUdRRGsWRzyVVN9n+djkl//sy5ddL9eOlKSKEga4ujOUjTWaW
 hTvAxpK9GmJS/Iun5jIP6f75zDbi+e8FWUeB/OI2lPtnApaSKdXBTPXsco2RnTEV
 +G6XrH8IMEIsTxOk7hWU
 =7s/c
 -----END PGP SIGNATURE-----

Merge tag 'drm-for-v4.10' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main pull request for drm for 4.10 kernel.

  New drivers:
   - ZTE VOU display driver (zxdrm)
   - Amlogic Meson Graphic Controller GXBB/GXL/GXM SoCs (meson)
   - MXSFB support (mxsfb)

  Core:
   - Format handling has been reworked
   - Better atomic state debugging
   - drm_mm leak debugging
   - Atomic explicit fencing support
   - fbdev helper ops
   - Documentation updates
   - MST fbcon fixes

  Bridge:
   - Silicon Image SiI8620 driver

  Panel:
   - Add support for new simple panels

  i915:
   - GVT Device model
   - Better HDMI 2.0 support on Skylake
   - More watermark fixes
   - GPU idling rework for suspend/resume
   - DP Audio workarounds
   - Scheduler prep-work
   - Opregion CADL handling
   - GPU scheduler and priority boosting

  amdgpu/radeon:
   - Support for virtual devices
   - New VM manager for non-contig VRAM buffers
   - UVD powergating
   - SI register header cleanup
   - Cursor fixes
   - Power management fixes

  nouveau:
   - Power management reworks for better voltage/clock changes
   - Atomic modesetting support
   - DisplayPort Multistream (MST) support
   - GP102/104 hang and cursor fixes
   - GP106 support

  hisilicon:
   - hibmc support (BMC chip for aarch64 servers)

  armada:
   - add tracing support for overlay change
   - refactor plane support
   - de-midlayer the driver

  omapdrm:
   - Timing code cleanups

  rcar-du:
   - R8A7792/R8A7796 support
   - Misc fixes.

  sunxi:
   - A31 SoC display engine support

  imx-drm:
   - YUV format support
   - Cleanup plane atomic update

  mali-dp:
   - Misc fixes

  dw-hdmi:
   - Add support for HDMI i2c master controller

  tegra:
   - IOMMU support fixes
   - Error handling fixes

  tda998x:
   - Fix connector registration
   - Improved robustness
   - Fix infoframe/audio compliance

  virtio:
   - fix busid issues
   - allocate more vbufs

  qxl:
   - misc fixes and cleanups.

  vc4:
   - Fragment shader threading
   - ETC1 support
   - VEC (tv-out) support

  msm:
   - A5XX GPU support
   - Lots of atomic changes

  tilcdc:
   - Misc fixes and cleanups.

  etnaviv:
   - Fix dma-buf export path
   - DRAW_INSTANCED support
   - fix driver on i.MX6SX

  exynos:
   - HDMI refactoring

  fsl-dcu:
   - fbdev changes"

* tag 'drm-for-v4.10' of git://people.freedesktop.org/~airlied/linux: (1343 commits)
  drm/nouveau/kms/nv50: fix atomic regression on original G80
  drm/nouveau/bl: Do not register interface if Apple GMUX detected
  drm/nouveau/bl: Assign different names to interfaces
  drm/nouveau/bios/dp: fix handling of LevelEntryTableIndex on DP table 4.2
  drm/nouveau/ltc: protect clearing of comptags with mutex
  drm/nouveau/gr/gf100-: handle GPC/TPC/MPC trap
  drm/nouveau/core: recognise GP106 chipset
  drm/nouveau/ttm: wait for bo fence to signal before unmapping vmas
  drm/nouveau/gr/gf100-: FECS intr handling is not relevant on proprietary ucode
  drm/nouveau/gr/gf100-: properly ack all FECS error interrupts
  drm/nouveau/fifo/gf100-: recover from host mmu faults
  drm: Add fake controlD* symlinks for backwards compat
  drm/vc4: Don't use drm_put_dev
  drm/vc4: Document VEC DT binding
  drm/vc4: Add support for the VEC (Video Encoder) IP
  drm: Add TV connector states to drm_connector_state
  drm: Turn DRM_MODE_SUBCONNECTOR_xx definitions into an enum
  drm/vc4: Fix ->clock_select setting for the VEC encoder
  drm/amdgpu/dce6: Set MASTER_UPDATE_MODE to 0 in resume_mc_access as well
  drm/amdgpu: use pin rather than pin_restricted in a few cases
  ...
Committed by Linus Torvalds on 2016-12-13 09:35:09 -08:00 (commit 9439b3710d).
1010 changed files with 129375 additions and 27317 deletions.


@ -0,0 +1,112 @@
Amlogic Meson Display Controller
================================
The Amlogic Meson Display controller is composed of several components
that are going to be documented below:
DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
 |  vd1   _______     _____________    _________________      |               |
D |-------|       |---|             |  |                 |    |   HDMI PLL   |
D |  vd2  |  VIU  |   | Video Post  |  | Video Encoders  |<---|-----VCLK     |
R |-------|       |---| Processing  |  |                 |    |              |
  |  osd2 |       |   |             |--| Enci -----------|----|-----VDAC-----|
R |-------|  CSC  |---|   Scalers   |  | Encp -----------|----|----HDMI-TX---|
A |  osd1 |       |   |  Blenders   |  | Encl -----------|----|--------------|
M |-------|_______|---|_____________|  |_________________|    |              |
 __|___________________________________________________________|______________|
VIU: Video Input Unit
---------------------
The Video Input Unit is in charge of the pixel scanout from the DDR memory.
It fetches the frame addresses, strides and parameters from the "Canvas" memory.
This part is also in charge of the CSC (Colorspace Conversion).
It can handle 2 OSD Planes and 2 Video Planes.
VPP: Video Post Processing
--------------------------
The Video Post Processing is in charge of the scaling and blending of the
various planes into a single pixel stream.
There is a special "pre-blending" used by the video planes with a dedicated
scaler and a "post-blending" to merge with the OSD Planes.
One of the two OSD planes also has a dedicated scaler.
VENC: Video Encoders
--------------------
The VENC is composed of multiple pixel encoders:
 - ENCI: Interlaced video encoder for CVBS and interlaced HDMI
 - ENCP: Progressive video encoder for HDMI
 - ENCL: LCD LVDS encoder
The VENC unit gets a pixel clock (VCLK) from a dedicated HDMI PLL and clock
tree and provides the scanout clock to the VPP and VIU.
The ENCI is connected to a single VDAC for Composite Output.
The ENCI and ENCP are connected to an on-chip HDMI Transceiver.
Device Tree Bindings:
---------------------
VPU: Video Processing Unit
--------------------------
Required properties:
- compatible: value should be different for each SoC family:
- GXBB (S905): "amlogic,meson-gxbb-vpu"
- GXL (S905X, S905D): "amlogic,meson-gxl-vpu"
- GXM (S912): "amlogic,meson-gxm-vpu"
followed by the common "amlogic,meson-gx-vpu"
- reg: base address and size of the following memory-mapped regions:
- vpu
- hhi
- dmc
- reg-names: should contain the names of the previous memory regions
- interrupts: should contain the VENC Vsync interrupt number
Required nodes:
The connections to the VPU output video ports are modeled using the OF graph
bindings specified in Documentation/devicetree/bindings/graph.txt.
The following table lists for each supported model the port number
corresponding to each VPU output.
                Port 0          Port 1
-----------------------------------------
 S905 (GXBB)    CVBS VDAC       HDMI-TX
 S905X (GXL)    CVBS VDAC       HDMI-TX
 S905D (GXL)    CVBS VDAC       HDMI-TX
 S912 (GXM)     CVBS VDAC       HDMI-TX
Example:
tv-connector {
	compatible = "composite-video-connector";

	port {
		tv_connector_in: endpoint {
			remote-endpoint = <&cvbs_vdac_out>;
		};
	};
};

vpu: vpu@d0100000 {
	compatible = "amlogic,meson-gxbb-vpu";
	reg = <0x0 0xd0100000 0x0 0x100000>,
	      <0x0 0xc883c000 0x0 0x1000>,
	      <0x0 0xc8838000 0x0 0x1000>;
	reg-names = "vpu", "hhi", "dmc";
	interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
	#address-cells = <1>;
	#size-cells = <0>;

	/* CVBS VDAC output port */
	port@0 {
		reg = <0>;

		cvbs_vdac_out: endpoint {
			remote-endpoint = <&tv_connector_in>;
		};
	};
};


@ -43,6 +43,13 @@ Required properties for DPI:
- port: Port node with a single endpoint connecting to the panel
device, as defined in [1]
Required properties for VEC:
- compatible: Should be "brcm,bcm2835-vec"
- reg: Physical base address and length of the registers
- clocks: The core clock the unit runs on
- interrupts: The interrupt number
See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
Required properties for V3D:
- compatible: Should be "brcm,bcm2835-v3d"
- reg: Physical base address and length of the V3D's registers
@ -92,6 +99,13 @@ dpi: dpi@7e208000 {
};
};
vec: vec@7e806000 {
	compatible = "brcm,bcm2835-vec";
	reg = <0x7e806000 0x1000>;
	clocks = <&clocks BCM2835_CLOCK_VEC>;
	interrupts = <2 27>;
};
v3d: v3d@7ec00000 {
	compatible = "brcm,bcm2835-v3d";
	reg = <0x7ec00000 0x1000>;


@ -16,6 +16,8 @@ graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Video port 0 for RGB input
- Video port 1 for VGA output
Optional properties:
- vdd-supply: Power supply for DAC
Example
-------


@ -19,7 +19,9 @@ Required properties:
Optional properties
- reg-io-width: width of the register accesses in bytes (1 or 4), defaults to 1 if not present
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing,
if the property is omitted, a functionally reduced I2C bus
controller on DW HDMI is probed
- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
Example:


@ -6,10 +6,15 @@ Required properties:
Optional properties:
- powerdown-gpios: power-down gpio
- reg: I2C address. If and only if present, the device node should be
placed inside the I2C controller node that the TFP410 is
connected to.
Required nodes:
- Video port 0 for DPI input
- Video port 1 for DVI output
- Video port 0 for DPI input [1].
- Video port 1 for DVI output [1].
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example
-------


@ -1,20 +1,57 @@
* Freescale MXS LCD Interface (LCDIF)
New bindings:
=============
Required properties:
- compatible: Should be "fsl,<chip>-lcdif". Supported chips include
imx23 and imx28.
- reg: Address and length of the register set for lcdif
- interrupts: Should contain lcdif interrupts
- display : phandle to display node (see below for details)
- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
Should be "fsl,imx28-lcdif" for i.MX28.
Should be "fsl,imx6sx-lcdif" for i.MX6SX.
- reg: Address and length of the register set for LCDIF
- interrupts: Should contain LCDIF interrupt
- clocks: A list of phandle + clock-specifier pairs, one for each
entry in 'clock-names'.
- clock-names: A list of clock names. For MXSFB it should contain:
- "pix" for the LCDIF block clock
- (MX6SX-only) "axi", "disp_axi" for the bus interface clock
Required sub-nodes:
- port: The connection to an encoder chip.
Example:
lcdif1: display-controller@2220000 {
	compatible = "fsl,imx6sx-lcdif", "fsl,imx28-lcdif";
	reg = <0x02220000 0x4000>;
	interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&clks IMX6SX_CLK_LCDIF1_PIX>,
		 <&clks IMX6SX_CLK_LCDIF_APB>,
		 <&clks IMX6SX_CLK_DISPLAY_AXI>;
	clock-names = "pix", "axi", "disp_axi";

	port {
		parallel_out: endpoint {
			remote-endpoint = <&panel_in_parallel>;
		};
	};
};
Deprecated bindings:
====================
Required properties:
- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
Should be "fsl,imx28-lcdif" for i.MX28.
- reg: Address and length of the register set for LCDIF
- interrupts: Should contain LCDIF interrupts
- display: phandle to display node (see below for details)
* display node
Required properties:
- bits-per-pixel : <16> for RGB565, <32> for RGB888/666.
- bus-width : number of data lines. Could be <8>, <16>, <18> or <24>.
- bits-per-pixel: <16> for RGB565, <32> for RGB888/666.
- bus-width: number of data lines. Could be <8>, <16>, <18> or <24>.
Required sub-node:
- display-timings : Refer to binding doc display-timing.txt for details.
- display-timings: Refer to binding doc display-timing.txt for details.
Examples:


@ -0,0 +1,7 @@
AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
Required properties:
- compatible: should be "auo,g133han01"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,7 @@
AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
Required properties:
- compatible: should be "auo,g185han01"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,7 @@
AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
Required properties:
- compatible: should be "auo,t215hvn01"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,7 @@
Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
Required properties:
- compatible: should be "chunghwa,claa070wp03xg"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -32,6 +32,14 @@ optional properties:
- active low = drive pixel data on falling edge/
sample data on rising edge
- ignored = ignored
- syncclk-active: with
- active high = drive sync on rising edge/
sample sync on falling edge of pixel
clock
- active low = drive sync on falling edge/
sample sync on rising edge of pixel
clock
- omitted = same configuration as pixelclk-active
- interlaced (bool): boolean to enable interlaced mode
- doublescan (bool): boolean to enable doublescan mode
- doubleclk (bool): boolean to enable doubleclock mode


@ -0,0 +1,7 @@
New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
Required properties:
- compatible: should be "nvd,9128"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,36 @@
Sharp 15" LQ150X1LG11 XGA TFT LCD panel
Required properties:
- compatible: should be "sharp,lq150x1lg11"
- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
Optional properties:
- backlight: phandle of the backlight device
- rlud-gpios: a single GPIO for the RL/UD (rotate 180 degrees) pin.
- sellvds-gpios: a single GPIO for the SELLVDS pin.
If rlud-gpios and/or sellvds-gpios are not specified, the RL/UD and/or SELLVDS
pins are assumed to be handled appropriately by the hardware.
Example:
backlight: backlight {
	compatible = "pwm-backlight";
	pwms = <&pwm 0 100000>;				/* VBR */

	brightness-levels = <0 20 40 60 80 100>;
	default-brightness-level = <2>;

	power-supply = <&vdd_12v_reg>;			/* VDD */
	enable-gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;	/* XSTABY */
};

panel {
	compatible = "sharp,lq150x1lg11";

	power-supply = <&vcc_3v3_reg>;			/* VCC */

	backlight = <&backlight>;
	rlud-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;	/* RL/UD */
	sellvds-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;	/* SELLVDS */
};


@ -6,9 +6,11 @@ Required Properties:
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
- "renesas,du-r8a7792" for R8A7792 (R-Car V2H) compatible DU
- "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU
- "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
- "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU
- "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU
- reg: A list of base address and length of each memory resource, one for
each entry in the reg-names property.
@ -25,10 +27,10 @@ Required Properties:
- clock-names: Name of the clocks. This property is model-dependent.
- R8A7779 uses a single functional clock. The clock doesn't need to be
named.
- R8A779[01345] use one functional clock per channel and one clock per LVDS
encoder (if available). The functional clocks must be named "du.x" with
"x" being the channel numerical index. The LVDS clocks must be named
"lvds.x" with "x" being the LVDS encoder numerical index.
- R8A779[0123456] use one functional clock per channel and one clock per
LVDS encoder (if available). The functional clocks must be named "du.x"
with "x" being the channel numerical index. The LVDS clocks must be
named "lvds.x" with "x" being the LVDS encoder numerical index.
- In addition to the functional and encoder clocks, all DU versions also
support externally supplied pixel clocks. Those clocks are optional.
When supplied they must be named "dclkin.x" with "x" being the input
@ -47,9 +49,11 @@ corresponding to each DU output.
 R8A7779 (H1)    DPAD 0    DPAD 1    -         -
 R8A7790 (H2)    DPAD      LVDS 0    LVDS 1    -
 R8A7791 (M2-W)  DPAD      LVDS 0    -         -
 R8A7792 (V2H)   DPAD 0    DPAD 1    -         -
 R8A7793 (M2-N)  DPAD      LVDS 0    -         -
 R8A7794 (E2)    DPAD 0    DPAD 1    -         -
 R8A7795 (H3)    DPAD      HDMI 0    HDMI 1    LVDS
 R8A7796 (M3-W)  DPAD      HDMI      LVDS      -
Example: R8A7790 (R-Car H2) DU


@ -28,6 +28,8 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
Required properties:
- compatible: value must be either:
* allwinner,sun5i-a13-tcon
* allwinner,sun6i-a31-tcon
* allwinner,sun6i-a31s-tcon
* allwinner,sun8i-a33-tcon
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
@ -50,7 +52,7 @@ Required properties:
second the block connected to the TCON channel 1 (usually the TV
encoder)
On the A13, there is one more clock required:
On SoCs other than the A33, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
DRC
@ -64,6 +66,8 @@ adaptive backlight control.
Required properties:
- compatible: value must be one of:
* allwinner,sun6i-a31-drc
* allwinner,sun6i-a31s-drc
* allwinner,sun8i-a33-drc
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@ -87,6 +91,7 @@ system.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-backend
* allwinner,sun6i-a31-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the frontend and backend
@ -117,6 +122,7 @@ deinterlacing and color space conversion.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-frontend
* allwinner,sun6i-a31-display-frontend
* allwinner,sun8i-a33-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@ -142,6 +148,8 @@ extra node.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-engine
* allwinner,sun6i-a31-display-engine
* allwinner,sun6i-a31s-display-engine
* allwinner,sun8i-a33-display-engine
- allwinner,pipelines: list of phandle to the display engine


@ -1,7 +1,9 @@
Device-Tree bindings for tilcdc DRM driver
Required properties:
- compatible: value should be "ti,am33xx-tilcdc".
- compatible: value should be one of the following:
- "ti,am33xx-tilcdc" for AM335x based boards
- "ti,da850-tilcdc" for DA850/AM18x/OMAP-L138 based boards
- interrupts: the interrupt number
- reg: base address and size of the LCDC device
@ -51,7 +53,7 @@ Optional nodes:
Example:
fb: fb@4830e000 {
compatible = "ti,am33xx-tilcdc";
compatible = "ti,am33xx-tilcdc", "ti,da850-tilcdc";
reg = <0x4830e000 0x1000>;
interrupt-parent = <&intc>;
interrupts = <36>;


@ -0,0 +1,84 @@
ZTE VOU Display Controller
This is a display controller found on the ZTE ZX296718 SoC. It includes multiple
Graphic Layers (GL) and Video Layers (VL), two Mixers/Channels, and a few blocks
handling scaling, color space conversion etc. VOU also integrates the support
for typical output devices, like HDMI, TV Encoder, VGA, and RGB LCD.
* Master VOU node
It must be the parent node of all the sub-device nodes.
Required properties:
- compatible: should be "zte,zx296718-vou"
- #address-cells: should be <1>
- #size-cells: should be <1>
- ranges: list of address translations between VOU and sub-devices
* VOU DPC device
Required properties:
- compatible: should be "zte,zx296718-dpc"
- reg: Physical base address and length of DPC register regions, one for each
entry in 'reg-names'
- reg-names: The names of register regions. The following regions are required:
"osd"
"timing_ctrl"
"dtrc"
"vou_ctrl"
"otfppu"
- interrupts: VOU DPC interrupt number to CPU
- clocks: A list of phandle + clock-specifier pairs, one for each entry
in 'clock-names'
- clock-names: A list of clock names. The following clocks are required:
"aclk"
"ppu_wclk"
"main_wclk"
"aux_wclk"
* HDMI output device
Required properties:
- compatible: should be "zte,zx296718-hdmi"
- reg: Physical base address and length of the HDMI device IO region
- interrupts : HDMI interrupt number to CPU
- clocks: A list of phandle + clock-specifier pairs, one for each entry
in 'clock-names'
- clock-names: A list of clock names. The following clocks are required:
"osc_cec"
"osc_clk"
"xclk"
Example:
vou: vou@1440000 {
	compatible = "zte,zx296718-vou";
	#address-cells = <1>;
	#size-cells = <1>;
	ranges = <0 0x1440000 0x10000>;

	dpc: dpc@0 {
		compatible = "zte,zx296718-dpc";
		reg = <0x0000 0x1000>, <0x1000 0x1000>,
		      <0x5000 0x1000>, <0x6000 0x1000>,
		      <0xa000 0x1000>;
		reg-names = "osd", "timing_ctrl",
			    "dtrc", "vou_ctrl",
			    "otfppu";
		interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&topcrm VOU_ACLK>, <&topcrm VOU_PPU_WCLK>,
			 <&topcrm VOU_MAIN_WCLK>, <&topcrm VOU_AUX_WCLK>;
		clock-names = "aclk", "ppu_wclk",
			      "main_wclk", "aux_wclk";
	};

	hdmi: hdmi@c000 {
		compatible = "zte,zx296718-hdmi";
		reg = <0xc000 0x4000>;
		interrupts = <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>;
		clocks = <&topcrm HDMI_OSC_CEC>,
			 <&topcrm HDMI_OSC_CLK>,
			 <&topcrm HDMI_XCLK>;
		clock-names = "osc_cec", "osc_clk", "xclk";
	};
};


@ -187,6 +187,7 @@ netgear NETGEAR
netlogic Broadcom Corporation (formerly NetLogic Microsystems)
netxeon Shenzhen Netxeon Technology CO., LTD
newhaven Newhaven Display International
nvd New Vision Display
nintendo Nintendo
nokia Nokia
nuvoton Nuvoton Technology Corporation


@ -0,0 +1,33 @@
Silicon Image SiI8620 HDMI/MHL bridge bindings
Required properties:
- compatible: "sil,sii8620"
- reg: i2c address of the bridge
- cvcc10-supply: Digital Core Supply Voltage (1.0V)
- iovcc18-supply: I/O Supply Voltage (1.8V)
- interrupts, interrupt-parent: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin
- clocks, clock-names: specification and name of "xtal" clock
- video interfaces: The device node can contain a video interface port
node for the HDMI encoder, as described in [1].
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
sii8620@39 {
	reg = <0x39>;
	compatible = "sil,sii8620";
	cvcc10-supply = <&ldo36_reg>;
	iovcc18-supply = <&ldo34_reg>;
	interrupt-parent = <&gpf0>;
	interrupts = <2 0>;
	reset-gpios = <&gpv7 0 0>;
	clocks = <&pmu_system_controller 0>;
	clock-names = "xtal";

	port {
		mhl_to_hdmi: endpoint {
			remote-endpoint = <&hdmi_to_mhl>;
		};
	};
};


@ -143,6 +143,9 @@ Device Instance and Driver Handling
.. kernel-doc:: drivers/gpu/drm/drm_drv.c
:export:
.. kernel-doc:: include/drm/drm_drv.h
:internal:
Driver Load
-----------
@ -350,6 +353,23 @@ how the ioctl is allowed to be called.
.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
:export:
Misc Utilities
==============
Printer
-------
.. kernel-doc:: include/drm/drm_print.h
:doc: print
.. kernel-doc:: include/drm/drm_print.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_print.c
:export:
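The drm_printer abstraction documented here lets a single dump routine target either a seq_file or the kernel log. A minimal sketch of the intended usage (foo_dump_state(), foo_debugfs_show() and foo_report_error() are hypothetical driver functions; drm_printf(), drm_seq_file_printer() and drm_info_printer() are the exported interfaces):

	#include <drm/drm_print.h>

	/* one dump helper, reusable from debugfs and error/log paths */
	static void foo_dump_state(struct drm_printer *p, int value)
	{
		drm_printf(p, "state: %d\n", value);
	}

	static int foo_debugfs_show(struct seq_file *m, void *data)
	{
		struct drm_printer p = drm_seq_file_printer(m);

		foo_dump_state(&p, 42);
		return 0;
	}

	static void foo_report_error(struct device *dev)
	{
		struct drm_printer p = drm_info_printer(dev);

		foo_dump_state(&p, -1);
	}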
Legacy Support Code
===================


@ -63,6 +63,9 @@ Atomic State Reset and Initialization
.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
:doc: atomic state reset and initialization
Helper Functions Reference
--------------------------
.. kernel-doc:: include/drm/drm_atomic_helper.h
:internal:
@ -261,14 +264,6 @@ Plane Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
:export:
Tile group
==========
# FIXME: This should probably be moved into a property documentation section
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
:doc: Tile group
Auxiliary Modeset Helpers
=========================


@ -15,6 +15,17 @@ be setup by initializing the following fields.
- struct drm_mode_config_funcs \*funcs;
Mode setting functions.
Mode Configuration
KMS Core Structures and Functions
=================================
.. kernel-doc:: drivers/gpu/drm/drm_mode_config.c
:export:
.. kernel-doc:: include/drm/drm_mode_config.h
:internal:
Modeset Base Object Abstraction
===============================
@ -24,18 +35,6 @@ Modeset Base Object Abstraction
.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
:export:
KMS Data Structures
===================
.. kernel-doc:: include/drm/drm_crtc.h
:internal:
KMS API Functions
=================
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
:export:
Atomic Mode Setting Function Reference
======================================
@ -45,6 +44,15 @@ Atomic Mode Setting Function Reference
.. kernel-doc:: include/drm/drm_atomic.h
:internal:
CRTC Abstraction
================
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
:export:
.. kernel-doc:: include/drm/drm_crtc.h
:internal:
Frame Buffer Abstraction
========================
@ -63,52 +71,17 @@ Frame Buffer Functions Reference
DRM Format Handling
===================
.. kernel-doc:: include/drm/drm_fourcc.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:
Dumb Buffer Objects
===================
The KMS API doesn't standardize backing storage object creation and
leaves it to driver-specific ioctls. Furthermore actually creating a
buffer object even for GEM-based drivers is done through a
driver-specific ioctl - GEM only has a common userspace interface for
sharing and destroying objects. While not an issue for full-fledged
graphics stacks that include device-specific userspace components (in
libdrm for instance), this limit makes DRM-based early boot graphics
unnecessarily complex.
Dumb objects partly alleviate the problem by providing a standard API to
create dumb buffers suitable for scanout, which can then be used to
create KMS frame buffers.
To support dumb objects drivers must implement the dumb_create,
dumb_destroy and dumb_map_offset operations.
- int (\*dumb_create)(struct drm_file \*file_priv, struct
drm_device \*dev, struct drm_mode_create_dumb \*args);
The dumb_create operation creates a driver object (GEM or TTM
handle) suitable for scanout based on the width, height and depth
from the struct :c:type:`struct drm_mode_create_dumb
<drm_mode_create_dumb>` argument. It fills the argument's
handle, pitch and size fields with a handle for the newly created
object and its line pitch and size in bytes.
- int (\*dumb_destroy)(struct drm_file \*file_priv, struct
drm_device \*dev, uint32_t handle);
The dumb_destroy operation destroys a dumb object created by
dumb_create.
- int (\*dumb_map_offset)(struct drm_file \*file_priv, struct
drm_device \*dev, uint32_t handle, uint64_t \*offset);
The dumb_map_offset operation associates an mmap fake offset with
the object given by the handle and returns it. Drivers must use the
:c:func:`drm_gem_create_mmap_offset()` function to associate
the fake offset as described in ?.
Note that dumb objects may not be used for gpu acceleration, as has been
attempted on some ARM embedded platforms. Such drivers really must have
a hardware-specific ioctl to allocate suitable buffer objects.
.. kernel-doc:: drivers/gpu/drm/drm_dumb_buffers.c
:doc: overview
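For a driver built on the GEM CMA helpers, supporting dumb buffers amounts to pointing the three hooks at the stock implementations. A minimal sketch (foo_driver is a hypothetical driver structure; the three helpers are the generic GEM/CMA ones):

	#include <drm/drm_gem_cma_helper.h>

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
		/* dumb buffers backed by physically contiguous (CMA) memory */
		.dumb_create		= drm_gem_cma_dumb_create,
		.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
		.dumb_destroy		= drm_gem_dumb_destroy,
		/* ... */
	};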
Plane Abstraction
=================
@ -287,6 +260,12 @@ Property Types and Blob Property Support
.. kernel-doc:: drivers/gpu/drm/drm_property.c
:export:
Standard Connector Properties
-----------------------------
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: standard connector properties
Plane Composition Properties
----------------------------
@ -308,6 +287,18 @@ Color Management Properties
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:export:
Tile Group Property
-------------------
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: Tile group
Explicit Fencing Properties
---------------------------
.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
:doc: explicit fencing properties
Existing KMS Properties
-----------------------


@ -216,3 +216,9 @@ interfaces. Especially since all hardware-acceleration interfaces to
userspace are driver specific for efficiency and other reasons these
interfaces can be rather substantial. Hence every driver has its own
chapter.
Testing and validation
======================
.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
:doc: CRC ABI


@ -49,6 +49,15 @@ Intel GVT-g Guest Support(vGPU)
.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
:internal:
Intel GVT-g Host Support(vGPU device model)
-------------------------------------------
.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
:doc: Intel GVT-g host support
.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
:internal:
Display Hardware Handling
=========================
@ -180,7 +189,7 @@ Display Refresh Rate Switching (DRRS)
DPIO
----
.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
.. kernel-doc:: drivers/gpu/drm/i915/intel_dpio_phy.c
:doc: DPIO
CSR firmware support for DMC
@ -249,19 +258,19 @@ Global GTT views
GTT Fences and Swizzling
------------------------
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:internal:
Global GTT Fence Handling
~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: fence register handling
Hardware Tiling and Swizzling Details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: tiling swizzling details
Object Tiling IOCTLs


@ -6,7 +6,7 @@
This document serves as a guide for device drivers writers on what the
sync_file API is, and how drivers can support it. Sync file is the carrier of
the fences(struct fence) that are needed to synchronize between drivers or
the fences(struct dma_fence) that are needed to synchronize between drivers or
across process boundaries.
The sync_file API is meant to be used to send and receive fence information
@ -32,9 +32,9 @@ in-fences and out-fences
Sync files can go either to or from userspace. When a sync_file is sent from
the driver to userspace we call the fences it contains 'out-fences'. They are
related to a buffer that the driver is processing or is going to process, so
the driver creates an out-fence to be able to notify, through fence_signal(),
when it has finished using (or processing) that buffer. Out-fences are fences
that the driver creates.
the driver creates an out-fence to be able to notify, through
dma_fence_signal(), when it has finished using (or processing) that buffer.
Out-fences are fences that the driver creates.
On the other hand if the driver receives fence(s) through a sync_file from
userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
@ -47,7 +47,7 @@ Creating Sync Files
When a driver needs to send an out-fence to userspace it creates a sync_file.
Interface:
struct sync_file *sync_file_create(struct fence *fence);
struct sync_file *sync_file_create(struct dma_fence *fence);
The caller passes the out-fence and gets back the sync_file. That is just the
first step, next it needs to install an fd on sync_file->file. So it gets an
@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences
from it.
Interface:
struct fence *sync_file_get_fence(int fd);
struct dma_fence *sync_file_get_fence(int fd);
The returned reference is owned by the caller and must be disposed of
afterwards using fence_put(). In case of error, a NULL is returned instead.
afterwards using dma_fence_put(). In case of error, a NULL is returned instead.
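Putting both directions together, an abridged sketch of a driver's fencing paths (error handling trimmed; 'fence' is a fence the driver created, and args->fd is an fd received from userspace through a hypothetical ioctl argument):

	struct sync_file *sync_file;
	struct dma_fence *in_fence;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	/* out-fence: wrap the fence and hand an fd to userspace */
	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}
	fd_install(fd, sync_file->file);

	/* in-fence: resolve an fd from userspace back to a fence */
	in_fence = sync_file_get_fence(args->fd);
	if (!in_fence)
		return -EINVAL;
	/* ... wait on or schedule work against in_fence ... */
	dma_fence_put(in_fence);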
References:
[1] struct sync_file in include/linux/sync_file.h


@ -3933,7 +3933,7 @@ F: include/linux/dma-buf*
F: include/linux/reservation.h
F: include/linux/*fence.h
F: Documentation/dma-buf-sharing.txt
T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
T: git git://anongit.freedesktop.org/drm/drm-misc
SYNC FILE FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
@ -3941,10 +3941,12 @@ R: Gustavo Padovan <gustavo@padovan.org>
S: Maintained
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
F: drivers/dma-buf/sync_file.c
F: drivers/dma-buf/sync_*
F: drivers/dma-buf/sw_sync.c
F: include/linux/sync_file.h
F: include/uapi/linux/sync_file.h
F: Documentation/sync_file.txt
T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
T: git git://anongit.freedesktop.org/drm/drm-misc
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
@ -4044,11 +4046,30 @@ F: Documentation/gpu/
F: include/drm/
F: include/uapi/drm/
DRM DRIVERS AND MISC GPU PATCHES
M: Daniel Vetter <daniel.vetter@intel.com>
M: Jani Nikula <jani.nikula@linux.intel.com>
M: Sean Paul <seanpaul@chromium.org>
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/gpu/
F: drivers/gpu/vga/
F: drivers/gpu/drm/*
F: include/drm/drm*
F: include/uapi/drm/drm*
DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/ast/
DRM DRIVERS FOR BRIDGE CHIPS
M: Archit Taneja <architt@codeaurora.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/bridge/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
@ -4084,7 +4105,6 @@ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@intel.com>
M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
W: https://01.org/linuxgraphics/
B: https://01.org/linuxgraphics/documentation/how-report-bugs
C: irc://chat.freenode.net/intel-gfx
@ -4096,6 +4116,16 @@ F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
L: igvt-g-dev@lists.01.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
L: dri-devel@lists.freedesktop.org
@ -4110,6 +4140,15 @@ S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
DRM DRIVERS FOR AMLOGIC SOCS
M: Neil Armstrong <narmstrong@baylibre.com>
L: dri-devel@lists.freedesktop.org
L: linux-amlogic@lists.infradead.org
W: http://linux-meson.com/
S: Supported
F: drivers/gpu/drm/meson/
F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
DRM DRIVERS FOR EXYNOS
M: Inki Dae <inki.dae@samsung.com>
M: Joonyoung Shim <jy0922.shim@samsung.com>
@ -4149,6 +4188,7 @@ F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
M: Xinliang Liu <z.liuxinliang@hisilicon.com>
M: Rongrong Zou <zourongrong@gmail.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
@ -4273,6 +4313,7 @@ DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach <l.stach@pengutronix.de>
R: Russell King <linux+etnaviv@armlinux.org.uk>
R: Christian Gmeiner <christian.gmeiner@gmail.com>
L: etnaviv@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv/
@ -4313,6 +4354,13 @@ S: Maintained
F: drivers/gpu/drm/tilcdc/
F: Documentation/devicetree/bindings/display/tilcdc/
DRM DRIVERS FOR ZTE ZX
M: Shawn Guo <shawnguo@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/zte/
F: Documentation/devicetree/bindings/display/zte,vou.txt
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@ -8307,6 +8355,12 @@ T: git git://linuxtv.org/mkrufky/tuners.git
S: Maintained
F: drivers/media/tuners/mxl5007t.*
MXSFB DRM DRIVER
M: Marek Vasut <marex@denx.de>
S: Supported
F: drivers/gpu/drm/mxsfb/
F: Documentation/devicetree/bindings/display/mxsfb-drm.txt
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
M: Hyong-Youb Kim <hykim@myri.com>
L: netdev@vger.kernel.org


@ -29,9 +29,20 @@ struct kvm_page_track_notifier_node {
* @gpa: the physical address written by guest.
* @new: the data was written to the address.
* @bytes: the written length.
* @node: this node
*/
void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
int bytes, struct kvm_page_track_notifier_node *node);
/*
* It is called when a memory slot is being moved or removed;
* users can drop write-protection for the pages in that memory slot.
*
* @kvm: the kvm where the memory slot is being moved or removed
* @slot: the memory slot being moved or removed
* @node: this node
*/
void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node);
};
void kvm_page_track_init(struct kvm *kvm);
@ -58,4 +69,5 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
#endif
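A consumer embeds a notifier node and registers it with the kvm instance (the GVT-g device model merged in this cycle is the first such user); a sketch with hypothetical names:

	static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
				   int bytes, struct kvm_page_track_notifier_node *node)
	{
		/* react to an intercepted guest write, e.g. re-shadow a GPU page table */
	}

	static struct kvm_page_track_notifier_node my_tracker = {
		.track_write = my_track_write,
	};

	/* during setup */
	kvm_page_track_register_notifier(kvm, &my_tracker);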


@ -4405,7 +4405,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
}
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes)
const u8 *new, int bytes,
struct kvm_page_track_notifier_node *node)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
@ -4617,11 +4618,19 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
init_kvm_mmu(vcpu);
}
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
kvm_mmu_invalidate_zap_all_pages(kvm);
}
void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
}


@ -106,6 +106,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
/*
* remove the guest page from the tracking pool which stops the interception
@ -135,6 +136,7 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
*/
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
/*
* check if the corresponding access on the specified guest page is tracked.
@ -181,6 +183,7 @@ kvm_page_track_register_notifier(struct kvm *kvm,
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
spin_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
/*
* stop receiving the event interception. It is the opposed operation of
@ -199,6 +202,7 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
}
EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
/*
* Notify the node that write access is intercepted and write emulation is
@ -222,6 +226,31 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
if (n->track_write)
n->track_write(vcpu, gpa, new, bytes);
n->track_write(vcpu, gpa, new, bytes, n);
srcu_read_unlock(&head->track_srcu, idx);
}
/*
* Notify the node that memory slot is being removed or moved so that it can
* drop write-protection for the pages in the memory slot.
*
* The node should figure out by itself whether it has any write-protected
* pages in this slot.
*/
void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
struct kvm_page_track_notifier_head *head;
struct kvm_page_track_notifier_node *n;
int idx;
head = &kvm->arch.track_notifier_head;
if (hlist_empty(&head->track_notifier_list))
return;
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
if (n->track_flush_slot)
n->track_flush_slot(kvm, slot, n);
srcu_read_unlock(&head->track_srcu, idx);
}


@ -8175,7 +8175,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
kvm_mmu_invalidate_zap_all_pages(kvm);
kvm_page_track_flush_slot(kvm, slot);
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)


@ -251,11 +251,11 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
config FENCE_TRACE
bool "Enable verbose FENCE_TRACE messages"
config DMA_FENCE_TRACE
bool "Enable verbose DMA_FENCE_TRACE messages"
depends on DMA_SHARED_BUFFER
help
Enable the FENCE_TRACE printks. This will add extra
Enable the DMA_FENCE_TRACE printks. This will add extra
spam to the console log, but will make it easier to diagnose
lockup related problems for dma-buffers shared across multiple
devices.


@ -7,7 +7,7 @@ config SYNC_FILE
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit synchronization via
userspace. It enables send/receive 'struct fence' objects to/from
userspace. It enables send/receive 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.


@ -1,3 +1,3 @@
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o


@ -25,7 +25,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
return base + offset;
}
static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
unsigned long flags;
@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
struct fence *fence_excl;
struct dma_fence *fence_excl;
unsigned long events;
unsigned shared_count, seq;
@ -187,20 +187,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
spin_unlock_irq(&dmabuf->poll.lock);
if (events & pevents) {
if (!fence_get_rcu(fence_excl)) {
if (!dma_fence_get_rcu(fence_excl)) {
/* force a recheck */
events &= ~pevents;
dma_buf_poll_cb(NULL, &dcb->cb);
} else if (!fence_add_callback(fence_excl, &dcb->cb,
dma_buf_poll_cb)) {
} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
dma_buf_poll_cb)) {
events &= ~pevents;
fence_put(fence_excl);
dma_fence_put(fence_excl);
} else {
/*
* No callback queued, wake up any additional
* waiters.
*/
fence_put(fence_excl);
dma_fence_put(fence_excl);
dma_buf_poll_cb(NULL, &dcb->cb);
}
}
@ -222,9 +222,9 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
goto out;
for (i = 0; i < shared_count; ++i) {
struct fence *fence = rcu_dereference(fobj->shared[i]);
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
if (!fence_get_rcu(fence)) {
if (!dma_fence_get_rcu(fence)) {
/*
* fence refcount dropped to zero, this means
* that fobj has been freed
@ -235,13 +235,13 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
dma_buf_poll_cb(NULL, &dcb->cb);
break;
}
if (!fence_add_callback(fence, &dcb->cb,
dma_buf_poll_cb)) {
fence_put(fence);
if (!dma_fence_add_callback(fence, &dcb->cb,
dma_buf_poll_cb)) {
dma_fence_put(fence);
events &= ~POLLOUT;
break;
}
fence_put(fence);
dma_fence_put(fence);
}
/* No callback queued, wake up any additional waiters. */


@ -1,5 +1,5 @@
/*
* fence-array: aggregate fences to be waited together
* dma-fence-array: aggregate fences to be waited together
*
* Copyright (C) 2016 Collabora Ltd
* Copyright (C) 2016 Advanced Micro Devices, Inc.
@ -19,35 +19,34 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/fence-array.h>
#include <linux/dma-fence-array.h>
static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
static const char *fence_array_get_driver_name(struct fence *fence)
static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
return "fence_array";
return "dma_fence_array";
}
static const char *fence_array_get_timeline_name(struct fence *fence)
static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
static void dma_fence_array_cb_func(struct dma_fence *f,
struct dma_fence_cb *cb)
{
struct fence_array_cb *array_cb =
container_of(cb, struct fence_array_cb, cb);
struct fence_array *array = array_cb->array;
struct dma_fence_array_cb *array_cb =
container_of(cb, struct dma_fence_array_cb, cb);
struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
fence_signal(&array->base);
fence_put(&array->base);
dma_fence_signal(&array->base);
dma_fence_put(&array->base);
}
static bool fence_array_enable_signaling(struct fence *fence)
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
struct fence_array *array = to_fence_array(fence);
struct fence_array_cb *cb = (void *)(&array[1]);
struct dma_fence_array *array = to_dma_fence_array(fence);
struct dma_fence_array_cb *cb = (void *)(&array[1]);
unsigned i;
for (i = 0; i < array->num_fences; ++i) {
@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
* until we signal the array as complete (but that is now
* insufficient).
*/
fence_get(&array->base);
if (fence_add_callback(array->fences[i], &cb[i].cb,
fence_array_cb_func)) {
fence_put(&array->base);
dma_fence_get(&array->base);
if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
dma_fence_array_cb_func)) {
dma_fence_put(&array->base);
if (atomic_dec_and_test(&array->num_pending))
return false;
}
@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence)
return true;
}
static bool fence_array_signaled(struct fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct fence_array *array = to_fence_array(fence);
struct dma_fence_array *array = to_dma_fence_array(fence);
return atomic_read(&array->num_pending) <= 0;
}
static void fence_array_release(struct fence *fence)
static void dma_fence_array_release(struct dma_fence *fence)
{
struct fence_array *array = to_fence_array(fence);
struct dma_fence_array *array = to_dma_fence_array(fence);
unsigned i;
for (i = 0; i < array->num_fences; ++i)
fence_put(array->fences[i]);
dma_fence_put(array->fences[i]);
kfree(array->fences);
fence_free(fence);
dma_fence_free(fence);
}
const struct fence_ops fence_array_ops = {
.get_driver_name = fence_array_get_driver_name,
.get_timeline_name = fence_array_get_timeline_name,
.enable_signaling = fence_array_enable_signaling,
.signaled = fence_array_signaled,
.wait = fence_default_wait,
.release = fence_array_release,
const struct dma_fence_ops dma_fence_array_ops = {
.get_driver_name = dma_fence_array_get_driver_name,
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
.wait = dma_fence_default_wait,
.release = dma_fence_array_release,
};
EXPORT_SYMBOL(fence_array_ops);
EXPORT_SYMBOL(dma_fence_array_ops);
/**
* fence_array_create - Create a custom fence array
* dma_fence_array_create - Create a custom fence array
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
* Allocate a fence_array object and initialize the base fence with fence_init().
* Allocate a dma_fence_array object and initialize the base fence with
* dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
* array is taken and fence_put() is used on each fence on release.
* array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
*/
struct fence_array *fence_array_create(int num_fences, struct fence **fences,
u64 context, unsigned seqno,
bool signal_on_any)
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
bool signal_on_any)
{
struct fence_array *array;
struct dma_fence_array *array;
size_t size = sizeof(*array);
/* Allocate the callback structures behind the array. */
size += num_fences * sizeof(struct fence_array_cb);
size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
if (!array)
return NULL;
spin_lock_init(&array->lock);
fence_init(&array->base, &fence_array_ops, &array->lock,
context, seqno);
dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
context, seqno);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
return array;
}
EXPORT_SYMBOL(fence_array_create);
EXPORT_SYMBOL(dma_fence_array_create);
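A caller aggregates fences and then treats the array's base as an ordinary fence. A sketch under the renamed API (n and fences[] are assumed to be prepared by the caller, which transfers ownership of the fences array to the create call):

	struct dma_fence_array *array;

	array = dma_fence_array_create(n, fences,
				       dma_fence_context_alloc(1), 1,
				       false); /* signal when *all* fences signal */
	if (!array)
		return -ENOMEM;

	/* wait on the aggregate like on any other fence */
	dma_fence_wait(&array->base, false);
	dma_fence_put(&array->base);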


@ -21,13 +21,13 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>
#include <linux/dma-fence.h>
#define CREATE_TRACE_POINTS
#include <trace/events/fence.h>
#include <trace/events/dma_fence.h>
EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
/*
* fence context counter: each execution context should have its own
@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
* fence_context_alloc - allocate an array of fence contexts
* dma_fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
u64 fence_context_alloc(unsigned num)
u64 dma_fence_context_alloc(unsigned num)
{
BUG_ON(!num);
return atomic64_add_return(num, &fence_context_counter) - num;
return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
EXPORT_SYMBOL(dma_fence_context_alloc);
/**
* fence_signal_locked - signal completion of a fence
* dma_fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
* fence_wait() calls and run all the callbacks added with
* fence_add_callback(). Can be called multiple times, but since a fence
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*
* Unlike fence_signal, this function must be called with fence->lock held.
* Unlike dma_fence_signal, this function must be called with fence->lock held.
*/
int fence_signal_locked(struct fence *fence)
int dma_fence_signal_locked(struct dma_fence *fence)
{
struct fence_cb *cur, *tmp;
struct dma_fence_cb *cur, *tmp;
int ret = 0;
lockdep_assert_held(fence->lock);
if (WARN_ON(!fence))
return -EINVAL;
@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
smp_mb__before_atomic();
}
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
/*
* we might have raced with the unlocked fence_signal,
* we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
} else
trace_fence_signaled(fence);
trace_dma_fence_signaled(fence);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
}
return ret;
}
EXPORT_SYMBOL(fence_signal_locked);
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
* fence_signal - signal completion of a fence
* dma_fence_signal - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
* fence_wait() calls and run all the callbacks added with
* fence_add_callback(). Can be called multiple times, but since a fence
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*/
int fence_signal(struct fence *fence)
int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
smp_mb__before_atomic();
}
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
trace_fence_signaled(fence);
trace_dma_fence_signaled(fence);
if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
struct fence_cb *cur, *tmp;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
struct dma_fence_cb *cur, *tmp;
spin_lock_irqsave(fence->lock, flags);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
}
return 0;
}
EXPORT_SYMBOL(fence_signal);
EXPORT_SYMBOL(dma_fence_signal);
/**
* fence_wait_timeout - sleep until the fence gets signaled
* dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
@ -152,78 +154,76 @@ EXPORT_SYMBOL(fence_signal);
* freed before return, resulting in undefined behavior.
*/
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
signed long ret;
if (WARN_ON(timeout < 0))
return -EINVAL;
if (timeout == 0)
return fence_is_signaled(fence);
trace_fence_wait_start(fence);
trace_dma_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
trace_fence_wait_end(fence);
trace_dma_fence_wait_end(fence);
return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);
EXPORT_SYMBOL(dma_fence_wait_timeout);
void fence_release(struct kref *kref)
void dma_fence_release(struct kref *kref)
{
struct fence *fence =
container_of(kref, struct fence, refcount);
struct dma_fence *fence =
container_of(kref, struct dma_fence, refcount);
trace_fence_destroy(fence);
trace_dma_fence_destroy(fence);
BUG_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
else
fence_free(fence);
dma_fence_free(fence);
}
EXPORT_SYMBOL(fence_release);
EXPORT_SYMBOL(dma_fence_release);
void fence_free(struct fence *fence)
void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
EXPORT_SYMBOL(dma_fence_free);
/**
* fence_enable_sw_signaling - enable signaling on fence
* dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: [in] the fence to enable
*
* this will request for sw signaling to be enabled, to make the fence
* complete as soon as possible
*/
void fence_enable_sw_signaling(struct fence *fence)
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
trace_fence_enable_signal(fence);
if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
if (!fence->ops->enable_signaling(fence))
fence_signal_locked(fence);
dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
}
}
EXPORT_SYMBOL(fence_enable_sw_signaling);
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
* fence_add_callback - add a callback to be called when the fence
* dma_fence_add_callback - add a callback to be called when the fence
* is signaled
* @fence: [in] the fence to wait on
* @cb: [in] the callback to register
* @func: [in] the function to call
*
* cb will be initialized by fence_add_callback, no initialization
* cb will be initialized by dma_fence_add_callback, no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
@ -232,15 +232,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
* *not* call the callback)
*
* Add a software callback to the fence. Same restrictions apply to
* refcount as it does to fence_wait, however the caller doesn't need to
* refcount as it does to dma_fence_wait, however the caller doesn't need to
* keep a refcount to fence afterwards: when software access is enabled,
* the creator of the fence is required to keep the fence alive until
* after it signals with fence_signal. The callback itself can be called
* after it signals with dma_fence_signal. The callback itself can be called
* from irq context.
*
*/
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
fence_func_t func)
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
dma_fence_func_t func)
{
unsigned long flags;
int ret = 0;
@ -249,22 +249,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
if (WARN_ON(!fence || !func))
return -EINVAL;
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
INIT_LIST_HEAD(&cb->node);
return -ENOENT;
}
spin_lock_irqsave(fence->lock, flags);
was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
else if (!was_set) {
trace_fence_enable_signal(fence);
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
fence_signal_locked(fence);
dma_fence_signal_locked(fence);
ret = -ENOENT;
}
}
@ -278,10 +279,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
return ret;
}
EXPORT_SYMBOL(fence_add_callback);
EXPORT_SYMBOL(dma_fence_add_callback);
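/*
 * A minimal sketch (not part of this diff): embedding struct dma_fence_cb
 * and recovering the container in the callback, as default_wait_cb does
 * below; "my_waiter" is a hypothetical type.
 */
#include <linux/completion.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>

struct my_waiter {
	struct dma_fence_cb cb;
	struct completion done;
};

static void my_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_waiter *w = container_of(cb, struct my_waiter, cb);

	complete(&w->done);		/* may be called from irq context */
}

static int my_register(struct dma_fence *f, struct my_waiter *w)
{
	init_completion(&w->done);
	/* -ENOENT here means the fence had already signaled */
	return dma_fence_add_callback(f, &w->cb, my_fence_cb);
}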
/**
* fence_remove_callback - remove a callback from the signaling list
* dma_fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
*
@ -296,7 +297,7 @@ EXPORT_SYMBOL(fence_add_callback);
* with a reference held to the fence.
*/
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
unsigned long flags;
bool ret;
@ -311,15 +312,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
return ret;
}
EXPORT_SYMBOL(fence_remove_callback);
EXPORT_SYMBOL(dma_fence_remove_callback);
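/*
 * A minimal sketch (not part of this diff), continuing the hypothetical
 * my_waiter example above: true means the callback was unlinked before it
 * ran, false means it already ran (or is running) and must be waited out.
 */
static void my_cancel(struct dma_fence *f, struct my_waiter *w)
{
	if (!dma_fence_remove_callback(f, &w->cb))
		wait_for_completion(&w->done);
}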
struct default_wait_cb {
struct fence_cb base;
struct dma_fence_cb base;
struct task_struct *task;
};
static void
fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct default_wait_cb *wait =
container_of(cb, struct default_wait_cb, base);
@ -328,25 +329,27 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
}
/**
* fence_default_wait - default sleep until the fence gets signaled
* dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success.
* remaining timeout in jiffies on success. If timeout is zero, one is
* returned when the fence is already signaled, for consistency with other
* functions taking a jiffies timeout.
*/
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout;
signed long ret = timeout ? timeout : 1;
bool was_set;
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return timeout;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return ret;
spin_lock_irqsave(fence->lock, flags);
@ -355,25 +358,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
goto out;
}
was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
if (!was_set) {
trace_fence_enable_signal(fence);
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
fence_signal_locked(fence);
dma_fence_signal_locked(fence);
goto out;
}
}
cb.base.func = fence_default_wait_cb;
cb.base.func = dma_fence_default_wait_cb;
cb.task = current;
list_add(&cb.base.node, &fence->cb_list);
while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
@ -395,28 +399,34 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
spin_unlock_irqrestore(fence->lock, flags);
return ret;
}
EXPORT_SYMBOL(fence_default_wait);
EXPORT_SYMBOL(dma_fence_default_wait);
static bool
fence_test_signaled_any(struct fence **fences, uint32_t count)
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
uint32_t *idx)
{
int i;
for (i = 0; i < count; ++i) {
struct fence *fence = fences[i];
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
struct dma_fence *fence = fences[i];
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (idx)
*idx = i;
return true;
}
}
return false;
}
/**
* fence_wait_any_timeout - sleep until any fence gets signaled
* dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
* @fences: [in] array of fences to wait on
* @count: [in] number of fences to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
* @idx: [out] the first signaled fence index, meaningful only on
* positive return
*
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@ -427,8 +437,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
* fence might be freed before return, resulting in undefined behavior.
*/
signed long
fence_wait_any_timeout(struct fence **fences, uint32_t count,
bool intr, signed long timeout)
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
bool intr, signed long timeout, uint32_t *idx)
{
struct default_wait_cb *cb;
signed long ret = timeout;
@ -439,8 +449,11 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
if (fence_is_signaled(fences[i]))
if (dma_fence_is_signaled(fences[i])) {
if (idx)
*idx = i;
return 1;
}
return 0;
}
@ -452,17 +465,19 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
}
for (i = 0; i < count; ++i) {
struct fence *fence = fences[i];
struct dma_fence *fence = fences[i];
if (fence->ops->wait != fence_default_wait) {
if (fence->ops->wait != dma_fence_default_wait) {
ret = -EINVAL;
goto fence_rm_cb;
}
cb[i].task = current;
if (fence_add_callback(fence, &cb[i].base,
fence_default_wait_cb)) {
if (dma_fence_add_callback(fence, &cb[i].base,
dma_fence_default_wait_cb)) {
/* This fence is already signaled */
if (idx)
*idx = i;
goto fence_rm_cb;
}
}
@ -473,7 +488,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
else
set_current_state(TASK_UNINTERRUPTIBLE);
if (fence_test_signaled_any(fences, count))
if (dma_fence_test_signaled_any(fences, count, idx))
break;
ret = schedule_timeout(ret);
@ -486,34 +501,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
fence_rm_cb:
while (i-- > 0)
fence_remove_callback(fences[i], &cb[i].base);
dma_fence_remove_callback(fences[i], &cb[i].base);
err_free_cb:
kfree(cb);
return ret;
}
EXPORT_SYMBOL(fence_wait_any_timeout);
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
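/*
 * A minimal sketch (not part of this diff) of the new idx out-parameter:
 * wait until either fence signals and report which one.
 */
#include <linux/dma-fence.h>
#include <linux/sched.h>

static long my_wait_first(struct dma_fence *a, struct dma_fence *b,
			  uint32_t *first)
{
	struct dma_fence *fences[] = { a, b };

	/* *first is only meaningful when the return value is positive */
	return dma_fence_wait_any_timeout(fences, 2, false,
					  MAX_SCHEDULE_TIMEOUT, first);
}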
/**
* fence_init - Initialize a custom fence.
* dma_fence_init - Initialize a custom fence.
* @fence: [in] the fence to initialize
* @ops: [in] the fence_ops for operations on this fence
* @ops: [in] the dma_fence_ops for operations on this fence
* @lock: [in] the irqsafe spinlock to use for locking this fence
* @context: [in] the execution context this fence is run on
* @seqno: [in] a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
* refcount again if fence_ops.enable_signaling gets called. This can
* refcount again if dma_fence_ops.enable_signaling gets called. This can
* be used for implementing other types of fences.
*
* context and seqno are used for easy comparison between fences, allowing
* to check which fence is later by simply using fence_later.
* to check which fence is later by simply using dma_fence_later.
*/
void
fence_init(struct fence *fence, const struct fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@ -527,6 +542,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
fence->seqno = seqno;
fence->flags = 0UL;
trace_fence_init(fence);
trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
EXPORT_SYMBOL(dma_fence_init);
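/*
 * A minimal sketch (not part of this diff) of a driver fence built on the
 * renamed ops; all my_* names are hypothetical. enable_signaling and wait
 * are mandatory here, per the BUG_ON in dma_fence_init() above.
 */
#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my_driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;	/* hardware will signal later; keep waiting */
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	.enable_signaling = my_enable_signaling,
	.wait = dma_fence_default_wait,
};

static DEFINE_SPINLOCK(my_fence_lock);

static void my_fence_setup(struct dma_fence *fence, u64 context,
			   unsigned int seqno)
{
	/* context typically comes from dma_fence_context_alloc(1) */
	dma_fence_init(fence, &my_fence_ops, &my_fence_lock, context, seqno);
}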


@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
struct fence *fence)
struct dma_fence *fence)
{
u32 i;
fence_get(fence);
dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < fobj->shared_count; ++i) {
struct fence *old_fence;
struct dma_fence *old_fence;
old_fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
write_seqcount_end(&obj->seq);
preempt_enable();
fence_put(old_fence);
dma_fence_put(old_fence);
return;
}
}
@ -143,12 +143,12 @@ static void
reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *old,
struct reservation_object_list *fobj,
struct fence *fence)
struct dma_fence *fence)
{
unsigned i;
struct fence *old_fence = NULL;
struct dma_fence *old_fence = NULL;
fence_get(fence);
dma_fence_get(fence);
if (!old) {
RCU_INIT_POINTER(fobj->shared[0], fence);
@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
fobj->shared_count = old->shared_count;
for (i = 0; i < old->shared_count; ++i) {
struct fence *check;
struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
@ -196,7 +196,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
kfree_rcu(old, rcu);
if (old_fence)
fence_put(old_fence);
dma_fence_put(old_fence);
}
/**
@ -208,7 +208,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
* reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence)
struct dma_fence *fence)
{
struct reservation_object_list *old, *fobj = obj->staged;
@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
struct dma_fence *fence)
{
struct fence *old_fence = reservation_object_get_excl(obj);
struct dma_fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
u32 i = 0;
@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
i = old->shared_count;
if (fence)
fence_get(fence);
dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
fence_put(rcu_dereference_protected(old->shared[i],
dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
if (old_fence)
fence_put(old_fence);
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
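/*
 * A minimal sketch (not part of this diff) of filling a reservation
 * object under its ww_mutex, as the functions above require; the
 * read/write split is illustrative.
 */
#include <linux/reservation.h>

static int my_attach_fences(struct reservation_object *resv,
			    struct dma_fence *read_fence,
			    struct dma_fence *write_fence)
{
	int r;

	ww_mutex_lock(&resv->lock, NULL);

	/* a shared slot must be reserved before adding a shared fence */
	r = reservation_object_reserve_shared(resv);
	if (!r) {
		reservation_object_add_shared_fence(resv, read_fence);
		/* the exclusive fence drops all shared fences */
		reservation_object_add_excl_fence(resv, write_fence);
	}

	ww_mutex_unlock(&resv->lock);
	return r;
}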
@ -276,26 +276,32 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* Zero or -errno
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct fence **pfence_excl,
struct dma_fence **pfence_excl,
unsigned *pshared_count,
struct fence ***pshared)
struct dma_fence ***pshared)
{
unsigned shared_count = 0;
unsigned retry = 1;
struct fence **shared = NULL, *fence_excl = NULL;
int ret = 0;
struct dma_fence **shared = NULL;
struct dma_fence *fence_excl;
unsigned int shared_count;
int ret = 1;
while (retry) {
do {
struct reservation_object_list *fobj;
unsigned seq;
unsigned int i;
seq = read_seqcount_begin(&obj->seq);
shared_count = i = 0;
rcu_read_lock();
seq = read_seqcount_begin(&obj->seq);
fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl && !dma_fence_get_rcu(fence_excl))
goto unlock;
fobj = rcu_dereference(obj->fence);
if (fobj) {
struct fence **nshared;
struct dma_fence **nshared;
size_t sz = sizeof(*shared) * fobj->shared_max;
nshared = krealloc(shared, sz,
@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
ret = -ENOMEM;
shared_count = 0;
break;
}
shared = nshared;
memcpy(shared, fobj->shared, sz);
shared_count = fobj->shared_count;
} else
shared_count = 0;
fence_excl = rcu_dereference(obj->fence_excl);
retry = read_seqcount_retry(&obj->seq, seq);
if (retry)
goto unlock;
if (!fence_excl || fence_get_rcu(fence_excl)) {
unsigned i;
for (i = 0; i < shared_count; ++i) {
if (fence_get_rcu(shared[i]))
continue;
/* uh oh, refcount failed, abort and retry */
while (i--)
fence_put(shared[i]);
if (fence_excl) {
fence_put(fence_excl);
fence_excl = NULL;
}
retry = 1;
break;
shared[i] = rcu_dereference(fobj->shared[i]);
if (!dma_fence_get_rcu(shared[i]))
break;
}
} else
retry = 1;
}
if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
while (i--)
dma_fence_put(shared[i]);
dma_fence_put(fence_excl);
goto unlock;
}
ret = 0;
unlock:
rcu_read_unlock();
}
*pshared_count = shared_count;
if (shared_count)
*pshared = shared;
else {
*pshared = NULL;
} while (ret);
if (!shared_count) {
kfree(shared);
shared = NULL;
}
*pshared_count = shared_count;
*pshared = shared;
*pfence_excl = fence_excl;
return ret;
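/*
 * A minimal sketch (not part of this diff) of consuming the snapshot:
 * the caller receives one reference per returned fence and must drop
 * them, then free the shared array.
 */
#include <linux/reservation.h>
#include <linux/slab.h>

static void my_drop_fence_snapshot(struct reservation_object *resv)
{
	struct dma_fence *excl, **shared;
	unsigned int i, count;

	if (reservation_object_get_fences_rcu(resv, &excl, &count, &shared))
		return;			/* -ENOMEM */

	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);
	dma_fence_put(excl);		/* NULL-safe */
	kfree(shared);
}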
@ -377,12 +368,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
{
struct fence *fence;
struct dma_fence *fence;
unsigned seq, shared_count, i = 0;
long ret = timeout;
if (!timeout)
return reservation_object_test_signaled_rcu(obj, wait_all);
long ret = timeout ? timeout : 1;
retry:
fence = NULL;
@ -397,20 +385,18 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
if (fobj)
shared_count = fobj->shared_count;
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
for (i = 0; i < shared_count; ++i) {
struct fence *lfence = rcu_dereference(fobj->shared[i]);
struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&lfence->flags))
continue;
if (!fence_get_rcu(lfence))
if (!dma_fence_get_rcu(lfence))
goto unlock_retry;
if (fence_is_signaled(lfence)) {
fence_put(lfence);
if (dma_fence_is_signaled(lfence)) {
dma_fence_put(lfence);
continue;
}
@ -420,18 +406,16 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
}
if (!shared_count) {
struct fence *fence_excl = rcu_dereference(obj->fence_excl);
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl &&
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
if (!fence_get_rcu(fence_excl))
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence_excl->flags)) {
if (!dma_fence_get_rcu(fence_excl))
goto unlock_retry;
if (fence_is_signaled(fence_excl))
fence_put(fence_excl);
if (dma_fence_is_signaled(fence_excl))
dma_fence_put(fence_excl);
else
fence = fence_excl;
}
@ -439,8 +423,13 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
rcu_read_unlock();
if (fence) {
ret = fence_wait_timeout(fence, intr, ret);
fence_put(fence);
if (read_seqcount_retry(&obj->seq, seq)) {
dma_fence_put(fence);
goto retry;
}
ret = dma_fence_wait_timeout(fence, intr, ret);
dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
goto retry;
}
@ -454,18 +443,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
reservation_object_test_signaled_single(struct dma_fence *passed_fence)
{
struct fence *fence, *lfence = passed_fence;
struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
fence = fence_get_rcu(lfence);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
fence = dma_fence_get_rcu(lfence);
if (!fence)
return -1;
ret = !!fence_is_signaled(fence);
fence_put(fence);
ret = !!dma_fence_is_signaled(fence);
dma_fence_put(fence);
}
return ret;
}
@ -484,12 +473,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
unsigned seq, shared_count;
int ret = true;
int ret;
rcu_read_lock();
retry:
ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
if (test_all) {
unsigned i;
@ -500,46 +490,35 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
if (fobj)
shared_count = fobj->shared_count;
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
for (i = 0; i < shared_count; ++i) {
struct fence *fence = rcu_dereference(fobj->shared[i]);
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
if (ret < 0)
goto unlock_retry;
goto retry;
else if (!ret)
break;
}
/*
* There could be a read_seqcount_retry here, but nothing cares
* about whether it's the old or newer fence pointers that are
* signaled. That race could still have happened after checking
* read_seqcount_retry. If you care, use ww_mutex_lock.
*/
if (read_seqcount_retry(&obj->seq, seq))
goto retry;
}
if (!shared_count) {
struct fence *fence_excl = rcu_dereference(obj->fence_excl);
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
ret = reservation_object_test_signaled_single(
fence_excl);
if (ret < 0)
goto unlock_retry;
goto retry;
if (read_seqcount_retry(&obj->seq, seq))
goto retry;
}
}
rcu_read_unlock();
return ret;
unlock_retry:
rcu_read_unlock();
goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
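/*
 * A minimal sketch (not part of this diff): lock-free poll first, then a
 * bounded wait; the 500 ms timeout is illustrative.
 */
#include <linux/jiffies.h>
#include <linux/reservation.h>

static long my_poll_or_wait(struct reservation_object *resv, bool all)
{
	if (reservation_object_test_signaled_rcu(resv, all))
		return 1;	/* already idle */

	/* > 0 jiffies left on success, 0 on timeout, < 0 on error */
	return reservation_object_wait_timeout_rcu(resv, all, true,
						   msecs_to_jiffies(500));
}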


@ -21,35 +21,35 @@
#include <linux/export.h>
#include <linux/seqno-fence.h>
static const char *seqno_fence_get_driver_name(struct fence *fence)
static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
static const char *seqno_fence_get_timeline_name(struct fence *fence)
static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
static bool seqno_enable_signaling(struct fence *fence)
static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
static bool seqno_signaled(struct fence *fence)
static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
static void seqno_release(struct fence *fence)
static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence)
if (f->ops->release)
f->ops->release(fence);
else
fence_free(&f->base);
dma_fence_free(&f->base);
}
static signed long seqno_wait(struct fence *fence, bool intr,
signed long timeout)
static signed long seqno_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
return f->ops->wait(fence, intr, timeout);
}
const struct fence_ops seqno_fence_ops = {
const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,


@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
static const struct fence_ops timeline_fence_ops;
static const struct dma_fence_ops timeline_fence_ops;
static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
{
if (fence->ops != &timeline_fence_ops)
return NULL;
@ -84,7 +84,7 @@ static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
* Creates a new sync_timeline. Returns the sync_timeline object or NULL in
* case of error.
*/
struct sync_timeline *sync_timeline_create(const char *name)
static struct sync_timeline *sync_timeline_create(const char *name)
{
struct sync_timeline *obj;
@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
return NULL;
kref_init(&obj->kref);
obj->context = fence_context_alloc(1);
obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
INIT_LIST_HEAD(&obj->child_list_head);
@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
list_for_each_entry_safe(pt, next, &obj->active_list_head,
active_list) {
if (fence_is_signaled_locked(&pt->base))
if (dma_fence_is_signaled_locked(&pt->base))
list_del_init(&pt->active_list);
}
@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
spin_lock_irqsave(&obj->child_list_lock, flags);
sync_timeline_get(obj);
fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
obj->context, value);
dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
obj->context, value);
list_add_tail(&pt->child_list, &obj->child_list_head);
INIT_LIST_HEAD(&pt->active_list);
spin_unlock_irqrestore(&obj->child_list_lock, flags);
return pt;
}
static const char *timeline_fence_get_driver_name(struct fence *fence)
static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
}
static const char *timeline_fence_get_timeline_name(struct fence *fence)
static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
return parent->name;
}
static void timeline_fence_release(struct fence *fence)
static void timeline_fence_release(struct dma_fence *fence)
{
struct sync_pt *pt = fence_to_sync_pt(fence);
struct sync_timeline *parent = fence_parent(fence);
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
fence_free(fence);
dma_fence_free(fence);
}
static bool timeline_fence_signaled(struct fence *fence)
static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
return (fence->seqno > parent->value) ? false : true;
}
static bool timeline_fence_enable_signaling(struct fence *fence)
static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
struct sync_pt *pt = fence_to_sync_pt(fence);
struct sync_timeline *parent = fence_parent(fence);
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
if (timeline_fence_signaled(fence))
return false;
@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
return true;
}
static void timeline_fence_value_str(struct fence *fence,
static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
snprintf(str, size, "%d", fence->seqno);
}
static void timeline_fence_timeline_value_str(struct fence *fence,
static void timeline_fence_timeline_value_str(struct dma_fence *fence,
char *str, int size)
{
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
snprintf(str, size, "%d", parent->value);
}
static const struct fence_ops timeline_fence_ops = {
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
.wait = fence_default_wait,
.wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
@ -316,8 +316,8 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
}
sync_file = sync_file_create(&pt->base);
dma_fence_put(&pt->base);
if (!sync_file) {
fence_put(&pt->base);
err = -ENOMEM;
goto err;
}


@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
return "error";
}
static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
static void sync_print_fence(struct seq_file *s,
struct dma_fence *fence, bool show)
{
int status = 1;
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
if (fence_is_signaled_locked(fence))
if (dma_fence_is_signaled_locked(fence))
status = fence->status;
seq_printf(s, " %s%sfence %s",
@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
sync_status_str(!fence_is_signaled(sync_file->fence)));
sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
if (fence_is_array(sync_file->fence)) {
struct fence_array *array = to_fence_array(sync_file->fence);
if (dma_fence_is_array(sync_file->fence)) {
struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
for (i = 0; i < array->num_fences; ++i)
sync_print_fence(s, array->fences[i], true);


@ -15,7 +15,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/fence.h>
#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>
@ -45,10 +45,9 @@ struct sync_timeline {
struct list_head sync_timeline_list;
};
static inline struct sync_timeline *fence_parent(struct fence *fence)
static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
return container_of(fence->lock, struct sync_timeline,
child_list_lock);
return container_of(fence->lock, struct sync_timeline, child_list_lock);
}
/**
@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
* @active_list: sync timeline active child's list
*/
struct sync_pt {
struct fence base;
struct dma_fence base;
struct list_head child_list;
struct list_head active_list;
};


@ -54,7 +54,7 @@ static struct sync_file *sync_file_alloc(void)
return NULL;
}
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct sync_file *sync_file;
@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
* takes ownership of @fence. The sync_file can be released with
* fput(sync_file->file). Returns the sync_file or NULL in case of error.
*/
struct sync_file *sync_file_create(struct fence *fence)
struct sync_file *sync_file_create(struct dma_fence *fence)
{
struct sync_file *sync_file;
@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
if (!sync_file)
return NULL;
sync_file->fence = fence;
sync_file->fence = dma_fence_get(fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
@ -121,16 +121,16 @@ static struct sync_file *sync_file_fdget(int fd)
* Ensures @fd references a valid sync_file and returns a fence that
* represents all fence in the sync_file. On error NULL is returned.
*/
struct fence *sync_file_get_fence(int fd)
struct dma_fence *sync_file_get_fence(int fd)
{
struct sync_file *sync_file;
struct fence *fence;
struct dma_fence *fence;
sync_file = sync_file_fdget(fd);
if (!sync_file)
return NULL;
fence = fence_get(sync_file->fence);
fence = dma_fence_get(sync_file->fence);
fput(sync_file->file);
return fence;
@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
EXPORT_SYMBOL(sync_file_get_fence);
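/*
 * A minimal sketch (not part of this diff) of fence import/export through
 * sync_file fds; the surrounding ioctl plumbing is omitted.
 */
#include <linux/sync_file.h>

static struct sync_file *my_export(struct dma_fence *fence)
{
	/* after this patch, sync_file_create() takes its own reference */
	return sync_file_create(fence);
}

static int my_import(int fd, struct dma_fence **out)
{
	/* returns a new reference, or NULL for a bad fd */
	*out = sync_file_get_fence(fd);
	return *out ? 0 : -EINVAL;
}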
static int sync_file_set_fence(struct sync_file *sync_file,
struct fence **fences, int num_fences)
struct dma_fence **fences, int num_fences)
{
struct fence_array *array;
struct dma_fence_array *array;
/*
* The references for the fences in the new sync_file are taken
* in add_fence() during the merge procedure, so for num_fences == 1
* we already own a new reference to the fence. For num_fences > 1
* we own the reference of the fence_array creation.
* we own the reference of the dma_fence_array creation.
*/
if (num_fences == 1) {
sync_file->fence = fences[0];
kfree(fences);
} else {
array = fence_array_create(num_fences, fences,
fence_context_alloc(1), 1, false);
array = dma_fence_array_create(num_fences, fences,
dma_fence_context_alloc(1),
1, false);
if (!array)
return -ENOMEM;
@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
static struct dma_fence **get_fences(struct sync_file *sync_file,
int *num_fences)
{
if (fence_is_array(sync_file->fence)) {
struct fence_array *array = to_fence_array(sync_file->fence);
if (dma_fence_is_array(sync_file->fence)) {
struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
*num_fences = array->num_fences;
return array->fences;
@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
return &sync_file->fence;
}
static void add_fence(struct fence **fences, int *i, struct fence *fence)
static void add_fence(struct dma_fence **fences,
int *i, struct dma_fence *fence)
{
fences[*i] = fence;
if (!fence_is_signaled(fence)) {
fence_get(fence);
if (!dma_fence_is_signaled(fence)) {
dma_fence_get(fence);
(*i)++;
}
}
@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
struct fence **fences, **nfences, **a_fences, **b_fences;
struct dma_fence **fences, **nfences, **a_fences, **b_fences;
int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* and sync_file_create, this is a reasonable assumption.
*/
for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
struct fence *pt_a = a_fences[i_a];
struct fence *pt_b = b_fences[i_b];
struct dma_fence *pt_a = a_fences[i_a];
struct dma_fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
add_fence(fences, &i, pt_a);
@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
add_fence(fences, &i, b_fences[i_b]);
if (i == 0)
fences[i++] = fence_get(a_fences[0]);
fences[i++] = dma_fence_get(a_fences[0]);
if (num_fences > i) {
nfences = krealloc(fences, i * sizeof(*fences),
@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
kref);
if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
fence_remove_callback(sync_file->fence, &sync_file->cb);
fence_put(sync_file->fence);
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
dma_fence_put(sync_file->fence);
kfree(sync_file);
}
@ -305,14 +308,13 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
if (!poll_does_not_wait(wait) &&
!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
if (fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
}
return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@ -370,14 +372,14 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
return err;
}
static void sync_fill_fence_info(struct fence *fence,
static void sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
if (fence_is_signaled(fence))
if (dma_fence_is_signaled(fence))
info->status = fence->status >= 0 ? 1 : fence->status;
else
info->status = 0;
@ -389,7 +391,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
struct fence **fences;
struct dma_fence **fences;
__u32 size;
int num_fences, ret, i;
@ -429,7 +431,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
info.status = fence_is_signaled(sync_file->fence);
info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))


@ -12,6 +12,7 @@ menuconfig DRM
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
select SYNC_FILE
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@ -33,6 +34,20 @@ config DRM_DP_AUX_CHARDEV
read and write values to arbitrary DPCD registers on the DP aux
channel.
config DRM_DEBUG_MM
bool "Insert extra checks and debug info into the DRM range managers"
default n
depends on DRM=y
depends on STACKTRACE_SUPPORT
select STACKDEPOT
help
Enable allocation tracking of memory manager and leak detection on
shutdown.
Recommended for driver developers only.
If in doubt, say "N".
config DRM_KMS_HELPER
tristate
depends on DRM
@ -223,6 +238,12 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
source "drivers/gpu/drm/zte/Kconfig"
source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"
# Keep legacy drivers last
menuconfig DRM_LEGACY


@ -9,13 +9,14 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_info.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@ -23,6 +24,7 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@ -79,6 +81,7 @@ obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
@ -86,3 +89,5 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/


@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \


@ -90,7 +90,6 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@ -120,7 +119,6 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@ -149,7 +147,6 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@ -411,10 +408,6 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/

File diff suppressed because it is too large


@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
const struct amdgpu_ip_block_version *ip_version =
const struct amdgpu_ip_block *ip_block =
amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
if (!ip_version)
if (!ip_block)
return -EINVAL;
r = amd_acp_hw_init(adev->acp.cgs_device,
ip_version->major, ip_version->minor);
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV)
return 0;
@ -459,7 +459,7 @@ static int acp_set_powergating_state(void *handle,
return 0;
}
const struct amd_ip_funcs acp_ip_funcs = {
static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
@ -475,3 +475,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
const struct amdgpu_ip_block_version acp_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_ACP,
.major = 2,
.minor = 2,
.rev = 0,
.funcs = &acp_ip_funcs,
};


@ -37,6 +37,6 @@ struct amdgpu_acp {
struct acp_pm_domain *acp_genpd;
};
extern const struct amd_ip_funcs acp_ip_funcs;
extern const struct amdgpu_ip_block_version acp_ip_block;
#endif /* __AMDGPU_ACP_H__ */


@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
{
GET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
return le32_to_cpu(args.ulReturnEngineClock);
}
uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
{
GET_MEMORY_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
return le32_to_cpu(args.ulReturnMemoryClock);
}
void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
uint32_t eng_clock)
{
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
uint32_t mem_clock)
{
SET_MEMORY_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
if (adev->flags & AMD_IS_APU)
return;
args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock)
{
@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
u16 voltage_level,
u8 voltage_type)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
u8 frev, crev, volt_index = voltage_level;
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
return;
/* 0xff01 is a flag rather than an actual voltage */
if (voltage_level == 0xff01)
return;
switch (crev) {
case 1:
args.v1.ucVoltageType = voltage_type;
args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
args.v1.ucVoltageIndex = volt_index;
break;
case 2:
args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
case 3:
args.v3.ucVoltageType = voltage_type;
args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return;
}
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
u16 *leakage_id)
{
@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
u32 tmp = RREG32(mmBIOS_SCRATCH_3);
if (hung)
tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
else
tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
WREG32(mmBIOS_SCRATCH_3, tmp);
}
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on


@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
uint32_t eng_clock);
void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
uint32_t mem_clock);
void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
u16 voltage_level,
u8 voltage_type);
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,


@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
struct fence *fence = NULL;
struct dma_fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
false);
if (r)
goto exit_do_move;
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r)
goto exit_do_move;
fence_put(fence);
dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
fence_put(fence);
dma_fence_put(fence);
return r;
}


@ -70,10 +70,11 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
return false;
}
adev->bios = kmalloc(size, GFP_KERNEL);
if (adev->bios == NULL) {
if (!adev->bios) {
iounmap(bios);
return false;
}
adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
return true;
@ -103,6 +104,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
pci_unmap_rom(adev->pdev, bios);
return false;
}
adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
return true;
@ -135,6 +137,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
DRM_ERROR("no memory to allocate for BIOS\n");
return false;
}
adev->bios_size = len;
/* read complete BIOS */
return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
@ -159,6 +162,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
if (adev->bios == NULL) {
return false;
}
adev->bios_size = size;
return true;
}
@ -273,6 +277,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
kfree(adev->bios);
return false;
}
adev->bios_size = size;
return true;
}
#else
@ -334,6 +339,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
adev->bios_size = vhdr->ImageLength;
ret = !!adev->bios;
out_unmap:


@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_clockgating_state(
if (adev->ip_blocks[i].version->type == block_type) {
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
(void *)adev,
state);
break;
@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_powergating_state(
if (adev->ip_blocks[i].version->type == block_type) {
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
(void *)adev,
state);
break;
@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
|| adev->asic_type == CHIP_POLARIS10)
result = AMDGPU_UCODE_ID_CP_MEC2;
else
/* For VI, JT2 should be the same as JT1, because:
 * 1. MEC2 and MEC1 use exactly the same FW.
 * 2. JT2 is not patched, but JT1 is.
 */
if (adev->asic_type >= CHIP_TOPAZ)
result = AMDGPU_UCODE_ID_CP_MEC1;
else
result = AMDGPU_UCODE_ID_CP_MEC2;
break;
case CGS_UCODE_ID_RLC_G:
result = AMDGPU_UCODE_ID_RLC_G;
break;
case CGS_UCODE_ID_STORAGE:
result = AMDGPU_UCODE_ID_STORAGE;
break;
default:
DRM_ERROR("Firmware type not supported\n");
}
@ -715,7 +723,7 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
enum cgs_ucode_id type)
{
CGS_FUNC_ADEV;
uint16_t fw_version;
uint16_t fw_version = 0;
switch (type) {
case CGS_UCODE_ID_SDMA0:
@ -745,9 +753,11 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
case CGS_UCODE_ID_RLC_G:
fw_version = adev->gfx.rlc_fw_version;
break;
case CGS_UCODE_ID_STORAGE:
break;
default:
DRM_ERROR("firmware type %d do not have version\n", type);
fw_version = 0;
break;
}
return fw_version;
}
@ -776,12 +786,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
(type == CGS_UCODE_ID_CP_MEC_JT2)) {
gpu_addr += le32_to_cpu(header->jt_offset) << 2;
gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
data_size = le32_to_cpu(header->jt_size) << 2;
}
info->mc_addr = gpu_addr;
info->kptr = ucode->kaddr;
info->image_size = data_size;
info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
if (CGS_UCODE_ID_CP_MEC == type)
info->image_size = (header->jt_offset) << 2;
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
@ -860,6 +876,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
CGS_FUNC_ADEV;
return amdgpu_sriov_vf(adev);
}
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
@ -1213,6 +1235,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
amdgpu_cgs_is_virtualization_enabled
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {


@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
static struct drm_encoder *
amdgpu_connector_virtual_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_encoder *encoder;
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
if (!encoder)
continue;
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
if (encoder) {
amdgpu_connector_add_common_modes(encoder, connector);
}
return 0;
}
static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static int
amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
{
return 0;
}
static enum drm_connector_status
amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
{
return connector_status_connected;
}
static int
amdgpu_connector_virtual_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
return 0;
}
static void amdgpu_connector_virtual_force(struct drm_connector *connector)
{
return;
}
static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
.get_modes = amdgpu_connector_virtual_get_modes,
.mode_valid = amdgpu_connector_virtual_mode_valid,
.best_encoder = amdgpu_connector_virtual_encoder,
};
static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
.dpms = amdgpu_connector_virtual_dpms,
.detect = amdgpu_connector_virtual_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_virtual_set_property,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_virtual_force,
};
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_VIRTUAL:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
}
}


@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t domain;
int r;
@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@ -387,9 +388,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *lobj)
struct amdgpu_bo *validated)
{
uint32_t domain = lobj->robj->allowed_domains;
uint32_t domain = validated->allowed_domains;
int r;
if (!p->evictable)
@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t other;
/* If we reached our current BO we can forget it */
if (candidate == lobj)
if (candidate->robj == validated)
break;
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
return false;
}
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_cs_parser *p = param;
int r;
do {
r = amdgpu_cs_bo_validate(p, bo);
} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
if (r)
return r;
if (bo->shadow)
r = amdgpu_cs_bo_validate(p, bo->shadow);
return r;
}
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (p->evictable == lobj)
p->evictable = NULL;
do {
r = amdgpu_cs_bo_validate(p, bo);
} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
r = amdgpu_cs_validate(p, bo);
if (r)
return r;
if (bo->shadow) {
r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
}
if (binding_userptr) {
drm_free_large(lobj->user_pages);
lobj->user_pages = NULL;
@@ -594,14 +605,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
amdgpu_cs_validate, p);
if (r) {
DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
goto error_validate;
}
r = amdgpu_cs_list_validate(p, &duplicates);
if (r) {
DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -720,7 +736,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
fence_put(parser->fence);
dma_fence_put(parser->fence);
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
@@ -757,7 +773,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
struct fence *f;
struct dma_fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
@@ -807,13 +823,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
} else {
}
if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
r = amdgpu_bo_vm_update_pte(p, vm);
@@ -824,16 +841,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
return amdgpu_cs_sync_rings(p);
}
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
{
if (r == -EDEADLK) {
r = amdgpu_gpu_reset(adev);
if (!r)
r = -EAGAIN;
}
return r;
}
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
struct amdgpu_cs_parser *parser)
{
@@ -902,7 +909,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -917,9 +924,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
ib->gpu_addr = chunk_ib->va_start;
}
ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
j++;
@@ -927,8 +934,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
@@ -957,7 +964,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
struct fence *fence;
struct dma_fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -979,7 +986,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
} else if (fence) {
r = amdgpu_sync_fence(adev, &p->job->sync,
fence);
fence_put(fence);
dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
@@ -1009,7 +1016,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
p->fence = fence_get(&job->base.s_fence->finished);
p->fence = dma_fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1037,29 +1044,29 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
amdgpu_cs_parser_fini(&parser, r, false);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
goto out;
}
r = amdgpu_cs_parser_bos(&parser, data);
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r && r != -ERESTARTSYS)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
else if (!r) {
reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
}
if (!r) {
r = amdgpu_cs_dependencies(adev, &parser);
if (r)
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
if (r) {
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
goto out;
}
reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;
r = amdgpu_cs_dependencies(adev, &parser);
if (r) {
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
goto out;
}
for (i = 0; i < parser.job->num_ibs; i++)
trace_amdgpu_cs(&parser, i);
@@ -1071,7 +1078,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
@@ -1092,7 +1098,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct fence *fence;
struct dma_fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1108,8 +1114,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
r = fence_wait_timeout(fence, true, timeout);
fence_put(fence);
r = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
} else
r = 1;
@@ -1123,6 +1129,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
return 0;
}
/**
* amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
*
* @adev: amdgpu device
* @filp: file private
* @user: drm_amdgpu_fence copied from user space
*/
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
struct drm_file *filp,
struct drm_amdgpu_fence *user)
{
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
int r;
r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
user->ring, &ring);
if (r)
return ERR_PTR(r);
ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
if (ctx == NULL)
return ERR_PTR(-EINVAL);
fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
amdgpu_ctx_put(ctx);
return fence;
}
/**
* amdgpu_cs_wait_all_fences - wait on all fences to signal
*
* @adev: amdgpu device
* @filp: file private
* @wait: wait parameters
* @fences: array of drm_amdgpu_fence
*/
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
struct drm_file *filp,
union drm_amdgpu_wait_fences *wait,
struct drm_amdgpu_fence *fences)
{
uint32_t fence_count = wait->in.fence_count;
unsigned int i;
long r = 1;
for (i = 0; i < fence_count; i++) {
struct dma_fence *fence;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
if (IS_ERR(fence))
return PTR_ERR(fence);
else if (!fence)
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
if (r < 0)
return r;
if (r == 0)
break;
}
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
return 0;
}
/**
* amdgpu_cs_wait_any_fence - wait on any fence to signal
*
* @adev: amdgpu device
* @filp: file private
* @wait: wait parameters
* @fences: array of drm_amdgpu_fence
*/
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
struct drm_file *filp,
union drm_amdgpu_wait_fences *wait,
struct drm_amdgpu_fence *fences)
{
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
uint32_t fence_count = wait->in.fence_count;
uint32_t first = ~0;
struct dma_fence **array;
unsigned int i;
long r;
/* Prepare the fence array */
array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
if (array == NULL)
return -ENOMEM;
for (i = 0; i < fence_count; i++) {
struct dma_fence *fence;
fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
if (IS_ERR(fence)) {
r = PTR_ERR(fence);
goto err_free_fence_array;
} else if (fence) {
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
goto out;
}
}
r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
&first);
if (r < 0)
goto err_free_fence_array;
out:
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
/* set return value 0 to indicate success */
r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
dma_fence_put(array[i]);
kfree(array);
return r;
}
/**
* amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
*
* @dev: drm device
* @data: data from userspace
* @filp: file private
*/
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);
if (fences == NULL)
return -ENOMEM;
fences_user = (void __user *)(unsigned long)(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
goto err_free_fences;
}
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
err_free_fences:
kfree(fences);
return r;
}
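(For context, a minimal userspace sketch of driving this new ioctl; DRM_IOCTL_AMDGPU_WAIT_FENCES, the header path and the helper below are assumptions for illustration, not part of this diff. The drm_amdgpu_fence entries would be filled from earlier submission results, and timeout_ns is interpreted by amdgpu_gem_timeout() as above.)

	/* hypothetical caller: wait for either of two prior submissions */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <libdrm/amdgpu_drm.h>

	static int wait_any_of_two(int fd, struct drm_amdgpu_fence fences[2],
				   uint64_t timeout_ns)
	{
		union drm_amdgpu_wait_fences args;

		memset(&args, 0, sizeof(args));
		args.in.fences = (uint64_t)(uintptr_t)fences;
		args.in.fence_count = 2;
		args.in.wait_all = 0;		/* 0: any one fence is enough */
		args.in.timeout_ns = timeout_ns;

		if (ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
			return -1;
		/* out.status is non-zero if a fence signaled within the timeout;
		 * out.first_signaled is its index in the input array. */
		return args.out.status ? (int)args.out.first_signaled : -1;
	}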
/**
* amdgpu_cs_find_bo_va - find bo_va for VM address
*
@@ -1196,6 +1376,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
continue;
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r))
return r;
}
return 0;


@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
sizeof(struct fence*), GFP_KERNEL);
sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
break;
goto failed;
}
if (i < adev->num_rings) {
for (j = 0; j < i; j++)
amd_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
}
return 0;
failed:
for (j = 0; j < i; j++)
amd_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
}
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
fence_put(ctx->rings[i].fences[j]);
dma_fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct fence *fence)
struct dma_fence *fence)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
unsigned idx = 0;
struct fence *other = NULL;
struct dma_fence *other = NULL;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
fence_get(fence);
dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
cring->sequence++;
spin_unlock(&ctx->ring_lock);
fence_put(other);
dma_fence_put(other);
return seq;
}
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq)
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
struct fence *fence;
struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return NULL;
}
fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
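
(Note how the two helpers above cooperate: the per-ring fence storage is a power-of-two ring buffer indexed by seq & (amdgpu_sched_jobs - 1), so amdgpu_ctx_add_fence waits for the slot's previous occupant before overwriting it. With amdgpu_sched_jobs at its default of 32, for example, sequence 37 lands in slot 5 and must first wait for submission 5 to retire.)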


@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
amdgpu_bo_kunmap(adev->wb.wb_obj);
amdgpu_bo_unpin(adev->wb.wb_obj);
amdgpu_bo_unreserve(adev->wb.wb_obj);
}
amdgpu_bo_unref(&adev->wb.wb_obj);
adev->wb.wb = NULL;
amdgpu_bo_free_kernel(&adev->wb.wb_obj,
&adev->wb.gpu_addr,
(void **)&adev->wb.wb);
adev->wb.wb_obj = NULL;
}
}
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->wb.wb_obj);
r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.wb_obj, &adev->wb.gpu_addr,
(void **)&adev->wb.wb);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
}
r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
if (unlikely(r != 0)) {
amdgpu_wb_fini(adev);
return r;
}
r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.gpu_addr);
if (r) {
amdgpu_bo_unreserve(adev->wb.wb_obj);
dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
amdgpu_wb_fini(adev);
return r;
}
r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
amdgpu_bo_unreserve(adev->wb.wb_obj);
if (r) {
dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
amdgpu_wb_fini(adev);
return r;
}
adev->wb.num_wb = AMDGPU_MAX_WB;
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
@@ -1038,6 +1016,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_block_size);
amdgpu_vm_block_size = 9;
}
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
}
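
(In practice the new check means amdgpu.vram_page_split must be -1, which keeps the driver default, or a power of two no smaller than 16: for example vram_page_split=512 passes through unchanged, while vram_page_split=100 trips the warning above and is forced back to 1024.)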
/**
@@ -1112,11 +1097,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
state);
if (adev->ip_blocks[i].version->type == block_type) {
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
state);
if (r)
return r;
break;
@@ -1132,11 +1117,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
state);
if (adev->ip_blocks[i].version->type == block_type) {
r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
state);
if (r)
return r;
break;
@@ -1151,10 +1136,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
if (adev->ip_blocks[i].version->type == block_type) {
r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
if (r)
return r;
break;
@@ -1170,23 +1155,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].type == block_type)
return adev->ip_blocks[i].funcs->is_idle((void *)adev);
if (adev->ip_blocks[i].version->type == block_type)
return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
}
return true;
}
const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
struct amdgpu_device *adev,
enum amd_ip_block_type type)
struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type)
if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
@@ -1207,38 +1191,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor)
{
const struct amdgpu_ip_block_version *ip_block;
ip_block = amdgpu_get_ip_block(adev, type);
struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
if (ip_block && ((ip_block->major > major) ||
((ip_block->major == major) &&
(ip_block->minor >= minor))))
if (ip_block && ((ip_block->version->major > major) ||
((ip_block->version->major == major) &&
(ip_block->version->minor >= minor))))
return 0;
return 1;
}
static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
/**
* amdgpu_ip_block_add
*
* @adev: amdgpu_device pointer
* @ip_block_version: pointer to the IP to add
*
* Adds the IP block driver information to the collection of IPs
* on the asic.
*/
int amdgpu_ip_block_add(struct amdgpu_device *adev,
const struct amdgpu_ip_block_version *ip_block_version)
{
if (!ip_block_version)
return -EINVAL;
adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
return 0;
}
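
(A sketch of how the helper is meant to be consumed; the block names below are placeholders, not identifiers from this diff. Each asic's setup registers shared const version structs in hardware init order instead of filling a per-device array:)

	/* hypothetical asic setup using amdgpu_ip_block_add() */
	static int example_set_ip_blocks(struct amdgpu_device *adev)
	{
		int r;

		r = amdgpu_ip_block_add(adev, &example_common_ip_block);
		if (r)
			return r;
		r = amdgpu_ip_block_add(adev, &example_gmc_ip_block);
		if (r)
			return r;
		/* ...GFX, SDMA, UVD, VCE blocks follow in init order... */
		return 0;
	}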
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
struct drm_device *ddev = adev->ddev;
const char *pci_address_name = pci_name(ddev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname;
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
pciaddname = strsep(&pciaddname_tmp, ",");
if (!strcmp(pci_address_name, pciaddname)) {
long num_crtc;
int res = -1;
adev->enable_virtual_display = true;
if (pciaddname_tmp)
res = kstrtol(pciaddname_tmp, 10,
&num_crtc);
if (!res) {
if (num_crtc < 1)
num_crtc = 1;
if (num_crtc > 6)
num_crtc = 6;
adev->mode_info.num_crtc = num_crtc;
} else {
adev->mode_info.num_crtc = 1;
}
break;
}
}
DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
amdgpu_virtual_display, pci_address_name,
adev->enable_virtual_display);
DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
amdgpu_virtual_display, pci_address_name,
adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
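
(For reference, the option string parsed here is a semicolon-separated list of pci_address,num_crtc entries; the CRTC count is clamped to 1-6 and defaults to 1 when the comma part is omitted. With illustrative addresses, amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0 requests two virtual CRTCs on the first device and one on the second.)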
@@ -1248,7 +1269,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
amdgpu_whether_enable_virtual_display(adev);
amdgpu_device_enable_virtual_display(adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
@@ -1300,33 +1321,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
adev->ip_block_status = kcalloc(adev->num_ip_blocks,
sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
if (adev->ip_block_status == NULL)
return -ENOMEM;
if (adev->ip_blocks == NULL) {
DRM_ERROR("No IP blocks found!\n");
return r;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
adev->ip_block_status[i].valid = false;
adev->ip_blocks[i].status.valid = false;
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
if (adev->ip_blocks[i].version->funcs->early_init) {
r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
if (r == -ENOENT) {
adev->ip_block_status[i].valid = false;
adev->ip_blocks[i].status.valid = false;
} else if (r) {
DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_ERROR("early_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
} else {
adev->ip_block_status[i].valid = true;
adev->ip_blocks[i].status.valid = true;
}
} else {
adev->ip_block_status[i].valid = true;
adev->ip_blocks[i].status.valid = true;
}
}
}
@@ -1342,22 +1354,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
if (r) {
DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_ERROR("sw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_block_status[i].sw = true;
adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
}
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
@@ -1367,22 +1380,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
return r;
}
adev->ip_block_status[i].hw = true;
adev->ip_blocks[i].status.hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].sw)
if (!adev->ip_blocks[i].status.sw)
continue;
/* gmc hw init is done early */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
continue;
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_block_status[i].hw = true;
adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -1393,25 +1407,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
if (r) {
DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_ERROR("late_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_block_status[i].late_initialized = true;
adev->ip_blocks[i].status.late_initialized = true;
}
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].funcs->name, r);
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1426,68 +1441,77 @@ static int amdgpu_fini(struct amdgpu_device *adev)
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].hw)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].funcs->name, r);
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].funcs->name, r);
adev->ip_blocks[i].version->funcs->name, r);
}
adev->ip_block_status[i].hw = false;
adev->ip_blocks[i].status.hw = false;
break;
}
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].hw)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
amdgpu_vram_scratch_fini(adev);
}
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
adev->ip_block_status[i].hw = false;
adev->ip_blocks[i].status.hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].sw)
if (!adev->ip_blocks[i].status.sw)
continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false;
adev->ip_blocks[i].status.sw = false;
adev->ip_blocks[i].status.valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].late_initialized)
if (!adev->ip_blocks[i].status.late_initialized)
continue;
if (adev->ip_blocks[i].funcs->late_fini)
adev->ip_blocks[i].funcs->late_fini((void *)adev);
adev->ip_block_status[i].late_initialized = false;
if (adev->ip_blocks[i].version->funcs->late_fini)
adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
adev->ip_blocks[i].status.late_initialized = false;
}
return 0;
@@ -1505,21 +1529,23 @@ int amdgpu_suspend(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (i != AMD_IP_BLOCK_TYPE_SMC) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
}
/* XXX handle errors */
r = adev->ip_blocks[i].funcs->suspend(adev);
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_ERROR("suspend of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
}
@@ -1531,11 +1557,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
DRM_ERROR("resume of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1586,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1846,8 +1873,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_status);
adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
@@ -1943,7 +1968,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
r = amdgpu_suspend(adev);
/* evict remaining vram memory */
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
* using the CPU.
*/
amdgpu_bo_evict_vram(adev);
amdgpu_atombios_scratch_regs_save(adev);
@@ -2085,13 +2113,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
bool asic_hang = false;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].funcs->check_soft_reset)
adev->ip_block_status[i].hang =
adev->ip_blocks[i].funcs->check_soft_reset(adev);
if (adev->ip_block_status[i].hang) {
DRM_INFO("IP block:%d is hang!\n", i);
if (adev->ip_blocks[i].version->funcs->check_soft_reset)
adev->ip_blocks[i].status.hang =
adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
if (adev->ip_blocks[i].status.hang) {
DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -2103,11 +2131,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_block_status[i].hang &&
adev->ip_blocks[i].funcs->pre_soft_reset) {
r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->pre_soft_reset) {
r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
if (r)
return r;
}
@@ -2121,13 +2149,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
(adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
if (adev->ip_block_status[i].hang) {
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
}
@@ -2141,11 +2169,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_block_status[i].hang &&
adev->ip_blocks[i].funcs->soft_reset) {
r = adev->ip_blocks[i].funcs->soft_reset(adev);
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->soft_reset) {
r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
if (r)
return r;
}
@@ -2159,11 +2187,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_block_status[i].hang &&
adev->ip_blocks[i].funcs->post_soft_reset)
r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->post_soft_reset)
r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
if (r)
return r;
}
@@ -2182,7 +2210,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct fence **fence)
struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2298,30 +2326,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
if (need_full_reset && amdgpu_need_backup(adev)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
struct fence *fence = NULL, *next = NULL;
struct dma_fence *fence = NULL, *next = NULL;
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r) {
WARN(r, "recovery from shadow isn't comleted\n");
break;
}
}
fence_put(fence);
dma_fence_put(fence);
fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r)
WARN(r, "recovery from shadow isn't comleted\n");
}
fence_put(fence);
dma_fence_put(fence);
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2470,9 +2498,6 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
adev->debugfs[adev->debugfs_count].num_files = nfiles;
adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
adev->ddev->control->debugfs_root,
adev->ddev->control);
drm_debugfs_create_files(files, nfiles,
adev->ddev->primary->debugfs_root,
adev->ddev->primary);
@@ -2486,9 +2511,6 @@ static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
unsigned i;
for (i = 0; i < adev->debugfs_count; i++) {
drm_debugfs_remove_files(adev->debugfs[i].files,
adev->debugfs[i].num_files,
adev->ddev->control);
drm_debugfs_remove_files(adev->debugfs[i].files,
adev->debugfs[i].num_files,
adev->ddev->primary);
@@ -2517,6 +2539,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
if (sh_bank == 0x3FF)
sh_bank = 0xFFFFFFFF;
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
@@ -2525,8 +2554,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
*pos &= 0x3FFFF;
if (use_bank) {
if (sh_bank >= adev->gfx.config.max_sh_per_se ||
se_bank >= adev->gfx.config.max_shader_engines)
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
@@ -2573,10 +2602,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank;
unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
/* are we writing registers for which a PG lock is necessary? */
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
if (sh_bank == 0x3FF)
sh_bank = 0xFFFFFFFF;
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
}
*pos &= 0x3FFFF;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
}
if (pm_pg_lock)
mutex_lock(&adev->pm.mutex);
while (size) {
uint32_t value;
@@ -2595,6 +2659,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size -= 4;
}
if (use_bank) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
return result;
}
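
(Both the read and write paths now share one selector encoding in the file offset: bit 62 enables bank selection, bits 24-33/34-43/44-53 hold the SE/SH/instance index with 0x3FF widened to the 0xFFFFFFFF broadcast value, bit 23 requests the PG lock, and the low bits carry the register byte offset. A hypothetical userspace helper mirroring that decode:)

	/* hypothetical: compose a file offset for the amdgpu_regs debugfs file */
	#include <stdint.h>

	static uint64_t amdgpu_regs_pos(uint32_t reg_byte_offset, uint32_t se,
					uint32_t sh, uint32_t instance,
					int pm_pg_lock)
	{
		uint64_t pos = reg_byte_offset & 0x3FFFF; /* 4-byte aligned */

		pos |= (uint64_t)(se & 0x3FF) << 24;       /* 0x3FF: all SEs */
		pos |= (uint64_t)(sh & 0x3FF) << 34;       /* 0x3FF: all SHs */
		pos |= (uint64_t)(instance & 0x3FF) << 44; /* 0x3FF: all instances */
		pos |= (uint64_t)!!pm_pg_lock << 23;
		pos |= 1ULL << 62;                         /* enable bank selection */
		return pos;
	}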
@@ -2857,6 +2929,116 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
return !r ? 4 : r;
}
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
int r, x;
ssize_t result=0;
uint32_t offset, se, sh, cu, wave, simd, data[32];
if (size & 3 || *pos & 3)
return -EINVAL;
/* decode offset */
offset = (*pos & 0x7F);
se = ((*pos >> 7) & 0xFF);
sh = ((*pos >> 15) & 0xFF);
cu = ((*pos >> 23) & 0xFF);
wave = ((*pos >> 31) & 0xFF);
simd = ((*pos >> 37) & 0xFF);
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
x = 0;
if (adev->gfx.funcs->read_wave_data)
adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
if (!x)
return -EINVAL;
while (size && (offset < x * 4)) {
uint32_t value;
value = data[offset >> 2];
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
offset += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
int r;
ssize_t result = 0;
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
if (size & 3 || *pos & 3)
return -EINVAL;
/* decode offset */
offset = (*pos & 0xFFF); /* in dwords */
se = ((*pos >> 12) & 0xFF);
sh = ((*pos >> 20) & 0xFF);
cu = ((*pos >> 28) & 0xFF);
wave = ((*pos >> 36) & 0xFF);
simd = ((*pos >> 44) & 0xFF);
thread = ((*pos >> 52) & 0xFF);
bank = ((*pos >> 60) & 1);
data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
if (bank == 0) {
if (adev->gfx.funcs->read_wave_vgprs)
adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
} else {
if (adev->gfx.funcs->read_wave_sgprs)
adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
}
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
while (size) {
uint32_t value;
value = data[offset++];
r = put_user(value, (uint32_t *)buf);
if (r) {
result = r;
goto err;
}
result += 4;
buf += 4;
size -= 4;
}
err:
kfree(data);
return result;
}
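
(The new amdgpu_gpr file packs its selector the same way: dword offset in bits 0-11, then SE/SH/CU/wave/SIMD/thread as successive 8-bit fields starting at bits 12, 20, 28, 36, 44 and 52, with bit 60 selecting SGPRs (1) rather than VGPRs (0); a read then returns the raw GPR dwords for the selected wave.)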
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2894,6 +3076,17 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_wave_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_wave_read,
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_gpr_read,
.llseek = default_llseek
};
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -2901,6 +3094,8 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
&amdgpu_debugfs_wave_fops,
&amdgpu_debugfs_gpr_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2910,6 +3105,8 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",
"amdgpu_wave",
"amdgpu_gpr",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)


@@ -35,29 +35,29 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
fence_put(f);
dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
struct fence **f)
struct dma_fence **f)
{
struct fence *fence= *f;
struct dma_fence *fence= *f;
if (fence == NULL)
return false;
*f = NULL;
if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
fence_put(fence);
dma_fence_put(fence);
return false;
}
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_flip_work *work =
container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpuCrtc->base;
struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
unsigned i;
int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
if (amdgpuCrtc->enabled &&
if (amdgpu_crtc->enabled &&
(amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
amdgpuCrtc->crtc_id, amdgpuCrtc, work);
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
@@ -187,7 +187,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
r = -EINVAL;
DRM_ERROR("failed to pin new abo buffer before flip\n");
@@ -244,9 +244,9 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
cleanup:
amdgpu_bo_unref(&work->old_abo);
fence_put(work->excl);
dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);


@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
for (i = 0; i < states->numEntries; i++) {
if (i >= AMDGPU_MAX_VCE_LEVELS)
break;
adev->pm.dpm.num_of_vce_states =
states->numEntries > AMD_MAX_VCE_LEVELS ?
AMD_MAX_VCE_LEVELS : states->numEntries;
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
return encoded_lanes[lanes];
}
struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
{
if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx];
return NULL;
}


@@ -23,6 +23,453 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
enum amdgpu_int_thermal_type {
THERMAL_TYPE_NONE,
THERMAL_TYPE_EXTERNAL,
THERMAL_TYPE_EXTERNAL_GPIO,
THERMAL_TYPE_RV6XX,
THERMAL_TYPE_RV770,
THERMAL_TYPE_ADT7473_WITH_INTERNAL,
THERMAL_TYPE_EVERGREEN,
THERMAL_TYPE_SUMO,
THERMAL_TYPE_NI,
THERMAL_TYPE_SI,
THERMAL_TYPE_EMC2103_WITH_INTERNAL,
THERMAL_TYPE_CI,
THERMAL_TYPE_KV,
};
enum amdgpu_dpm_auto_throttle_src {
AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};
enum amdgpu_dpm_event_src {
AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};
#define SCLK_DEEP_SLEEP_MASK 0x8
struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
u32 class2; /* vbios flags */
/* UVD clocks */
u32 vclk;
u32 dclk;
/* VCE clocks */
u32 evclk;
u32 ecclk;
bool vce_active;
enum amd_vce_level vce_level;
/* asic priv */
void *ps_priv;
};
struct amdgpu_dpm_thermal {
/* thermal interrupt work */
struct work_struct work;
/* low temperature threshold */
int min_temp;
/* high temperature threshold */
int max_temp;
/* was last interrupt low to high or high to low */
bool high_to_low;
/* interrupt source */
struct amdgpu_irq_src irq;
};
enum amdgpu_clk_action
{
AMDGPU_SCLK_UP = 1,
AMDGPU_SCLK_DOWN
};
struct amdgpu_blacklist_clocks
{
u32 sclk;
u32 mclk;
enum amdgpu_clk_action action;
};
struct amdgpu_clock_and_voltage_limits {
u32 sclk;
u32 mclk;
u16 vddc;
u16 vddci;
};
struct amdgpu_clock_array {
u32 count;
u32 *values;
};
struct amdgpu_clock_voltage_dependency_entry {
u32 clk;
u16 v;
};
struct amdgpu_clock_voltage_dependency_table {
u32 count;
struct amdgpu_clock_voltage_dependency_entry *entries;
};
union amdgpu_cac_leakage_entry {
struct {
u16 vddc;
u32 leakage;
};
struct {
u16 vddc1;
u16 vddc2;
u16 vddc3;
};
};
struct amdgpu_cac_leakage_table {
u32 count;
union amdgpu_cac_leakage_entry *entries;
};
struct amdgpu_phase_shedding_limits_entry {
u16 voltage;
u32 sclk;
u32 mclk;
};
struct amdgpu_phase_shedding_limits_table {
u32 count;
struct amdgpu_phase_shedding_limits_entry *entries;
};
struct amdgpu_uvd_clock_voltage_dependency_entry {
u32 vclk;
u32 dclk;
u16 v;
};
struct amdgpu_uvd_clock_voltage_dependency_table {
u8 count;
struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};
struct amdgpu_vce_clock_voltage_dependency_entry {
u32 ecclk;
u32 evclk;
u16 v;
};
struct amdgpu_vce_clock_voltage_dependency_table {
u8 count;
struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};
struct amdgpu_ppm_table {
u8 ppm_design;
u16 cpu_core_number;
u32 platform_tdp;
u32 small_ac_platform_tdp;
u32 platform_tdc;
u32 small_ac_platform_tdc;
u32 apu_tdp;
u32 dgpu_tdp;
u32 dgpu_ulv_power;
u32 tj_max;
};
struct amdgpu_cac_tdp_table {
u16 tdp;
u16 configurable_tdp;
u16 tdc;
u16 battery_power_limit;
u16 small_power_limit;
u16 low_cac_leakage;
u16 high_cac_leakage;
u16 maximum_power_delivery_limit;
};
struct amdgpu_dpm_dynamic_state {
struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
struct amdgpu_clock_array valid_sclk_values;
struct amdgpu_clock_array valid_mclk_values;
struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
u32 mclk_sclk_ratio;
u32 sclk_mclk_delta;
u16 vddc_vddci_delta;
u16 min_vddc_for_pcie_gen2;
struct amdgpu_cac_leakage_table cac_leakage_table;
struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
struct amdgpu_ppm_table *ppm_table;
struct amdgpu_cac_tdp_table *cac_tdp_table;
};
struct amdgpu_dpm_fan {
u16 t_min;
u16 t_med;
u16 t_high;
u16 pwm_min;
u16 pwm_med;
u16 pwm_high;
u8 t_hyst;
u32 cycle_delay;
u16 t_max;
u8 control_mode;
u16 default_max_fan_pwm;
u16 default_fan_output_sensitivity;
u16 fan_output_sensitivity;
bool ucode_fan_control;
};
enum amdgpu_pcie_gen {
AMDGPU_PCIE_GEN1 = 0,
AMDGPU_PCIE_GEN2 = 1,
AMDGPU_PCIE_GEN3 = 2,
AMDGPU_PCIE_GEN_INVALID = 0xffff
};
enum amdgpu_dpm_forced_level {
AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
};
struct amdgpu_dpm_funcs {
int (*get_temperature)(struct amdgpu_device *adev);
int (*pre_set_power_state)(struct amdgpu_device *adev);
int (*set_power_state)(struct amdgpu_device *adev);
void (*post_set_power_state)(struct amdgpu_device *adev);
void (*display_configuration_changed)(struct amdgpu_device *adev);
u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
bool (*vblank_too_short)(struct amdgpu_device *adev);
void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
int (*get_sclk_od)(struct amdgpu_device *adev);
int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
int (*get_mclk_od)(struct amdgpu_device *adev);
int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
int (*check_state_equal)(struct amdgpu_device *adev,
struct amdgpu_ps *cps,
struct amdgpu_ps *rps,
bool *equal);
struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
};
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_dpm_read_sensor(adev, idx, value) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
-EINVAL)
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
(adev)->pm.funcs->get_temperature((adev)))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
(adev)->pm.funcs->set_fan_control_mode((adev), (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
(adev)->pm.funcs->get_fan_control_mode((adev)))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
-EINVAL)
#define amdgpu_dpm_get_sclk(adev, l) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->get_sclk((adev), (l)))
#define amdgpu_dpm_get_mclk(adev, l) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->get_mclk((adev), (l)))
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->force_performance_level((adev), (l)))
#define amdgpu_dpm_powergate_uvd(adev, g) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_uvd((adev), (g)))
#define amdgpu_dpm_powergate_vce(adev, g) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_vce((adev), (g)))
#define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
#define amdgpu_dpm_get_performance_level(adev) \
(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
#define amdgpu_dpm_get_pp_num_states(adev, data) \
(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
#define amdgpu_dpm_get_pp_table(adev, table) \
(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
#define amdgpu_dpm_set_pp_table(adev, buf, size) \
(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
#define amdgpu_dpm_force_clock_level(adev, type, level) \
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
#define amdgpu_dpm_get_sclk_od(adev) \
(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
#define amdgpu_dpm_set_sclk_od(adev, value) \
(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
#define amdgpu_dpm_get_mclk_od(adev) \
((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_mclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
#define amdgpu_dpm_get_vce_clock_state(adev, i) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
(adev)->pm.funcs->get_vce_clock_state((adev), (i)))
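
All of the wrappers above follow one dispatch pattern: when adev->pp_enabled is set the call is routed to the powerplay backend through pp_funcs and its opaque pp_handle, otherwise it falls back to the legacy pm.funcs table, or to -EINVAL where no legacy path exists (read_sensor, get_fan_speed_rpm). A minimal standalone sketch of that pattern follows; every name in it is a hypothetical illustration, not kernel API.

/* Minimal sketch of the pp_enabled dispatch pattern used by the
 * wrappers above. Every name here is a hypothetical illustration,
 * not kernel API. Build: cc -o dispatch dispatch.c */
#include <stdio.h>

struct backend_funcs {
        int (*get_temperature)(void *handle);
};

static int pp_get_temperature(void *handle)  { (void)handle; return 65; }
static int dpm_get_temperature(void *handle) { (void)handle; return 60; }

struct device {
        int pp_enabled;
        const struct backend_funcs *pp_funcs;  /* powerplay backend */
        void *pp_handle;                       /* opaque powerplay handle */
        const struct backend_funcs *pm_funcs;  /* legacy dpm backend */
};

/* Mirrors amdgpu_dpm_get_temperature(): powerplay first, legacy else. */
#define dev_get_temperature(dev) \
        ((dev)->pp_enabled ? \
         (dev)->pp_funcs->get_temperature((dev)->pp_handle) : \
         (dev)->pm_funcs->get_temperature(dev))

int main(void)
{
        const struct backend_funcs pp  = { pp_get_temperature };
        const struct backend_funcs dpm = { dpm_get_temperature };
        struct device dev = { 1, &pp, NULL, &dpm };

        printf("powerplay path: %d\n", dev_get_temperature(&dev));
        dev.pp_enabled = 0;
        printf("legacy path:    %d\n", dev_get_temperature(&dev));
        return 0;
}
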
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
int num_ps;
/* current power state that is active */
struct amdgpu_ps *current_ps;
/* requested power state */
struct amdgpu_ps *requested_ps;
/* boot up power state */
struct amdgpu_ps *boot_ps;
/* default uvd power state */
struct amdgpu_ps *uvd_ps;
/* vce requirements */
u32 num_of_vce_states;
struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
enum amd_vce_level vce_level;
enum amd_pm_state_type state;
enum amd_pm_state_type user_state;
enum amd_pm_state_type last_state;
enum amd_pm_state_type last_user_state;
u32 platform_caps;
u32 voltage_response_time;
u32 backbias_response_time;
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
u32 current_active_crtcs;
int current_active_crtc_count;
struct amdgpu_dpm_dynamic_state dyn_state;
struct amdgpu_dpm_fan fan;
u32 tdp_limit;
u32 near_tdp_limit;
u32 near_tdp_limit_adjusted;
u32 sq_ramping_threshold;
u32 cac_leakage;
u16 tdp_od_limit;
u32 tdp_adjustment;
u16 load_line_slope;
bool power_control;
bool ac_power;
/* special states active */
bool thermal_active;
bool uvd_active;
bool vce_active;
/* thermal handling */
struct amdgpu_dpm_thermal thermal;
/* forced levels */
enum amdgpu_dpm_forced_level forced_level;
};
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
u32 current_mclk;
u32 default_sclk;
u32 default_mclk;
struct amdgpu_i2c_chan *i2c_bus;
/* internal thermal controller on rv6xx+ */
enum amdgpu_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
/* fan control parameters */
bool no_fan;
u8 fan_pulses_per_revolution;
u8 fan_min_rpm;
u8 fan_max_rpm;
/* dpm */
bool dpm_enabled;
bool sysfs_initialized;
struct amdgpu_dpm dpm;
const struct firmware *fw; /* SMC firmware */
uint32_t fw_version;
const struct amdgpu_dpm_funcs *funcs;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
};
#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8
@ -82,4 +529,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);
struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
#endif


@ -58,9 +58,10 @@
* - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
* - 3.9.0 - Add support for memory query info about VRAM and GTT.
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 8
#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@ -85,12 +86,13 @@ int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
int amdgpu_powercontainment = 1;
int amdgpu_sclk_deep_sleep_en = 1;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
@ -177,14 +182,14 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
module_param_named(no_evict, amdgpu_no_evict, int, 0444);
MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
MODULE_PARM_DESC(virtual_display,
"Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
static const struct pci_device_id pciidlist[] = {
@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
/* fiji */
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},


@ -75,27 +75,21 @@ amdgpufb_release(struct fb_info *info, int user)
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = amdgpufb_open,
.fb_release = amdgpufb_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
int aligned = width;
int pitch_mask = 0;
switch (bpp / 8) {
switch (cpp) {
case 1:
pitch_mask = 255;
break;
@ -110,7 +104,7 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
return aligned;
return aligned * cpp;
}
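
The rework above changes amdgpu_align_pitch() to take cpp (bytes per pixel) instead of bpp and to return the pitch in bytes, folding in the "(bpp + 1) / 8" byte scaling that each caller previously applied by hand. A standalone sketch of the arithmetic; the masks for cpp 2 and 4 are assumptions, since those switch cases are elided from the hunk.

/* Standalone sketch of the reworked pitch alignment. The masks for
 * cpp == 2 and cpp == 4 are assumptions; those switch cases are
 * elided from the hunk above. */
#include <stdio.h>

static int align_pitch(int width, int cpp)
{
        int aligned = width;
        int pitch_mask = 0;

        switch (cpp) {
        case 1:
                pitch_mask = 255;       /* align to 256 pixels */
                break;
        case 2:
                pitch_mask = 127;       /* assumed */
                break;
        case 4:
                pitch_mask = 63;        /* assumed */
                break;
        }
        aligned += pitch_mask;
        aligned &= ~pitch_mask;
        return aligned * cpp;           /* pitch now returned in bytes */
}

int main(void)
{
        /* 1366 px at 4 bytes/px: width rounds up to 1408 px,
         * so the pitch is 5632 bytes. */
        printf("%d bytes\n", align_pitch(1366, 4));
        return 0;
}
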
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
@ -139,20 +133,21 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
u32 bpp, depth;
u32 cpp;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
fb_tiled) * ((bpp + 1) / 8);
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
@ -176,7 +171,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
}
ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
if (ret) {
amdgpu_bo_unreserve(abo);
goto out_unref;


@ -48,7 +48,7 @@
*/
struct amdgpu_fence {
struct fence base;
struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
@ -74,8 +74,8 @@ void amdgpu_fence_slab_fini(void)
/*
* Cast helper
*/
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@ -131,11 +131,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
struct fence *old, **ptr;
struct dma_fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@ -144,10 +144,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
fence_init(&fence->base, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
dma_fence_init(&fence->base, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
@ -156,12 +156,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
if (old && !fence_is_signaled(old)) {
if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
fence_wait(old, false);
dma_fence_wait(old, false);
}
rcu_assign_pointer(*ptr, fence_get(&fence->base));
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
@ -212,7 +212,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
seq &= drv->num_fences_mask;
do {
struct fence *fence, **ptr;
struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
@ -225,13 +225,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
r = fence_signal(fence);
r = dma_fence_signal(fence);
if (!r)
FENCE_TRACE(fence, "signaled from irq context\n");
DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
fence_put(fence);
dma_fence_put(fence);
} while (last_seq != seq);
}
@ -261,7 +261,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
struct fence *fence, **ptr;
struct dma_fence *fence, **ptr;
int r;
if (!seq)
@ -270,14 +270,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
if (!fence || !fence_get_rcu(fence)) {
if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
r = fence_wait(fence, false);
fence_put(fence);
r = dma_fence_wait(fence, false);
dma_fence_put(fence);
return r;
}
@ -382,24 +382,27 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
if (!ring->fence_drv.fences)
return -ENOMEM;
timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
if (timeout == 0) {
/*
* FIXME:
* Delayed workqueue cannot use it directly,
* so the scheduler will not use delayed workqueue if
* MAX_SCHEDULE_TIMEOUT is set.
* Currently keep it simple and silly.
*/
timeout = MAX_SCHEDULE_TIMEOUT;
}
r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission,
timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
return r;
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
if (timeout == 0) {
/*
* FIXME:
* Delayed workqueue cannot use it directly,
* so the scheduler will not use delayed workqueue if
* MAX_SCHEDULE_TIMEOUT is set.
* Currently keep it simple and silly.
*/
timeout = MAX_SCHEDULE_TIMEOUT;
}
r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission,
timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
return r;
}
}
return 0;
@ -453,7 +456,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
fence_put(ring->fence_drv.fences[j]);
dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
@ -542,12 +545,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
* Common fence implementation
*/
static const char *amdgpu_fence_get_driver_name(struct fence *fence)
static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
static const char *amdgpu_fence_get_timeline_name(struct fence *f)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
@ -561,7 +564,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
static bool amdgpu_fence_enable_signaling(struct fence *f)
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
@ -569,7 +572,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
@ -583,7 +586,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct fence *f = container_of(rcu, struct fence, rcu);
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
@ -596,16 +599,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
static void amdgpu_fence_release(struct fence *f)
static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
static const struct fence_ops amdgpu_fence_ops = {
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
.wait = fence_default_wait,
.wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
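
Beyond the mechanical fence to dma_fence rename, the driver-side logic above hinges on a power-of-two slot array: each emitted sequence number lands in fences[seq & num_fences_mask], and amdgpu_fence_emit() waits out a still-pending occupant before reusing its slot. A standalone illustration of the masking:

/* Standalone illustration of the slot masking above: with a
 * power-of-two slot count, seq & mask wraps the monotonically
 * increasing sequence number onto a fixed array of fence slots. */
#include <stdint.h>
#include <stdio.h>

#define NUM_FENCES 8                    /* must be a power of two */
#define FENCE_MASK (NUM_FENCES - 1)

int main(void)
{
        uint32_t seq;

        for (seq = 5; seq < 15; seq++)
                printf("seq %2u -> slot %u\n", seq, seq & FENCE_MASK);
        /* Slot seq & FENCE_MASK may still hold fence seq - NUM_FENCES;
         * amdgpu_fence_emit() dma_fence_wait()s it out first, which is
         * the "rcu slot is busy" case above. */
        return 0;
}
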


@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->gart.robj);
if (r) {
return r;


@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = abo->adev;
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = bo->adev;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@ -407,10 +408,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
if (timeout == 0)
ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
else
ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
@ -470,6 +469,16 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
return r;
}
static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
{
unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
}
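
This callback replaces the two duplicated swap-out checks further down: both loop bodies collapse into one function handed to amdgpu_vm_validate_pt_bos(), and returning -ERESTARTSYS restarts the ioctl rather than paging buffers back in. A standalone sketch of the walker-plus-callback shape, with illustrative names:

/* Standalone sketch of the validate-via-callback shape: one walker
 * applies the same check to every buffer instead of two copies of
 * the loop. All names are illustrative. */
#include <stdio.h>

struct bo { int domain; };              /* 0 stands in for the CPU domain */

static int validate_bos(struct bo *bos, int n,
                        int (*check)(void *param, struct bo *bo),
                        void *param)
{
        int i, r;

        for (i = 0; i < n; i++) {
                r = check(param, &bos[i]);
                if (r)
                        return r;       /* abort on the first failure */
        }
        return 0;
}

static int va_check(void *param, struct bo *bo)
{
        (void)param;
        /* swapped out: abort and wait for the next CS */
        return bo->domain == 0 ? -1 : 0;  /* -ERESTARTSYS in the kernel */
}

int main(void)
{
        struct bo bos[] = { { 2 }, { 2 }, { 0 } };

        printf("r = %d\n", validate_bos(bos, 3, va_check, NULL));
        return 0;
}
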
/**
* amdgpu_gem_va_update_vm -update the bo_va in its VM
*
@ -480,7 +489,8 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va, uint32_t operation)
struct amdgpu_bo_va *bo_va,
uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry vm_pd;
@ -503,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
@ -511,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
list_for_each_entry(entry, &duplicates, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
NULL);
if (r)
goto error_unreserve;
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
@ -538,8 +544,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@ -549,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
struct ttm_validate_buffer tv, tv_pd;
struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
@ -594,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
tv_pd.bo = &fpriv->vm.page_directory->tbo;
tv_pd.shared = true;
list_add(&tv_pd.head, &list);
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
@ -704,7 +707,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
args->pitch = amdgpu_align_pitch(adev, args->width,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);


@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
/*
* GPU scratch registers helpers function.


@ -27,6 +27,7 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
unsigned max_sh);
#endif


@ -164,10 +164,13 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
spin_unlock(&mgr->lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
if (!node) {
r = -ENOMEM;
goto err_out;
}
node->start = AMDGPU_BO_INVALID_OFFSET;
node->size = mem->num_pages;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@ -175,12 +178,20 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r)) {
kfree(node);
mem->mm_node = NULL;
r = 0;
goto err_out;
}
} else {
mem->start = node->start;
}
return 0;
err_out:
spin_lock(&mgr->lock);
mgr->available += mem->num_pages;
spin_unlock(&mgr->lock);
return r;
}
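
The new err_out label in amdgpu_gtt_mgr_new() exists because the manager decrements its available-page count before later steps can still fail; the rollback returns those pages under the same lock. A standalone sketch of that reserve-then-roll-back accounting, with a pthread mutex standing in for the kernel spinlock:

/* Standalone sketch of the reserve-then-roll-back accounting in
 * amdgpu_gtt_mgr_new() above, with a pthread mutex standing in for
 * the kernel spinlock. Build: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long available = 1024;

static int alloc_pages(long num_pages, int simulate_failure)
{
        pthread_mutex_lock(&lock);
        if (available < num_pages) {
                pthread_mutex_unlock(&lock);
                return -1;                      /* no space */
        }
        available -= num_pages;                 /* reserve up front */
        pthread_mutex_unlock(&lock);

        if (simulate_failure) {                 /* e.g. kzalloc() failed */
                pthread_mutex_lock(&lock);
                available += num_pages;         /* err_out: give them back */
                pthread_mutex_unlock(&lock);
                return -2;
        }
        return 0;
}

int main(void)
{
        int r;

        r = alloc_pages(100, 0);
        printf("ok   = %d, available = %ld\n", r, available);
        r = alloc_pages(100, 1);
        printf("fail = %d, available = %ld\n", r, available);
        return 0;
}
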
/**


@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Free an IB (all asics).
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct fence *f)
struct dma_fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct fence *last_vm_update,
struct amdgpu_job *job, struct fence **f)
struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
struct amdgpu_job *job, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
num_ibs * amdgpu_ring_get_emit_ib_size(ring);
alloc_size = ring->funcs->emit_frame_size + num_ibs *
ring->funcs->emit_ib_size;
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {


@ -424,15 +424,6 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return 0;
}
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type)
{
if ((type >= src->num_types) || !src->enabled_types)
return false;
return atomic_inc_return(&src->enabled_types[type]) == 1;
}
/**
* amdgpu_irq_put - disable interrupt
*


@ -88,9 +88,6 @@ int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,


@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct fence *f;
struct dma_fence *f;
unsigned i;
/* use sched fence if available */
@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
fence_put(job->fence);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
fence_put(job->fence);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f)
struct dma_fence **f)
{
int r;
job->ring = ring;
@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->owner = owner;
job->fence_ctx = entity->fence_context;
*f = fence_get(&job->base.s_fence->finished);
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
}
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
struct fence *fence = amdgpu_sync_get_fence(&job->sync);
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
return fence;
}
static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
struct fence *fence = NULL;
struct dma_fence *fence = NULL;
struct amdgpu_job *job;
int r;
@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
/* if gpu reset, hw fence will be replaced here */
fence_put(job->fence);
job->fence = fence_get(fence);
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
return fence;
}


@ -308,10 +308,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].type == type &&
adev->ip_block_status[i].valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].major;
ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
if (adev->ip_blocks[i].version->type == type &&
adev->ip_blocks[i].status.valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
ip.capabilities_flags = 0;
ip.available_rings = ring_mask;
ip.ib_start_alignment = ib_start_alignment;
@ -347,8 +347,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type &&
adev->ip_block_status[i].valid &&
if (adev->ip_blocks[i].version->type == type &&
adev->ip_blocks[i].status.valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@ -413,6 +413,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->mc.real_vram_size;
mem.vram.usable_heap_size =
adev->mc.real_vram_size - adev->vram_pin_size;
mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->mc.visible_vram_size;
mem.cpu_accessible_vram.usable_heap_size =
adev->mc.visible_vram_size -
(adev->vram_pin_size - adev->invisible_pin_size);
mem.cpu_accessible_vram.heap_usage =
atomic64_read(&adev->vram_vis_usage);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
mem.gtt.total_heap_size = adev->mc.gtt_size;
mem.gtt.usable_heap_size =
adev->mc.gtt_size - adev->gart_pin_size;
mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
min((size_t)size, sizeof(mem)))
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
unsigned n, alloc_size;
uint32_t *regs;
@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
if (amdgpu_sriov_vf(adev))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@ -494,6 +526,50 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
case AMDGPU_INFO_VCE_CLOCK_TABLE: {
unsigned i;
struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
struct amd_vce_state *vce_state;
for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
if (vce_state) {
vce_clk_table.entries[i].sclk = vce_state->sclk;
vce_clk_table.entries[i].mclk = vce_state->mclk;
vce_clk_table.entries[i].eclk = vce_state->evclk;
vce_clk_table.num_valid_entries++;
}
}
return copy_to_user(out, &vce_clk_table,
min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
}
case AMDGPU_INFO_VBIOS: {
uint32_t bios_size = adev->bios_size;
switch (info->vbios_info.type) {
case AMDGPU_INFO_VBIOS_SIZE:
return copy_to_user(out, &bios_size,
min((size_t)size, sizeof(bios_size)))
? -EFAULT : 0;
case AMDGPU_INFO_VBIOS_IMAGE: {
uint8_t *bios;
uint32_t bios_offset = info->vbios_info.offset;
if (bios_offset >= bios_size)
return -EINVAL;
bios = adev->bios + bios_offset;
return copy_to_user(out, bios,
min((size_t)size, (size_t)(bios_size - bios_offset)))
? -EFAULT : 0;
}
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->vbios_info.type);
return -EINVAL;
}
}
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@ -775,6 +851,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
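
The AMDGPU_INFO_MEMORY query added above reports, per heap (VRAM, CPU-visible VRAM, GTT), the total and usable sizes, current usage, and a max_allocation cap of 3/4 of the usable size. A hedged userspace sketch using libdrm's amdgpu_query_info(); it assumes libdrm_amdgpu headers that already carry the new struct, and the render-node path is an assumption:

/* Userspace sketch: read the new AMDGPU_INFO_MEMORY data through
 * libdrm. Assumes libdrm_amdgpu headers new enough to match this
 * kernel; the render-node path is an assumption.
 * Build: cc q.c -I/usr/include/libdrm -ldrm_amdgpu -ldrm */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

int main(void)
{
        amdgpu_device_handle dev;
        struct drm_amdgpu_memory_info mem;
        uint32_t major, minor;
        int fd = open("/dev/dri/renderD128", O_RDWR);

        if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
                return 1;

        if (!amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(mem), &mem))
                printf("VRAM: %llu total, %llu usable, %llu used\n",
                       (unsigned long long)mem.vram.total_heap_size,
                       (unsigned long long)mem.vram.usable_heap_size,
                       (unsigned long long)mem.vram.heap_usage);

        amdgpu_device_deinitialize(dev);
        return 0;
}
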


@ -285,7 +285,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
unsigned long end = addr + amdgpu_bo_size(bo) - 1;
struct amdgpu_device *adev = bo->adev;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct amdgpu_mn_node *node = NULL;
struct list_head bos;
@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = bo->adev;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct list_head *head;


@ -271,8 +271,6 @@ struct amdgpu_display_funcs {
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
/* wait for vblank */
void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
/* is dce hung */
bool (*is_display_hung)(struct amdgpu_device *adev);
/* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level);
@ -341,8 +339,6 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@ -413,6 +409,9 @@ struct amdgpu_crtc {
u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
};
struct amdgpu_encoder_atom_dig {


@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo;
bo = container_of(tbo, struct amdgpu_bo, tbo);
amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
mutex_lock(&bo->adev->shadow_list_lock);
mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
mutex_unlock(&bo->adev->shadow_list_lock);
mutex_unlock(&adev->shadow_list_lock);
}
kfree(bo->metadata);
kfree(bo);
@ -121,20 +122,14 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
unsigned lpfn = 0;
if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
adev->mc.visible_vram_size < adev->mc.real_vram_size) {
places[c].fpfn = visible_pfn;
places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_TOPDOWN;
c++;
}
/* This forces a reallocation if the flag wasn't set before */
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@ -205,8 +200,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
amdgpu_ttm_placement_init(abo->adev, &abo->placement,
abo->placements, domain, abo->flags);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@ -245,7 +242,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int r;
r = amdgpu_bo_create(adev, size, align, true, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@ -351,7 +349,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
kfree(bo);
return r;
}
bo->adev = adev;
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@ -374,39 +371,36 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */
if (!resv) {
bool locked;
reservation_object_init(&bo->tbo.ttm_resv);
locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
WARN_ON(!locked);
}
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0)) {
acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
&amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
}
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct fence *fence;
struct dma_fence *fence;
if (adev->mman.buffer_funcs_ring == NULL ||
!adev->mman.buffer_funcs_ring->ready) {
r = -EBUSY;
goto fail_free;
}
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
goto fail_free;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
amdgpu_bo_fence(bo, fence, false);
amdgpu_bo_unreserve(bo);
fence_put(bo->tbo.moving);
bo->tbo.moving = fence_get(fence);
fence_put(fence);
dma_fence_put(bo->tbo.moving);
bo->tbo.moving = dma_fence_get(fence);
dma_fence_put(fence);
}
if (!resv)
ww_mutex_unlock(&bo->tbo.resv->lock);
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
@ -414,8 +408,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
return 0;
fail_unreserve:
amdgpu_bo_unreserve(bo);
fail_free:
ww_mutex_unlock(&bo->tbo.resv->lock);
amdgpu_bo_unref(&bo);
return r;
}
@ -491,7 +484,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
struct fence **fence,
struct dma_fence **fence,
bool direct)
{
@ -523,7 +516,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
struct fence **fence,
struct dma_fence **fence,
bool direct)
{
@ -616,6 +609,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset,
u64 *gpu_addr)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
unsigned fpfn, lpfn;
@ -643,18 +637,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset >
bo->adev->mc.visible_vram_size)) {
adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
bo->adev->mc.visible_vram_size))
adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
@ -669,12 +665,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r)) {
dev_err(bo->adev->dev, "%p bind failed\n", bo);
dev_err(adev->dev, "%p bind failed\n", bo);
goto error;
}
@ -682,11 +678,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
bo->adev->vram_pin_size += amdgpu_bo_size(bo);
adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
adev->invisible_pin_size += amdgpu_bo_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
bo->adev->gart_pin_size += amdgpu_bo_size(bo);
adev->gart_pin_size += amdgpu_bo_size(bo);
}
error:
@ -700,10 +696,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
if (!bo->pin_count) {
dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
@ -715,16 +712,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
}
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
adev->gart_pin_size -= amdgpu_bo_size(bo);
}
error:
@ -854,6 +851,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
@ -861,21 +859,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
amdgpu_vm_bo_invalidate(abo->adev, abo);
amdgpu_vm_bo_invalidate(adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
unsigned long offset, size, lpfn;
int i, r;
@ -884,13 +882,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
adev = abo->adev;
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
if ((offset + size) <= adev->mc.visible_vram_size)
/* TODO: figure out how to map scattered VRAM to the CPU */
if ((offset + size) <= adev->mc.visible_vram_size &&
(abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
return 0;
/* Can't move a pinned BO to visible VRAM */
@ -898,6 +897,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
@ -931,7 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
@ -959,6 +959,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return bo->tbo.offset;
}
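
A theme running through these hunks: a buffer only has a single GPU offset if its VRAM backing is contiguous, so pinning now forces AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS and amdgpu_bo_gpu_offset() warns when the flag is missing, since allocations split by vram_page_split have no one offset. A standalone model of that invariant, names illustrative:

/* Standalone model of the contiguous-VRAM invariant above: a single
 * GPU offset only exists for contiguous backing, so pinning forces
 * the flag and the offset query asserts it. Names illustrative. */
#include <assert.h>
#include <stdio.h>

#define FLAG_VRAM_CONTIGUOUS (1u << 0)

struct bo {
        unsigned flags;
        unsigned long offset;
};

static void bo_pin(struct bo *bo, unsigned long where)
{
        bo->flags |= FLAG_VRAM_CONTIGUOUS;      /* pinning forces it */
        bo->offset = where;
}

static unsigned long bo_gpu_offset(const struct bo *bo)
{
        /* a WARN_ON_ONCE() in the kernel; an assert here */
        assert(bo->flags & FLAG_VRAM_CONTIGUOUS);
        return bo->offset;
}

int main(void)
{
        struct bo bo = { 0, 0 };

        bo_pin(&bo, 0x100000);
        printf("offset = 0x%lx\n", bo_gpu_offset(&bo));
        return 0;
}
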


@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
*/
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->adev->dev, "%p reserve failed\n", bo);
dev_err(adev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
struct fence **fence, bool direct);
struct dma_fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
struct fence **fence,
struct dma_fence **fence,
bool direct);
@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
struct fence *fence);
struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);


@ -737,6 +737,21 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 speed;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
return err;
return sprintf(buf, "%i\n", speed);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@ -744,6 +759,7 @@ static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@ -753,6 +769,7 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
NULL
};
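
fan1_input follows the standard hwmon contract, an RPM value read back as decimal text, so generic monitoring tools can consume it. A userspace sketch; the hwmon instance number varies per machine, so the path is an assumption:

/* Userspace sketch: read the new fan1_input attribute. The hwmon
 * instance number varies per machine, so this path is an assumption. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/hwmon/hwmon0/fan1_input", "r");
        int rpm;

        if (f && fscanf(f, "%d", &rpm) == 1)
                printf("fan speed: %d RPM\n", rpm);
        if (f)
                fclose(f);
        return 0;
}
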
@ -804,6 +821,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
/* requires powerplay */
if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
return 0;
return effective_mode;
}
@ -986,10 +1007,10 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
int i;
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
bool equal;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@ -1009,46 +1030,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
/* no need to reprogram if nothing changed unless we are on BTC+ */
if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
/* vce just modifies an existing state so force a change */
if (ps->vce_active != adev->pm.dpm.vce_active)
goto force;
if (adev->flags & AMD_IS_APU) {
/* for APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
*/
if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
}
return;
} else {
/* for BTC+ if the num crtcs hasn't changed and state is the same,
* nothing to do, if the num crtcs is > 1 and state is the same,
* update display configuration.
*/
if (adev->pm.dpm.new_active_crtcs ==
adev->pm.dpm.current_active_crtcs) {
return;
} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
(adev->pm.dpm.new_active_crtc_count > 1)) {
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
return;
}
}
}
force:
if (amdgpu_dpm == 1) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@ -1059,31 +1040,21 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
amdgpu_dpm_display_configuration_changed(adev);
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
equal = false;
/* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (ring && ring->ready)
amdgpu_fence_wait_empty(ring);
}
if (equal)
return;
/* program the new power state */
amdgpu_dpm_set_power_state(adev);
/* update current power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
amdgpu_dpm_post_set_power_state(adev);
/* update displays */
amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
@ -1135,7 +1106,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
@ -1276,20 +1247,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
int i = 0;
if (!adev->pm.dpm_enabled)
return;
amdgpu_display_bandwidth_update(adev);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (ring && ring->ready)
amdgpu_fence_wait_empty(ring);
}
if (adev->pp_enabled) {
int i = 0;
amdgpu_display_bandwidth_update(adev);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (ring && ring->ready)
amdgpu_fence_wait_empty(ring);
}
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);


@ -155,9 +155,6 @@ static int amdgpu_pp_sw_init(void *handle)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);
if (adev->pp_enabled)
adev->pm.dpm_enabled = true;
return ret;
}
@ -187,6 +184,9 @@ static int amdgpu_pp_hw_init(void *handle)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
adev->pm.dpm_enabled = true;
return ret;
}
@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &amdgpu_pp_ip_funcs,
};
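
Exporting a const amdgpu_ip_block_version instead of the bare amd_ip_funcs table matches the IP-block rework visible in the info-ioctl hunks earlier (ip_blocks[i].version->type and friends): a block now carries its type and version alongside its ops. A standalone sketch of that shape, with illustrative names and numbers:

/* Standalone sketch of the versioned IP-block shape: each block
 * bundles a type, a version and an ops table, and the device walks
 * the list through ->funcs. Names and numbers are illustrative. */
#include <stdio.h>

struct ip_funcs {
        const char *name;
        int (*hw_init)(void *handle);
};

struct ip_block_version {
        int type;
        unsigned major, minor, rev;
        const struct ip_funcs *funcs;
};

static int pp_hw_init(void *handle) { (void)handle; return 0; }

static const struct ip_funcs pp_funcs = { "powerplay", pp_hw_init };
static const struct ip_block_version pp_block = {
        .type = 1, .major = 1, .minor = 0, .rev = 0, .funcs = &pp_funcs,
};

int main(void)
{
        const struct ip_block_version *blocks[] = { &pp_block };
        unsigned i;

        for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
                printf("init %s v%u.%u\n", blocks[i]->funcs->name,
                       blocks[i]->major, blocks[i]->minor);
                blocks[i]->funcs->hw_init(NULL);
        }
        return 0;
}
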


@ -23,11 +23,11 @@
*
*/
#ifndef __AMDGPU_POPWERPLAY_H__
#define __AMDGPU_POPWERPLAY_H__
#ifndef __AMDGPU_POWERPLAY_H__
#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
#endif /* __AMDSOC_DM_H__ */
#endif /* __AMDGPU_POWERPLAY_H__ */


@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space
* than the maximum for one submission
@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
int i;
for (i = 0; i < count; i++)
amdgpu_ring_write(ring, ring->nop);
amdgpu_ring_write(ring, ring->funcs->nop);
}
/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
while (ib->length_dw & ring->align_mask)
ib->ptr[ib->length_dw++] = ring->nop;
while (ib->length_dw & ring->funcs->align_mask)
ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
uint32_t count;
/* We pad to match fetch size */
count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
count %= ring->align_mask + 1;
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count);
mb();
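
The padding in amdgpu_ring_commit() keeps the write pointer on a fetch-size boundary: count = (align_mask + 1 - (wptr & align_mask)) % (align_mask + 1) yields zero NOPs when wptr is already aligned. A standalone check of the arithmetic:

/* Standalone check of the NOP-padding arithmetic in
 * amdgpu_ring_commit(): pad wptr up to the next multiple of
 * align_mask + 1, emitting zero NOPs when already aligned. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t align_mask = 7;  /* pad to 8-dword fetches */
        uint32_t wptr;

        for (wptr = 0; wptr < 10; wptr++) {
                uint32_t count = align_mask + 1 - (wptr & align_mask);

                count %= align_mask + 1;
                printf("wptr %2u -> %u NOPs\n", wptr, count);
        }
        return 0;
}
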
@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned max_dw, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
enum amdgpu_ring_type ring_type)
unsigned max_dw, struct amdgpu_irq_src *irq_src,
unsigned irq_type)
{
int r;
@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
ring->align_mask = align_mask;
ring->nop = nop;
ring->type = ring_type;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {

View File

@ -0,0 +1,186 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__
#include "gpu_scheduler.h"
/* max number of rings */
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX,
AMDGPU_RING_TYPE_COMPUTE,
AMDGPU_RING_TYPE_SDMA,
AMDGPU_RING_TYPE_UVD,
AMDGPU_RING_TYPE_VCE,
AMDGPU_RING_TYPE_KIQ
};
struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
/*
* Fences.
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
struct timer_list fallback_timer;
unsigned num_fences_mask;
spinlock_t lock;
struct dma_fence **fences;
};
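The fences array together with num_fences_mask suggests a power-of-two ring of in-flight fences indexed by sequence number. A hedged sketch of that indexing scheme, not the driver's actual implementation:

#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 16 /* must be a power of two, like num_hw_submission */

struct fake_fence { uint32_t seq; };

static struct fake_fence *slots[NUM_SLOTS];
static const uint32_t num_fences_mask = NUM_SLOTS - 1;

/* Each emitted sequence number maps to one reusable slot. */
static void store_fence(struct fake_fence *f)
{
    slots[f->seq & num_fences_mask] = f;
}

int main(void)
{
    struct fake_fence a = { .seq = 5 }, b = { .seq = 5 + NUM_SLOTS };

    store_fence(&a);
    store_fence(&b); /* seq 21 reuses slot 5: only NUM_SLOTS fences in flight */
    printf("slot %u holds seq %u\n", 5u & num_fences_mask,
           slots[5 & num_fences_mask]->seq);
    return 0;
}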
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
* Rings.
*/
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
enum amdgpu_ring_type type;
uint32_t align_mask;
u32 nop;
/* ring read/write ptr handling */
u32 (*get_rptr)(struct amdgpu_ring *ring);
u32 (*get_wptr)(struct amdgpu_ring *ring);
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size);
/* testing functions */
int (*test_ring)(struct amdgpu_ring *ring);
int (*test_ib)(struct amdgpu_ring *ring, long timeout);
/* insert NOP packets */
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
/* note usage for clock and power gating */
void (*begin_use)(struct amdgpu_ring *ring);
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
};
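Constants such as type, align_mask and nop now live in this shared const vtable rather than being copied into each ring instance at init time, which is why amdgpu_ring_init in the earlier hunk lost those parameters. A hedged sketch of the pattern (the hardware block and its values are made up):

#include <stdint.h>
#include <stdio.h>

struct ring;

/* One const descriptor per HW block; every ring instance just points at it. */
struct ring_funcs {
    uint32_t align_mask;
    uint32_t nop;
    void (*insert_nop)(struct ring *ring, uint32_t count);
};

struct ring {
    const struct ring_funcs *funcs;
    uint32_t wptr;
};

static void generic_insert_nop(struct ring *ring, uint32_t count)
{
    ring->wptr += count; /* stand-in for writing ring->funcs->nop packets */
}

/* Hypothetical block: 8-dword fetch, 0x80000000 as its NOP packet. */
static const struct ring_funcs demo_funcs = {
    .align_mask = 7,
    .nop = 0x80000000,
    .insert_nop = generic_insert_nop,
};

int main(void)
{
    struct ring r = { .funcs = &demo_funcs };

    r.funcs->insert_nop(&r, 3);
    printf("wptr=%u nop=0x%08x\n", r.wptr, r.funcs->nop);
    return 0;
}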
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler sched;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
unsigned max_dw;
int count_dw;
uint64_t gpu_addr;
uint32_t ptr_mask;
bool ready;
u32 idx;
u32 me;
u32 pipe;
u32 queue;
struct amdgpu_bo *mqd_obj;
u32 doorbell_index;
bool use_doorbell;
unsigned wptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
char name[16];
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif
};
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
#endif

View File

@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
fence_put(sa_bo->fence);
dma_fence_put(sa_bo->fence);
kfree(sa_bo);
}
@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
!fence_is_signaled(sa_bo->fence)) {
!dma_fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
struct fence **fences,
struct dma_fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
if (!fence_is_signaled(sa_bo->fence)) {
if (!dma_fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
@ -327,9 +327,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return -EINVAL;
*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
if ((*sa_bo) == NULL) {
if (!(*sa_bo))
return -ENOMEM;
}
(*sa_bo)->manager = sa_manager;
(*sa_bo)->fence = NULL;
INIT_LIST_HEAD(&(*sa_bo)->olist);
@ -356,14 +355,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
fences[count++] = fence_get(fences[i]);
fences[count++] = dma_fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
t = fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT);
t = dma_fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT,
NULL);
for (i = 0; i < count; ++i)
fence_put(fences[i]);
dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
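The shape of this change is worth noting: the allocator takes its own reference on each fence it intends to wait on, drops the lock, waits, then drops all the references again; the extra NULL is the optional out-parameter of dma_fence_wait_any_timeout() that reports which fence signaled. A toy sketch of the get/wait/put shape with a fake refcount (not the kernel API):

#include <stdio.h>

struct toy_fence { int refcount; };

static struct toy_fence *toy_get(struct toy_fence *f) { f->refcount++; return f; }
static void toy_put(struct toy_fence *f) { f->refcount--; }

int main(void)
{
    struct toy_fence pool[3] = { {1}, {1}, {1} };
    struct toy_fence *wait_on[3];
    unsigned count = 0, i;

    /* compact: keep only the fences we actually need, with our own ref */
    for (i = 0; i < 3; i++)
        if (i != 1) /* pretend slot 1 is already signaled */
            wait_on[count++] = toy_get(&pool[i]);

    /* ... wait for any of wait_on[0..count) here ... */

    for (i = 0; i < count; i++)
        toy_put(wait_on[i]);
    printf("waited on %u fences\n", count);
    return 0;
}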
@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
struct fence *fence)
struct dma_fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
if (fence && !fence_is_signaled(fence)) {
if (fence && !dma_fence_is_signaled(fence)) {
uint32_t idx;
(*sa_bo)->fence = fence_get(fence);
(*sa_bo)->fence = dma_fence_get(fence);
idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {

View File

@ -34,7 +34,7 @@
struct amdgpu_sync_entry {
struct hlist_node node;
struct fence *fence;
struct dma_fence *fence;
};
static struct kmem_cache *amdgpu_sync_slab;
@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
*
* Test if the fence was issued by us.
*/
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
*
* Extract who originally created the fence.
*/
static void *amdgpu_sync_get_owner(struct fence *f)
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
*
* Either keep the existing fence or the new one, depending which one is later.
*/
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
static void amdgpu_sync_keep_later(struct dma_fence **keep,
struct dma_fence *fence)
{
if (*keep && fence_is_later(*keep, fence))
if (*keep && dma_fence_is_later(*keep, fence))
return;
fence_put(*keep);
*keep = fence_get(fence);
dma_fence_put(*keep);
*keep = dma_fence_get(fence);
}
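keep_later is a small max() over fences: the stored fence is replaced only when the new one is later on the timeline, taking a reference on the winner and dropping the loser's. The same logic over plain wrap-safe sequence numbers (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for dma_fence_is_later(): wrap-safe sequence comparison. */
static int is_later(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) > 0;
}

static void keep_later(uint32_t *keep, int *valid, uint32_t fence)
{
    if (*valid && is_later(*keep, fence))
        return; /* existing fence already covers the new one */
    *keep = fence;  /* in the driver: put the old ref, get the new one */
    *valid = 1;
}

int main(void)
{
    uint32_t keep = 0;
    int valid = 0;

    keep_later(&keep, &valid, 10);
    keep_later(&keep, &valid, 7);   /* ignored: 10 is later */
    keep_later(&keep, &valid, 12);  /* replaces 10 */
    printf("kept fence %u\n", keep);
    return 0;
}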
/**
@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
*
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct fence *f)
struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -ENOMEM;
hash_add(sync->fences, &e->node, f->context);
e->fence = fence_get(f);
e->fence = dma_fence_get(f);
return 0;
}
@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
void *owner)
{
struct reservation_object_list *flist;
struct fence *f;
struct dma_fence *f;
void *fence_owner;
unsigned i;
int r = 0;
@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
* Returns the next fence not signaled yet without removing it from the sync
* object.
*/
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring)
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct fence *f = e->fence;
struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
if (ring && s_fence) {
@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
if (fence_is_signaled(&s_fence->scheduled))
if (dma_fence_is_signaled(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
}
}
if (fence_is_signaled(f)) {
if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
fence_put(f);
dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
*
* Get and removes the next fence from the sync object not signaled yet.
*/
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
struct fence *f;
struct dma_fence *f;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
if (!fence_is_signaled(f))
if (!dma_fence_is_signaled(f))
return f;
fence_put(f);
dma_fence_put(f);
}
return NULL;
}
@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
fence_put(e->fence);
dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
fence_put(sync->last_vm_update);
dma_fence_put(sync->last_vm_update);
}
/**

View File

@ -0,0 +1,56 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_SYNC_H__
#define __AMDGPU_SYNC_H__
#include <linux/hashtable.h>
struct dma_fence;
struct reservation_object;
struct amdgpu_device;
struct amdgpu_ring;
/*
* Container for fences used to sync command submissions.
*/
struct amdgpu_sync {
DECLARE_HASHTABLE(fences, 4);
struct dma_fence *last_vm_update;
};
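DECLARE_HASHTABLE(fences, 4) sizes the table at 2^4 = 16 buckets; entries are keyed on fence->context, so fences from one timeline chain together and a later fence can replace an earlier one. A minimal sketch of that bucketing without the kernel macros (assuming open chaining and a trivial mask hash):

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 4
#define NUM_BUCKETS (1u << HASH_BITS) /* what DECLARE_HASHTABLE(name, 4) gives */

struct entry {
    uint64_t context; /* timeline key, like fence->context */
    uint32_t seqno;
    struct entry *next;
};

static struct entry *buckets[NUM_BUCKETS];

static void add_entry(struct entry *e)
{
    unsigned b = (unsigned)(e->context & (NUM_BUCKETS - 1)); /* toy hash */

    e->next = buckets[b];
    buckets[b] = e;
}

int main(void)
{
    struct entry a = { .context = 3, .seqno = 1 };
    struct entry b = { .context = 19, .seqno = 9 }; /* collides with ctx 3 */

    add_entry(&a);
    add_entry(&b);
    printf("bucket 3 head: ctx=%llu\n",
           (unsigned long long)buckets[3]->context);
    return 0;
}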
void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
#endif

View File

@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
struct fence *fence = NULL;
struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
goto out_lclean_unpin;
}
fence_put(fence);
dma_fence_put(fence);
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
goto out_lclean_unpin;
}
fence_put(fence);
dma_fence_put(fence);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
@ -216,7 +216,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_unref(&gtt_obj[i]);
}
if (fence)
fence_put(fence);
dma_fence_put(fence);
break;
}

View File

@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
__field(struct fence *, fence)
__field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
__field(struct fence *, fence)
__field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),

View File

@ -34,7 +34,6 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <ttm/ttm_memory.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
@ -51,16 +50,6 @@
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
{
struct amdgpu_mman *mman;
struct amdgpu_device *adev;
mman = container_of(bdev, struct amdgpu_mman, bdev);
adev = container_of(mman, struct amdgpu_device, mman);
return adev;
}
/*
* Global memory.
@ -75,7 +64,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
ttm_mem_global_release(ref->object);
}
int amdgpu_ttm_global_init(struct amdgpu_device *adev)
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
@ -150,7 +139,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
struct amdgpu_device *adev;
adev = amdgpu_get_adev(bdev);
adev = amdgpu_ttm_adev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@ -168,7 +157,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->func = &ttm_bo_manager_func;
man->func = &amdgpu_vram_mgr_func;
man->gpu_offset = adev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@ -195,6 +184,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
@ -213,7 +203,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (abo->adev->mman.buffer_funcs_ring->ready == false) {
if (adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@ -229,7 +219,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* allocating address space for the BO.
*/
abo->placements[i].lpfn =
abo->adev->mc.gtt_size >> PAGE_SHIFT;
adev->mc.gtt_size >> PAGE_SHIFT;
}
}
break;
@ -260,63 +250,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
struct ttm_mem_reg *mem,
uint64_t *addr)
{
struct amdgpu_device *adev;
struct amdgpu_ring *ring;
uint64_t old_start, new_start;
struct fence *fence;
int r;
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
switch (old_mem->mem_type) {
switch (mem->mem_type) {
case TTM_PL_TT:
r = amdgpu_ttm_bind(bo, old_mem);
r = amdgpu_ttm_bind(bo, mem);
if (r)
return r;
case TTM_PL_VRAM:
old_start = (u64)old_mem->start << PAGE_SHIFT;
old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
*addr = mm_node->start << PAGE_SHIFT;
*addr += bo->bdev->man[mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
DRM_ERROR("Unknown placement %d\n", mem->mem_type);
return -EINVAL;
}
switch (new_mem->mem_type) {
case TTM_PL_TT:
r = amdgpu_ttm_bind(bo, new_mem);
if (r)
return r;
case TTM_PL_VRAM:
new_start = (u64)new_mem->start << PAGE_SHIFT;
new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
return 0;
}
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *old_mm, *new_mm;
uint64_t old_start, old_size, new_start, new_size;
unsigned long num_pages;
struct dma_fence *fence = NULL;
int r;
BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
r = amdgpu_copy_buffer(ring, old_start, new_start,
new_mem->num_pages * PAGE_SIZE, /* bytes */
bo->resv, &fence, false);
old_mm = old_mem->mm_node;
r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
if (r)
return r;
old_size = old_mm->size;
new_mm = new_mem->mm_node;
r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
if (r)
return r;
new_size = new_mm->size;
num_pages = new_mem->num_pages;
while (num_pages) {
unsigned long cur_pages = min(old_size, new_size);
struct dma_fence *next;
r = amdgpu_copy_buffer(ring, old_start, new_start,
cur_pages * PAGE_SIZE,
bo->resv, &next, false);
if (r)
goto error;
dma_fence_put(fence);
fence = next;
num_pages -= cur_pages;
if (!num_pages)
break;
old_size -= cur_pages;
if (!old_size) {
r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
&old_start);
if (r)
goto error;
old_size = old_mm->size;
} else {
old_start += cur_pages * PAGE_SIZE;
}
new_size -= cur_pages;
if (!new_size) {
r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
&new_start);
if (r)
goto error;
new_size = new_mm->size;
} else {
new_start += cur_pages * PAGE_SIZE;
}
}
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
fence_put(fence);
dma_fence_put(fence);
return r;
error:
if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence);
return r;
}
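Because a VRAM allocation may now be several scattered drm_mm nodes instead of one contiguous range, the blit walks both node lists in lockstep, copying min(old_size, new_size) pages per iteration and advancing whichever cursor ran dry. The chunking logic in isolation (sizes in pages, purely illustrative):

#include <stdio.h>

int main(void)
{
    /* two fake scatter lists, sizes in pages; totals must match */
    unsigned long old_nodes[] = { 3, 5 }, new_nodes[] = { 4, 4 };
    unsigned oi = 0, ni = 0;
    unsigned long old_left = old_nodes[0], new_left = new_nodes[0];
    unsigned long num_pages = 8;

    while (num_pages) {
        unsigned long cur = old_left < new_left ? old_left : new_left;

        printf("copy %lu pages (old node %u, new node %u)\n", cur, oi, ni);
        num_pages -= cur;
        old_left -= cur;
        new_left -= cur;
        if (num_pages && !old_left)
            old_left = old_nodes[++oi]; /* advance source cursor */
        if (num_pages && !new_left)
            new_left = new_nodes[++ni]; /* advance destination cursor */
    }
    return 0;
}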
@ -332,7 +374,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int r;
adev = amdgpu_get_adev(bo->bdev);
adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@ -379,7 +421,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_place placements;
int r;
adev = amdgpu_get_adev(bo->bdev);
adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@ -422,7 +464,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
adev = amdgpu_get_adev(bo->bdev);
adev = amdgpu_ttm_adev(bo->bdev);
/* remember the eviction */
if (evict)
@ -475,7 +517,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct amdgpu_device *adev = amdgpu_get_adev(bdev);
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
mem->bus.addr = NULL;
mem->bus.offset = 0;
@ -607,7 +649,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned nents;
int r;
@ -639,7 +681,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
@ -799,7 +841,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
adev = amdgpu_get_adev(bdev);
adev = amdgpu_ttm_adev(bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@ -843,7 +885,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
adev = amdgpu_get_adev(ttm->bdev);
adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@ -889,7 +931,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
adev = amdgpu_get_adev(ttm->bdev);
adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@ -1012,7 +1054,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@ -1029,7 +1071,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
@ -1060,12 +1102,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
return res;
}
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
/* Check each drm MM node individually */
while (num_pages) {
if (place->fpfn < (node->start + node->size) &&
!(place->lpfn && place->lpfn <= node->start))
return true;
num_pages -= node->size;
++node;
}
return false;
}
return ttm_bo_eviction_valuable(bo, place);
}
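The per-node test is a half-open interval overlap check: a node [start, start + size) is only worth evicting if it intersects the requested placement window [fpfn, lpfn), with lpfn == 0 meaning unbounded above. The same predicate standalone (illustrative):

#include <assert.h>

/* Does [start, start + size) overlap the window [fpfn, lpfn)?
 * lpfn == 0 means "no upper bound", as in struct ttm_place. */
static int node_in_window(unsigned long start, unsigned long size,
                          unsigned long fpfn, unsigned long lpfn)
{
    return fpfn < start + size && !(lpfn && lpfn <= start);
}

int main(void)
{
    assert(node_in_window(10, 5, 0, 12));   /* overlaps [0, 12) */
    assert(!node_in_window(10, 5, 15, 20)); /* entirely below the window */
    assert(node_in_window(10, 5, 8, 0));    /* unbounded above */
    return 0;
}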
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
.invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
@ -1083,6 +1150,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
unsigned i, j;
int r;
r = amdgpu_ttm_global_init(adev);
if (r) {
return r;
}
/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
adev->mman.bo_global_ref.ref.object,
@ -1119,7 +1190,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
@ -1247,7 +1319,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
struct fence **fence, bool direct_submit)
struct dma_fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@ -1294,7 +1366,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (direct_submit) {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
NULL, NULL, fence);
job->fence = fence_get(*fence);
job->fence = dma_fence_get(*fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
amdgpu_job_free(job);
@ -1313,28 +1385,40 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
struct fence **fence)
uint32_t src_data,
struct reservation_object *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = bo->adev;
struct amdgpu_job *job;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
uint32_t max_bytes, byte_count;
uint64_t dst_offset;
struct drm_mm_node *mm_node;
unsigned long num_pages;
unsigned int num_loops, num_dw;
unsigned int i;
struct amdgpu_job *job;
int r;
byte_count = bo->tbo.num_pages << PAGE_SHIFT;
max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
if (!ring->ready) {
DRM_ERROR("Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
num_pages = bo->tbo.num_pages;
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
while (num_pages) {
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
num_loops += DIV_ROUND_UP(byte_count, max_bytes);
num_pages -= mm_node->size;
++mm_node;
}
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
/* for IB padding */
while (num_dw & 0x7)
num_dw++;
num_dw += 64;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
@ -1342,28 +1426,43 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED);
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
}
}
dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
for (i = 0; i < num_loops; i++) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
num_pages = bo->tbo.num_pages;
mm_node = bo->tbo.mem.mm_node;
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
dst_offset, cur_size_in_bytes);
while (num_pages) {
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr;
dst_offset += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
&bo->tbo.mem, &dst_addr);
if (r)
return r;
while (byte_count) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
dst_addr, cur_size_in_bytes);
dst_addr += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
}
num_pages -= mm_node->size;
++mm_node;
}
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
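Note the two passes over the mm nodes: the first only counts how many fill packets will be needed so the IB can be sized via num_loops * fill_num_dw, the second actually emits them. A sketch of the sizing pass (node sizes in pages, illustrative numbers):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long node_pages[] = { 3, 9, 2 }; /* scattered VRAM nodes */
    unsigned long max_bytes = 0x8000;         /* per-packet fill limit */
    unsigned num_loops = 0;
    unsigned i;

    for (i = 0; i < 3; i++) {
        unsigned long byte_count = node_pages[i] << PAGE_SHIFT;

        /* DIV_ROUND_UP(byte_count, max_bytes) */
        num_loops += (byte_count + max_bytes - 1) / max_bytes;
    }
    printf("need %u fill packets\n", num_loops);
    return 0;
}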
@ -1554,8 +1653,3 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
#endif
}
u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
{
return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
}

View File

@ -66,6 +66,7 @@ struct amdgpu_mman {
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
struct fence **fence, bool direct_submit);
struct dma_fence **fence, bool direct_submit);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
struct fence **fence);
struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);

View File

@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
ucode->mc_addr = mc_addr;
ucode->kaddr = kptr;
if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
return 0;
header = (const struct common_firmware_header *)ucode->fw->data;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes)),
@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
return 0;
}
static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
uint64_t mc_addr, void *kptr)
{
const struct gfx_firmware_header_v1_0 *header = NULL;
const struct common_firmware_header *comm_hdr = NULL;
uint8_t* src_addr = NULL;
uint8_t* dst_addr = NULL;
if (NULL == ucode->fw)
return 0;
comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
dst_addr = ucode->kaddr +
ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
PAGE_SIZE);
src_addr = (uint8_t *)ucode->fw->data +
le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
(le32_to_cpu(header->jt_offset) * 4);
memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
return 0;
}
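The jump table lands just past the page-aligned ucode image, so the destination is kaddr + ALIGN(ucode_size_bytes, PAGE_SIZE) while the source sits jt_offset dwords into the firmware blob. The offset arithmetic alone, with made-up sizes:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned long ucode_size_bytes = 17000; /* pretend fw image size */
    unsigned long jt_offset = 4250;         /* in dwords, from the header */
    unsigned long jt_size = 64;             /* in dwords */

    unsigned long dst_off = ALIGN(ucode_size_bytes, PAGE_SIZE);

    printf("copy %lu bytes from dword %lu to offset %lu\n",
           jt_size * 4, jt_offset, dst_off);
    return 0;
}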
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
goto failed_reserve;
}
err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&fw_mc_addr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
}
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}

View File

@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_MAXIMUM,
};

View File

@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
continue;
}
fence_wait(fence, false);
fence_put(fence);
dma_fence_wait(fence, false);
dma_fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
@ -360,6 +360,18 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
}
}
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
uint32_t lo, hi;
uint64_t addr;
lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
return addr;
}
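The new helper simply reassembles a 64-bit GPU address from the two 32-bit command-stream words, removing the lo/hi code that was duplicated across both parser passes below. Standalone:

#include <assert.h>
#include <stdint.h>

static uint64_t addr_from_lo_hi(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)lo) | (((uint64_t)hi) << 32);
}

int main(void)
{
    assert(addr_from_lo_hi(0xdeadbeef, 0x1) == 0x1deadbeefULL);
    return 0;
}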
/**
* amdgpu_uvd_cs_pass1 - first parsing round
*
@ -372,14 +384,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint32_t cmd, lo, hi;
uint64_t addr;
uint32_t cmd;
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
if (mapping == NULL) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
@ -698,18 +706,16 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint32_t cmd, lo, hi;
uint32_t cmd;
uint64_t start, end;
uint64_t addr;
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
if (mapping == NULL)
if (mapping == NULL) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
return -EINVAL;
}
start = amdgpu_bo_gpu_offset(bo);
@ -876,6 +882,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
parser->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
ib->length_dw);
@ -890,10 +899,13 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
/* first round, make sure the buffers are actually in the UVD segment */
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
if (r)
return r;
/* first round only required on chips without UVD 64 bit address support */
if (!parser->adev->uvd.address_64_bit) {
/* first round, make sure the buffers are actually in the UVD segment */
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
if (r)
return r;
}
/* second round, patch buffer addresses into the command stream */
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
@ -909,14 +921,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
bool direct, struct fence **fence)
bool direct, struct dma_fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
@ -931,7 +943,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
if (!bo->adev->uvd.address_64_bit) {
if (!ring->adev->uvd.address_64_bit) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
}
@ -960,7 +972,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = fence_get(f);
job->fence = dma_fence_get(f);
if (r)
goto err_free;
@ -975,9 +987,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ttm_eu_fence_buffer_objects(&ticket, &head, f);
if (fence)
*fence = fence_get(f);
*fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
fence_put(f);
dma_fence_put(f);
return 0;
@ -993,7 +1005,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
crash the vcpu so just try to emit a dummy create/destroy msg to
avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence)
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@ -1002,7 +1014,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@ -1042,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence)
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@ -1051,7 +1064,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@ -1128,7 +1142,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
*/
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct fence *fence;
struct dma_fence *fence;
long r;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@ -1143,7 +1157,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
r = fence_wait_timeout(fence, false, timeout);
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@ -1154,7 +1168,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
fence_put(fence);
dma_fence_put(fence);
error:
return r;

View File

@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence);
struct dma_fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence);
bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);

View File

@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence)
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
struct dma_fence *f = NULL;
uint64_t dummy;
int i, r;
@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = fence_get(f);
job->fence = dma_fence_get(f);
if (r)
goto err;
amdgpu_job_free(job);
if (fence)
*fence = fence_get(f);
fence_put(f);
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
@ -476,12 +477,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence)
bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
struct dma_fence *f = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = fence_get(f);
job->fence = dma_fence_get(f);
if (r)
goto err;
@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
}
if (fence)
*fence = fence_get(f);
fence_put(f);
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t *size = &tmp;
int i, r, idx = 0;
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
@ -787,6 +791,96 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
return r;
}
/**
* amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
*
* @p: parser context
*
*/
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
int session_idx = -1;
uint32_t destroyed = 0;
uint32_t created = 0;
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
int i, r = 0, idx = 0;
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
r = -EINVAL;
goto out;
}
switch (cmd) {
case 0x00000001: /* session */
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) {
r = session_idx;
goto out;
}
break;
case 0x01000001: /* create */
created |= 1 << session_idx;
if (destroyed & (1 << session_idx)) {
destroyed &= ~(1 << session_idx);
allocated |= 1 << session_idx;
} else if (!(allocated & (1 << session_idx))) {
DRM_ERROR("Handle already in use!\n");
r = -EINVAL;
goto out;
}
break;
case 0x02000001: /* destroy */
destroyed |= 1 << session_idx;
break;
default:
break;
}
if (session_idx == -1) {
DRM_ERROR("no session command at start of IB\n");
r = -EINVAL;
goto out;
}
idx += len / 4;
}
if (allocated & ~created) {
DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out:
if (!r) {
/* No error, free all destroyed handle slots */
tmp = destroyed;
amdgpu_ib_free(p->adev, ib, NULL);
} else {
/* Error during parsing, free all allocated handle slots */
tmp = allocated;
}
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
if (tmp & (1 << i))
atomic_set(&p->adev->vce.handles[i], 0);
return r;
}
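The VM parser tracks session lifecycles in three bitmasks, one bit per slot: created, destroyed and allocated, so a destroy followed by a create inside the same IB can recycle its slot. A toy walk through the same bookkeeping (command values as in the parser, everything else illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t created = 0, destroyed = 0, allocated = 0;
    int session_idx = 2; /* pretend the session command resolved to slot 2 */

    destroyed |= 1u << session_idx;          /* 0x02000001: destroy */

    /* 0x01000001: create on a slot destroyed earlier in this IB */
    created |= 1u << session_idx;
    if (destroyed & (1u << session_idx)) {
        destroyed &= ~(1u << session_idx);
        allocated |= 1u << session_idx;      /* slot recycled */
    }

    printf("created=%#x destroyed=%#x allocated=%#x\n",
           created, destroyed, allocated);
    return 0;
}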
/**
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
4; /* amdgpu_vce_ring_emit_ib */
}
unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
}
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct fence *fence = NULL;
struct dma_fence *fence = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
r = fence_wait_timeout(fence, false, timeout);
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
error:
fence_put(fence);
dma_fence_put(fence);
return r;
}

View File

@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence);
struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence);
bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,

View File

@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
#include <linux/fence-array.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
* amdgpu_vm_get_bos - add the vm BOs to a duplicates list
* amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
* @validate: callback to do the validation
* @param: parameter for the validation callback
*
* Add the page directory to the BO duplicates list
* for command submission.
* Validate the page table BOs on command submission if necessary.
*/
void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct list_head *duplicates)
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
void *param)
{
uint64_t num_evictions;
unsigned i;
int r;
/* We only need to validate the page tables
* if they aren't already valid.
*/
num_evictions = atomic64_read(&adev->num_evictions);
if (num_evictions == vm->last_eviction_counter)
return;
return 0;
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
struct amdgpu_bo *bo = vm->page_tables[i].bo;
if (!entry->robj)
if (!bo)
continue;
list_add(&entry->tv.head, duplicates);
r = validate(param, bo);
if (r)
return r;
}
return 0;
}
/**
@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_lock(&glob->lru_lock);
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
struct amdgpu_bo *bo = vm->page_tables[i].bo;
if (!entry->robj)
if (!bo)
continue;
ttm_bo_move_to_lru_tail(&entry->robj->tbo);
ttm_bo_move_to_lru_tail(&bo->tbo);
}
spin_unlock(&glob->lru_lock);
}
@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
uint64_t fence_context = adev->fence_context + ring->idx;
struct fence *updates = sync->last_vm_update;
struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
struct fence **fences;
struct dma_fence **fences;
unsigned i;
int r = 0;
@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (&idle->list == &adev->vm_manager.ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
struct fence_array *array;
struct dma_fence_array *array;
unsigned j;
for (j = 0; j < i; ++j)
fence_get(fences[j]);
dma_fence_get(fences[j]);
array = fence_array_create(i, fences, fence_context,
array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
fence_put(fences[j]);
dma_fence_put(fences[j]);
kfree(fences);
r = -ENOMEM;
goto error;
@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
r = amdgpu_sync_fence(ring->adev, sync, &array->base);
fence_put(&array->base);
dma_fence_put(&array->base);
if (r)
goto error;
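dma_fence_array_create() bundles the collected per-ring fences into one composite fence with its own context and seqno; the final true is signal_on_any, so the composite signals as soon as any member does, which is exactly what waiting for the first VMID to become idle needs. A toy model of the any-vs-all distinction (not the real API):

#include <stdio.h>
#include <stdint.h>

/* Toy model of a fence array: a bitmap of signaled members. */
struct toy_array {
    uint32_t members;
    uint32_t signaled;
    int signal_on_any; /* mirrors the last dma_fence_array_create() arg */
};

static int toy_array_signaled(const struct toy_array *a)
{
    if (a->signal_on_any)
        return a->signaled != 0;      /* first idle VMID is enough */
    return a->signaled == a->members; /* otherwise wait for all of them */
}

int main(void)
{
    struct toy_array a = { .members = 0x7, .signal_on_any = 1 };

    a.signaled |= 0x2; /* one member fence signals */
    printf("array signaled: %d\n", toy_array_signaled(&a));
    return 0;
}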
@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check if we can use a VMID already assigned to this VM */
i = ring->idx;
do {
struct fence *flushed;
struct dma_fence *flushed;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
continue;
if (id->last_flush->context != fence_context &&
!fence_is_signaled(id->last_flush))
!dma_fence_is_signaled(id->last_flush))
continue;
flushed = id->flushed_updates;
if (updates &&
(!flushed || fence_is_later(updates, flushed)))
(!flushed || dma_fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID. Remember this submission as
@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
fence_put(id->first);
id->first = fence_get(fence);
dma_fence_put(id->first);
id->first = dma_fence_get(fence);
fence_put(id->last_flush);
dma_fence_put(id->last_flush);
id->last_flush = NULL;
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@ -341,9 +346,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
const struct amdgpu_ip_block_version *ip_block;
const struct amdgpu_ip_block *ip_block;
if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
/* only compute rings */
return false;
@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
if (!ip_block)
return false;
if (ip_block->major <= 7) {
if (ip_block->version->major <= 7) {
/* gfx7 has no workaround */
return true;
} else if (ip_block->major == 8) {
} else if (ip_block->version->major == 8) {
if (adev->gfx.mec_fw_version >= 673)
/* gfx8 is fixed in MEC firmware 673 */
return false;
@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
amdgpu_vm_is_gpu_reset(adev, id))) {
struct fence *fence;
struct dma_fence *fence;
trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
return r;
mutex_lock(&adev->vm_manager.lock);
fence_put(id->last_flush);
dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
@ -524,70 +529,6 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}
/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
* @adev: amdgpu_device pointer
* @bo: bo to clear
*
* need to reserve bo first before calling it.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring;
struct fence *fence = NULL;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
unsigned entries;
uint64_t addr;
int r;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
return r;
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto error;
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (r)
goto error;
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r)
goto error;
memset(&params, 0, sizeof(params));
params.adev = adev;
params.ib = &job->ibs[0];
amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > 64);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
goto error_free;
amdgpu_bo_fence(bo, fence, true);
fence_put(fence);
return 0;
error_free:
amdgpu_job_free(job);
error:
return r;
}
/**
* amdgpu_vm_map_gart - Resolve gart mapping of addr
*
@ -612,123 +553,6 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
bool shadow)
{
struct amdgpu_ring *ring;
struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
vm->page_directory;
uint64_t pd_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
struct fence *fence = NULL;
int r;
if (!pd)
return 0;
r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
if (r)
return r;
pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
/* padding, etc. */
ndw = 64;
/* assume the worst case */
ndw += vm->max_pde_used * 6;
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
memset(&params, 0, sizeof(params));
params.adev = adev;
params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
uint64_t pde, pt;
if (bo == NULL)
continue;
if (bo->shadow) {
struct amdgpu_bo *shadow = bo->shadow;
r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
if (r)
return r;
}
pt = amdgpu_bo_gpu_offset(bo);
if (!shadow) {
if (vm->page_tables[pt_idx].addr == pt)
continue;
vm->page_tables[pt_idx].addr = pt;
} else {
if (vm->page_tables[pt_idx].shadow_addr == pt)
continue;
vm->page_tables[pt_idx].shadow_addr = pt;
}
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt) ||
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
amdgpu_vm_do_set_ptes(&params, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID);
}
count = 1;
last_pde = pde;
last_pt = pt;
} else {
++count;
}
}
if (count)
amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
if (params.ib->length_dw != 0) {
amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
goto error_free;
amdgpu_bo_fence(pd, fence, true);
fence_put(vm->page_directory_fence);
vm->page_directory_fence = fence_get(fence);
fence_put(fence);
} else {
amdgpu_job_free(job);
}
return 0;
error_free:
amdgpu_job_free(job);
return r;
}
/*
* amdgpu_vm_update_pdes - make sure that page directory is valid
*
@@ -742,14 +566,135 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
* Returns 0 for success, error for failure.
*/
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_bo *shadow;
struct amdgpu_ring *ring;
uint64_t pd_addr, shadow_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
struct dma_fence *fence = NULL;
int r;
-r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
shadow = vm->page_directory->shadow;
/* padding, etc. */
ndw = 64;
/* assume the worst case */
ndw += vm->max_pde_used * 6;
pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
if (shadow) {
r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
if (r)
return r;
shadow_addr = amdgpu_bo_gpu_offset(shadow);
ndw *= 2;
} else {
shadow_addr = 0;
}
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
-return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
memset(&params, 0, sizeof(params));
params.adev = adev;
params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
if (bo->shadow) {
struct amdgpu_bo *pt_shadow = bo->shadow;
r = amdgpu_ttm_bind(&pt_shadow->tbo,
&pt_shadow->tbo.mem);
if (r)
return r;
}
pt = amdgpu_bo_gpu_offset(bo);
if (vm->page_tables[pt_idx].addr == pt)
continue;
vm->page_tables[pt_idx].addr = pt;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt) ||
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
if (shadow)
amdgpu_vm_do_set_ptes(&params,
last_shadow,
last_pt, count,
incr,
AMDGPU_PTE_VALID);
amdgpu_vm_do_set_ptes(&params, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID);
}
count = 1;
last_pde = pde;
last_shadow = shadow_addr + pt_idx * 8;
last_pt = pt;
} else {
++count;
}
}
if (count) {
if (vm->page_directory->shadow)
amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
count, incr, AMDGPU_PTE_VALID);
amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
}
if (params.ib->length_dw == 0) {
amdgpu_job_free(job);
return 0;
}
amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
if (shadow)
amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
goto error_free;
amdgpu_bo_fence(vm->page_directory, fence, true);
dma_fence_put(vm->page_directory_fence);
vm->page_directory_fence = dma_fence_get(fence);
dma_fence_put(fence);
return 0;
error_free:
amdgpu_job_free(job);
return r;
}
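Note: the rewritten amdgpu_vm_update_page_directory() coalesces physically contiguous PDE writes into single amdgpu_vm_do_set_ptes() calls. The batching condition, factored into a hypothetical helper purely for illustration (pde_run_continues is not part of the driver):

    /* A run continues while the next PDE slot (8 bytes each) and the next
     * page table address (incr == AMDGPU_VM_PTE_COUNT * 8 bytes apart)
     * both follow contiguously and the run is below the hardware limit.
     */
    static bool pde_run_continues(uint64_t last_pde, uint64_t last_pt,
                                  unsigned count, uint64_t pde, uint64_t pt,
                                  uint32_t incr)
    {
            return (last_pde + 8ULL * count) == pde &&
                   (last_pt + (uint64_t)incr * count) == pt &&
                   count < AMDGPU_VM_MAX_UPDATE_SIZE;
    }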
/**
@@ -781,11 +726,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
-pt = vm->page_tables[pt_idx].entry.robj;
+pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
-pt = vm->page_tables[pt_idx].entry.robj->shadow;
+pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -804,11 +749,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
-pt = vm->page_tables[pt_idx].entry.robj;
+pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
-pt = vm->page_tables[pt_idx].entry.robj->shadow;
+pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
@@ -929,20 +874,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-struct fence *exclusive,
+struct dma_fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
-struct fence **fence)
+struct dma_fence **fence)
{
struct amdgpu_ring *ring;
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
-struct fence *f = NULL;
+struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
@@ -1045,10 +990,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
-fence_put(*fence);
-*fence = fence_get(f);
+dma_fence_put(*fence);
+*fence = dma_fence_get(f);
}
-fence_put(f);
+dma_fence_put(f);
return 0;
error_free:
@@ -1065,8 +1010,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
-* @addr: addr to set the area to
* @flags: HW flags for the mapping
+* @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@@ -1074,17 +1019,16 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-struct fence *exclusive,
+struct dma_fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
-uint32_t flags, uint64_t addr,
-struct fence **fence)
+uint32_t flags,
+struct drm_mm_node *nodes,
+struct dma_fence **fence)
{
-const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-uint64_t src = 0, start = mapping->it.start;
+uint64_t pfn, src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits; go here
@@ -1097,23 +1041,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
-if (pages_addr) {
-if (flags == gtt_flags)
-src = adev->gart.table_addr + (addr >> 12) * 8;
-addr = 0;
-}
-addr += mapping->offset;
-
-if (!pages_addr || src)
-return amdgpu_vm_bo_update_mapping(adev, exclusive,
-src, pages_addr, vm,
-start, mapping->it.last,
-flags, addr, fence);
-
-while (start != mapping->it.last + 1) {
-uint64_t last;
-
-last = min((uint64_t)mapping->it.last, start + max_size - 1);
+pfn = mapping->offset >> PAGE_SHIFT;
+if (nodes) {
+while (pfn >= nodes->size) {
+pfn -= nodes->size;
+++nodes;
+}
+}
+
+do {
+uint64_t max_entries;
+uint64_t addr, last;
+
+if (nodes) {
+addr = nodes->start << PAGE_SHIFT;
+max_entries = (nodes->size - pfn) *
+(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+} else {
+addr = 0;
+max_entries = S64_MAX;
+}
+
+if (pages_addr) {
+if (flags == gtt_flags)
+src = adev->gart.table_addr +
+(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+else
+max_entries = min(max_entries, 16ull * 1024ull);
+addr = 0;
+} else if (flags & AMDGPU_PTE_VALID) {
+addr += adev->vm_manager.vram_base_offset;
+}
+addr += pfn << PAGE_SHIFT;
+
+last = min((uint64_t)mapping->it.last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1121,9 +1082,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
+pfn += last - start + 1;
+if (nodes && nodes->size == pfn) {
+pfn = 0;
+++nodes;
+}
start = last + 1;
-addr += max_size * AMDGPU_GPU_PAGE_SIZE;
-}
+
+} while (unlikely(start != mapping->it.last + 1));
return 0;
}
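Note: amdgpu_vm_bo_split_mapping() now walks an array of drm_mm_node entries instead of assuming one contiguous allocation. A hypothetical helper mirroring the pfn-to-node walk above (illustration only, not part of the driver):

    /* Advance through the node array until the node containing *pfn is
     * reached; on return, *pfn is the page offset inside that node.
     */
    static struct drm_mm_node *skip_to_node(struct drm_mm_node *nodes,
                                            uint64_t *pfn)
    {
            while (*pfn >= nodes->size) {
                    *pfn -= nodes->size;
                    ++nodes;
            }
            return nodes;
    }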
@@ -1147,40 +1113,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
struct ttm_mem_reg *mem;
-struct fence *exclusive;
-uint64_t addr;
+struct drm_mm_node *nodes;
+struct dma_fence *exclusive;
int r;
if (clear) {
mem = NULL;
-addr = 0;
+nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
mem = &bo_va->bo->tbo.mem;
-addr = (u64)mem->start << PAGE_SHIFT;
-switch (mem->mem_type) {
-case TTM_PL_TT:
+nodes = mem->mm_node;
+if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
-break;
-case TTM_PL_VRAM:
-addr += adev->vm_manager.vram_base_offset;
-break;
-default:
-break;
}
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-adev == bo_va->bo->adev) ? flags : 0;
+adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1146,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive,
gtt_flags, pages_addr, vm,
-mapping, flags, addr,
+mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
return r;
@@ -1405,18 +1361,18 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
-struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
-entry = &vm->page_tables[pt_idx].entry;
-if (entry->robj)
+if (vm->page_tables[pt_idx].bo)
continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-AMDGPU_GEM_CREATE_SHADOW,
+AMDGPU_GEM_CREATE_SHADOW |
+AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+AMDGPU_GEM_CREATE_VRAM_CLEARED,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1426,27 +1382,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
*/
pt->parent = amdgpu_bo_ref(vm->page_directory);
-r = amdgpu_vm_clear_bo(adev, vm, pt);
-if (r) {
-amdgpu_bo_unref(&pt->shadow);
-amdgpu_bo_unref(&pt);
-goto error_free;
-}
-
-if (pt->shadow) {
-r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
-if (r) {
-amdgpu_bo_unref(&pt->shadow);
-amdgpu_bo_unref(&pt);
-goto error_free;
-}
-}
-
-entry->robj = pt;
-entry->priority = 0;
-entry->tv.bo = &entry->robj->tbo;
-entry->tv.shared = true;
-entry->user_pages = NULL;
+vm->page_tables[pt_idx].bo = pt;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1547,7 +1483,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
kfree(mapping);
}
-fence_put(bo_va->last_pt_update);
+dma_fence_put(bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1626,7 +1562,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-AMDGPU_GEM_CREATE_SHADOW,
+AMDGPU_GEM_CREATE_SHADOW |
+AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+AMDGPU_GEM_CREATE_VRAM_CLEARED,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1635,24 +1573,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r)
goto error_free_page_directory;
-r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
-if (r)
-goto error_unreserve;
-
-if (vm->page_directory->shadow) {
-r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
-if (r)
-goto error_unreserve;
-}
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
amdgpu_bo_unreserve(vm->page_directory);
return 0;
-error_unreserve:
-amdgpu_bo_unreserve(vm->page_directory);
error_free_page_directory:
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
@@ -1697,7 +1622,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+struct amdgpu_bo *pt = vm->page_tables[i].bo;
if (!pt)
continue;
@@ -1709,7 +1634,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
-fence_put(vm->page_directory_fence);
+dma_fence_put(vm->page_directory_fence);
}
/**
@@ -1733,7 +1658,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
-adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+adev->vm_manager.fence_context =
+dma_fence_context_alloc(AMDGPU_MAX_RINGS);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
@@ -1755,9 +1681,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
-fence_put(adev->vm_manager.ids[i].first);
+dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-fence_put(id->flushed_updates);
-fence_put(id->last_flush);
+dma_fence_put(id->flushed_updates);
+dma_fence_put(id->last_flush);
}
}
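Note: much of the churn in this file is the tree-wide struct fence to struct dma_fence rename. A hypothetical helper, for illustration only, showing the reference-swap pattern used at several call sites above:

    /* Drop the old reference in *slot and store a new reference to f;
     * dma_fence_get()/dma_fence_put() replace fence_get()/fence_put().
     */
    static void swap_fence_ref(struct dma_fence **slot, struct dma_fence *f)
    {
            dma_fence_put(*slot);
            *slot = dma_fence_get(f);
    }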


@@ -0,0 +1,205 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
#include <linux/rbtree.h>
#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
/*
* GPUVM handling
*/
/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
/* LOG2 number of contiguous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
#define AMDGPU_PTE_SNOOPED (1 << 2)
/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1 << 4)
#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)
#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
};
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root va;
/* protecting invalidated */
spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
/* BOs cleared in the PT because of a move */
struct list_head cleared;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct dma_fence *page_directory_fence;
uint64_t last_eviction_counter;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
/* for id and flush management per ring */
struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
/* client id */
u64 client_id;
};
struct amdgpu_vm_id {
struct list_head list;
struct dma_fence *first;
struct amdgpu_sync active;
struct dma_fence *last_flush;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct dma_fence *flushed_updates;
uint32_t current_gpu_reset_count;
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
uint32_t gws_size;
uint32_t oa_base;
uint32_t oa_size;
};
struct amdgpu_vm_manager {
/* Handling of VMIDs */
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
/* Handling of VM fences */
u64 fence_context;
unsigned seqno[AMDGPU_MAX_RINGS];
uint32_t max_pfn;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring;
/* client id counter */
atomic64_t client_counter;
};
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
#endif
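Note: a hypothetical example of composing a PTE flags value from the bits defined above, for a valid, snooped system page that is readable and writeable, using the default fragment size:

    /* AMDGPU_LOG2_PAGES_PER_FRAG == 4, i.e. 16-page fragments. */
    uint32_t pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
                         AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
                         AMDGPU_PTE_WRITEABLE |
                         AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);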


@@ -0,0 +1,222 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#include <drm/drmP.h>
#include "amdgpu.h"
struct amdgpu_vram_mgr {
struct drm_mm mm;
spinlock_t lock;
};
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
* @man: TTM memory type manager
* @p_size: maximum size of VRAM
*
* Allocate and initialize the VRAM manager.
*/
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct amdgpu_vram_mgr *mgr;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
drm_mm_init(&mgr->mm, 0, p_size);
spin_lock_init(&mgr->lock);
man->priv = mgr;
return 0;
}
/**
* amdgpu_vram_mgr_fini - free and destroy VRAM manager
*
* @man: TTM memory type manager
*
* Destroy and free the VRAM manager; returns -EBUSY if ranges are still
* allocated inside it.
*/
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
if (!drm_mm_clean(&mgr->mm)) {
spin_unlock(&mgr->lock);
return -EBUSY;
}
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
return 0;
}
/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
* @mem: the resulting mem object
*
* Allocate VRAM for the given BO.
*/
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
unsigned i;
int r;
lpfn = place->lpfn;
if (!lpfn)
lpfn = man->size;
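/* A contiguous request, an explicit placement limit (place->lpfn), or
 * disabled splitting (amdgpu_vram_page_split == -1) must be served by a
 * single node; everything else is split into chunks of at most
 * amdgpu_vram_page_split pages (or the BO page alignment, if larger).
 */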
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
place->lpfn || amdgpu_vram_page_split == -1) {
pages_per_node = ~0ul;
num_nodes = 1;
} else {
pages_per_node = max((uint32_t)amdgpu_vram_page_split,
mem->page_alignment);
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
}
nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
if (!nodes)
return -ENOMEM;
if (place->flags & TTM_PL_FLAG_TOPDOWN) {
sflags = DRM_MM_SEARCH_BELOW;
aflags = DRM_MM_CREATE_TOP;
}
pages_left = mem->num_pages;
spin_lock(&mgr->lock);
for (i = 0; i < num_nodes; ++i) {
unsigned long pages = min(pages_left, pages_per_node);
uint32_t alignment = mem->page_alignment;
if (pages == pages_per_node)
alignment = pages_per_node;
else
sflags |= DRM_MM_SEARCH_BEST;
r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
alignment, 0,
place->fpfn, lpfn,
sflags, aflags);
if (unlikely(r))
goto error;
pages_left -= pages;
}
spin_unlock(&mgr->lock);
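/* A split allocation has no single GPU offset, so mem->start is set to
 * AMDGPU_BO_INVALID_OFFSET and consumers such as
 * amdgpu_vm_bo_split_mapping() walk the mm_node array instead.
 */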
mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
mem->mm_node = nodes;
return 0;
error:
while (i--)
drm_mm_remove_node(&nodes[i]);
spin_unlock(&mgr->lock);
kfree(nodes);
return r == -ENOSPC ? 0 : r;
}
/**
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
* @mem: TTM memory object
*
* Free the allocated VRAM again.
*/
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
if (!mem->mm_node)
return;
spin_lock(&mgr->lock);
while (pages) {
pages -= nodes->size;
drm_mm_remove_node(nodes);
++nodes;
}
spin_unlock(&mgr->lock);
kfree(mem->mm_node);
mem->mm_node = NULL;
}
/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
* @prefix: text prefix
*
* Dump the table content using printk.
*/
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
drm_mm_debug_table(&mgr->mm, prefix);
spin_unlock(&mgr->lock);
}
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
amdgpu_vram_mgr_init,
amdgpu_vram_mgr_fini,
amdgpu_vram_mgr_new,
amdgpu_vram_mgr_del,
amdgpu_vram_mgr_debug
};
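Note: this table plugs the allocator into TTM's per-memory-type manager interface. A sketch of the wiring, assuming the usual init path in amdgpu_ttm.c (an assumption, not shown in this diff):

    /* Point the VRAM memory type at the per-node manager above instead
     * of the default ttm_bo_manager_func range manager.
     */
    static void example_use_vram_mgr(struct ttm_mem_type_manager *man)
    {
            man->func = &amdgpu_vram_mgr_func;
    }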


@@ -31,6 +31,7 @@
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "atombios_crtc.h"
#include "amdgpu_atombios.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

Some files were not shown because too many files have changed in this diff.