dmaengine updates for v5.19-rc1
New support:
 - Tegra gpcdma driver support
 - Qualcomm SM8350, SM8450 and SC7280 device support
 - Renesas RZN1 dma and platform support

Updates:
 - stm32 device pause/resume support and updates
 - DMA memset ops documentation and usage clarification
 - deprecate '#dma-channels' & '#dma-requests' bindings
 - driver updates for stm32, ptdma, idxd etc.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmKTr5oACgkQfBQHDyUj
g0ePBw//UP+A+PPvTdQdlq/spO9Hb76lB1UZ7x7nVsObovyO2hxQl61b5Xo9o8eH
0VIIVB9OU4ysp8eX5Y6m7CUFKa/4MyUSU1HKdspseoap3JKg1EAHEGdhjR++V/dF
mqPN7VvmTbW8YDQ6b7Xz/mZedxOSJZL+wltCT2AQGLV1PD+BPZyBfkPl9NarpaX6
OeKatnMiJlZwFjQeVijiqCUx0xZV0G1XfQJDIEzRaBBvYAiHYTjbPUBZVsu5BjoC
70HtxhDKHJu0JFPa91gm7rqhj8XTKFoIGQU7jZqlpgr1IoYvfnotHoQeURa3yviZ
lZ6oW0+Y3RKyCcMH5iir2YEGdeaDXEPRb1YS/rz1vcf9b8JNqxXuM9i8Z2EXCVjd
qVxC9HzVCBh5EHuJGi1DFoHMrw/NXUanbWqW8C0FzqqTcqvp6DceAgzqcd1FJjwl
lgZM7Y5r0WXMzbbhOeOQP34ps+mY17rsBn210K/H75fZW8kTsdwiCOL4VlaK1p/z
CCJPYXkxEChbrIYoshXNTqg61bt9F2sEgJ+7FFUbUUOTLlQKFJUZ7fuoU896rDto
GndspWpxaslgAzdPuWSKBeR+b9IubgLgKF1BKSTYR6coyUt+hRJFiAx1juAOYbHe
CrJat0luP+hELgt1f2TjyYYZFj9Wc84tnqI+ThzXK0GyEN4Ax1c=
=ANxH
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "Nothing special; this includes a couple of new device supports, new
  driver support, and a bunch of driver updates.

  New support:
   - Tegra gpcdma driver support
   - Qualcomm SM8350, SM8450 and SC7280 device support
   - Renesas RZN1 dma and platform support

  Updates:
   - stm32 device pause/resume support and updates
   - DMA memset ops documentation and usage clarification
   - deprecate '#dma-channels' & '#dma-requests' bindings
   - driver updates for stm32, ptdma, idxd etc"

* tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (87 commits)
  dmaengine: idxd: make idxd_wq_enable() return 0 if wq is already enabled
  dmaengine: sun6i: Add support for the D1 variant
  dmaengine: sun6i: Add support for 34-bit physical addresses
  dmaengine: sun6i: Do not use virt_to_phys
  dt-bindings: dma: sun50i-a64: Add compatible for D1
  dmaengine: tegra: Remove unused switch case
  dmaengine: tegra: Fix uninitialized variable usage
  dmaengine: stm32-dma: add device_pause/device_resume support
  dmaengine: stm32-dma: rename pm ops before dma pause/resume introduction
  dmaengine: stm32-dma: pass DMA_SxSCR value to stm32_dma_handle_chan_done()
  dmaengine: stm32-dma: introduce stm32_dma_sg_inc to manage chan->next_sg
  dmaengine: stm32-dmamux: avoid reset of dmamux if used by coprocessor
  dmaengine: qcom: gpi: Add support for sc7280
  dt-bindings: dma: pl330: Add power-domains
  dmaengine: stm32-mdma: use dev_dbg on non-busy channel spurious it
  dmaengine: stm32-mdma: fix chan initialization in stm32_mdma_irq_handler()
  dmaengine: stm32-mdma: remove GISR1 register
  dmaengine: ti: deprecate '#dma-channels'
  dmaengine: mmp: deprecate '#dma-channels'
  dmaengine: pxa: deprecate '#dma-channels' and '#dma-requests'
  ...
commit b00ed48bb0
@@ -39,6 +39,17 @@ properties:
  '#power-domain-cells':
    const: 0

+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 1
+
+patternProperties:
+  "^dma-router@[a-f0-9]+$":
+    type: object
+    $ref: "../dma/renesas,rzn1-dmamux.yaml#"
+
required:
  - compatible
  - reg
@@ -20,9 +20,11 @@ properties:
  compatible:
    oneOf:
-      - const: allwinner,sun50i-a64-dma
-      - const: allwinner,sun50i-a100-dma
-      - const: allwinner,sun50i-h6-dma
+      - enum:
+          - allwinner,sun20i-d1-dma
+          - allwinner,sun50i-a64-dma
+          - allwinner,sun50i-a100-dma
+          - allwinner,sun50i-h6-dma
      - items:
          - const: allwinner,sun8i-r40-dma
          - const: allwinner,sun50i-a64-dma

@@ -58,6 +60,7 @@ if:
  properties:
    compatible:
      enum:
+        - allwinner,sun20i-d1-dma
        - allwinner,sun50i-a100-dma
        - allwinner,sun50i-h6-dma
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Altera mSGDMA IP core

maintainers:
-  - Olivier Dautricourt <olivier.dautricourt@orolia.com>
+  - Olivier Dautricourt <olivierdautricourt@gmail.com>

description: |
  Altera / Intel modular Scatter-Gather Direct Memory Access (mSGDMA)
@@ -55,6 +55,9 @@ properties:

  dma-coherent: true

+  power-domains:
+    maxItems: 1
+
  resets:
    minItems: 1
    maxItems: 2
@@ -10,10 +10,12 @@ Required properties:
  or one irq for pdma device

Optional properties:
-- #dma-channels: Number of DMA channels supported by the controller (defaults
+- dma-channels: Number of DMA channels supported by the controller (defaults
  to 32 when not specified)
-- #dma-requests: Number of DMA requestor lines supported by the controller
+- #dma-channels: deprecated
+- dma-requests: Number of DMA requestor lines supported by the controller
  (defaults to 32 when not specified)
+- #dma-requests: deprecated

"marvell,pdma-1.0"
Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688.

@@ -33,7 +35,7 @@ pdma: dma-controller@d4000000 {
    reg = <0xd4000000 0x10000>;
    interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>;
    interrupt-parent = <&intcmux32>;
-   #dma-channels = <16>;
+   dma-channels = <16>;
};

/*

@@ -45,7 +47,7 @@ pdma: dma-controller@d4000000 {
    compatible = "marvell,pdma-1.0";
    reg = <0xd4000000 0x10000>;
    interrupts = <47>;
-   #dma-channels = <16>;
+   dma-channels = <16>;
};
@@ -0,0 +1,110 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/nvidia,tegra186-gpc-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: NVIDIA Tegra GPC DMA Controller Device Tree Bindings

description: |
  The Tegra General Purpose Central (GPC) DMA controller is used for faster
  data transfers between memory to memory, memory to device and device to
  memory.

maintainers:
  - Jon Hunter <jonathanh@nvidia.com>
  - Rajesh Gumasta <rgumasta@nvidia.com>

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  compatible:
    oneOf:
      - const: nvidia,tegra186-gpcdma
      - items:
          - const: nvidia,tegra194-gpcdma
          - const: nvidia,tegra186-gpcdma

  "#dma-cells":
    const: 1

  reg:
    maxItems: 1

  interrupts:
    description:
      Should contain all of the per-channel DMA interrupts in
      ascending order with respect to the DMA channel index.
    minItems: 1
    maxItems: 31

  resets:
    maxItems: 1

  reset-names:
    const: gpcdma

  iommus:
    maxItems: 1

  dma-coherent: true

required:
  - compatible
  - reg
  - interrupts
  - resets
  - reset-names
  - "#dma-cells"
  - iommus

additionalProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/memory/tegra186-mc.h>
    #include <dt-bindings/reset/tegra186-reset.h>

    dma-controller@2600000 {
        compatible = "nvidia,tegra186-gpcdma";
        reg = <0x2600000 0x210000>;
        resets = <&bpmp TEGRA186_RESET_GPCDMA>;
        reset-names = "gpcdma";
        interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
        #dma-cells = <1>;
        iommus = <&smmu TEGRA186_SID_GPCDMA_0>;
        dma-coherent;
    };
...
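[Editor's note] A consumer references one of these channels through the single
specifier cell; the snippet below is illustrative only (the gpcdma label and
request number are assumptions, not part of the binding example above):

    serial@3100000 {
        /* one cell: the GPC DMA request (slave) ID */
        dmas = <&gpcdma 8>, <&gpcdma 8>;
        dma-names = "rx", "tx";
    };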
@@ -19,9 +19,12 @@ allOf:
properties:
  compatible:
    enum:
+      - qcom,sc7280-gpi-dma
      - qcom,sdm845-gpi-dma
      - qcom,sm8150-gpi-dma
      - qcom,sm8250-gpi-dma
+      - qcom,sm8350-gpi-dma
+      - qcom,sm8450-gpi-dma

  reg:
    maxItems: 1
@@ -42,11 +42,10 @@ properties:
          - const: renesas,rcar-dmac

      - items:
-          - const: renesas,dmac-r8a779a0 # R-Car V3U
-
-      - items:
-          - const: renesas,dmac-r8a779f0 # R-Car S4-8
-          - const: renesas,rcar-gen4-dmac
+          - enum:
+              - renesas,dmac-r8a779a0 # R-Car V3U
+              - renesas,dmac-r8a779f0 # R-Car S4-8
+          - const: renesas,rcar-gen4-dmac # R-Car Gen4

  reg: true

@@ -121,7 +120,6 @@ if:
    compatible:
      contains:
        enum:
-          - renesas,dmac-r8a779a0
          - renesas,rcar-gen4-dmac
then:
  properties:
@@ -0,0 +1,51 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/renesas,rzn1-dmamux.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas RZ/N1 DMA mux

maintainers:
  - Miquel Raynal <miquel.raynal@bootlin.com>

allOf:
  - $ref: "dma-router.yaml#"

properties:
  compatible:
    const: renesas,rzn1-dmamux

  reg:
    maxItems: 1
    description: DMA mux first register offset within the system control parent.

  '#dma-cells':
    const: 6
    description:
      The first four cells are dedicated to the master DMA controller. The fifth
      cell gives the DMA mux bit index that must be set starting from 0. The
      sixth cell gives the binary value that must be written there, ie. 0 or 1.

  dma-masters:
    minItems: 1
    maxItems: 2

  dma-requests:
    const: 32

required:
  - reg
  - dma-requests

additionalProperties: false

examples:
  - |
    dma-router@a0 {
        compatible = "renesas,rzn1-dmamux";
        reg = <0xa0 4>;
        #dma-cells = <6>;
        dma-masters = <&dma0 &dma1>;
        dma-requests = <32>;
    };
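[Editor's note] To see the six-cell specifier in practice: a hypothetical
client node routing its request through the mux would carry the four master
cells first, then the mux bit index and value described under '#dma-cells'
(node name and numbers are assumed for illustration, not from the commit):

    client@0 {
        /* 4 master cells, then DMAMUX bit index, then bit value */
        dmas = <&dmamux 0 0 0 0 0 1>;
        dma-names = "rx";
    };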
@@ -28,7 +28,15 @@ allOf:
properties:
  compatible:
    items:
-      - const: sifive,fu540-c000-pdma
+      - enum:
+          - sifive,fu540-c000-pdma
+      - const: sifive,pdma0
+    description:
+      Should be "sifive,<chip>-pdma" and "sifive,pdma<version>".
+      Supported compatible strings are -
+      "sifive,fu540-c000-pdma" for the SiFive PDMA v0 as integrated onto the
+      SiFive FU540 chip resp and "sifive,pdma0" for the SiFive PDMA v0 IP block
+      with no chip integration tweaks.

  reg:
    maxItems: 1

@@ -37,6 +45,12 @@ properties:
    minItems: 1
    maxItems: 8

+  dma-channels:
+    description: For backwards-compatibility, the default value is 4
+    minimum: 1
+    maximum: 4
+    default: 4
+
  '#dma-cells':
    const: 1

@@ -50,8 +64,9 @@ unevaluatedProperties: false
examples:
  - |
    dma-controller@3000000 {
-      compatible = "sifive,fu540-c000-pdma";
+      compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
      reg = <0x3000000 0x8000>;
+      dma-channels = <4>;
      interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>, <30>;
      #dma-cells = <1>;
    };
@@ -15,7 +15,13 @@ allOf:

properties:
  compatible:
-    const: snps,dma-spear1340
+    oneOf:
+      - const: snps,dma-spear1340
+      - items:
+          - enum:
+              - renesas,r9a06g032-dma
+          - const: renesas,rzn1-dma

  "#dma-cells":
    minimum: 3
@@ -8,10 +8,13 @@ Required properties:
- interrupts: Should contain one interrupt shared by all channel.
- #dma-cells: must be <1>. Used to represent the number of integer
  cells in the dmas property of client device.
-- #dma-channels : Number of DMA channels supported. Should be 32.
+- dma-channels : Number of DMA channels supported. Should be 32.
- clock-names: Should contain the clock of the DMA controller.
- clocks: Should contain a clock specifier for each entry in clock-names.

+Deprecated properties:
+- #dma-channels : Number of DMA channels supported. Should be 32.
+
Example:

Controller:

@@ -20,7 +23,7 @@ apdma: dma-controller@20100000 {
    reg = <0x20100000 0x4000>;
    interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
    #dma-cells = <1>;
-   #dma-channels = <32>;
+   dma-channels = <32>;
    clock-names = "enable";
    clocks = <&clk_ap_ahb_gates 5>;
};
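[Editor's note] Drivers typically keep compatibility with existing DTBs by
trying the new property name first and falling back to the deprecated one —
a minimal probe-time sketch of that fallback (variable names assumed):

    u32 dma_channels = 32;  /* documented default */

    /* Prefer 'dma-channels'; fall back to deprecated '#dma-channels'. */
    if (of_property_read_u32(np, "dma-channels", &dma_channels))
        of_property_read_u32(np, "#dma-channels", &dma_channels);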
@@ -110,7 +110,11 @@ axi_vdma_0: axivdma@40030000 {
Required properties:
- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
    where Channel ID is '0' for write/tx and '1' for read/rx
-   channel.
+   channel. For MCDMA, MM2S channel (write/tx) IDs start from
+   '0' and are in the [0-15] range. S2MM channel (read/rx) IDs
+   start from '16' and are in the [16-31] range. These channel
+   IDs are fixed irrespective of IP configuration.

- dma-names: a list of DMA channel names, one per "dmas" entry

Example:
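[Editor's note] As an illustration of that fixed numbering, a client using
the first MM2S (tx) and first S2MM (rx) MCDMA channels would reference IDs
0 and 16 (node names assumed):

    client@0 {
        dmas = <&axi_mcdma_0 0>, <&axi_mcdma_0 16>;
        dma-names = "tx", "rx";
    };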
@@ -206,6 +206,12 @@ Currently, the types available are:
  - The device is able to perform parity check using RAID6 P+Q
    algorithm against a memory buffer.

+- DMA_MEMSET
+
+  - The device is able to fill memory with the provided pattern
+
+  - The pattern is treated as a single byte signed value.
+
- DMA_INTERRUPT

  - The device is able to trigger a dummy transfer that will
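[Editor's note] Concretely, since only the low byte of the int value is
meaningful, a provider usually replicates it across the transfer width
before programming the hardware — a minimal sketch (names assumed), which
matches what the at_hdmac/at_xdmac hunks later in this page do:

    /* Replicate the single significant byte into a 32-bit fill word. */
    u8 byte = value & 0xff;
    u32 pattern = byte | (byte << 8) | (byte << 16) | (byte << 24);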
@@ -457,7 +463,7 @@ supported.
  - Should use dma_set_residue to report it

  - In the case of a cyclic transfer, it should only take into
-    account the current period.
+    account the total size of the cyclic buffer.

- Should return DMA_OUT_OF_ORDER if the device does not support in order
  completion and is completing the operation out of order.
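[Editor's note] In other words, for cyclic transfers the reported residue is
relative to the whole ring, not the period in flight — conceptually (a sketch
with assumed variable names, not from the patch):

    /* bytes remaining until the end of the cyclic buffer */
    residue = buf_len - bytes_consumed_into_buffer;
    dma_set_residue(txstate, residue);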
@@ -820,7 +820,7 @@ S:	Maintained
F:	drivers/mailbox/mailbox-altera.c

ALTERA MSGDMA IP CORE DRIVER
-M:	Olivier Dautricourt <olivier.dautricourt@orolia.com>
+M:	Olivier Dautricourt <olivierdautricourt@gmail.com>
R:	Stefan Roese <sr@denx.de>
L:	dmaengine@vger.kernel.org
S:	Odd Fixes

@@ -19202,6 +19202,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
M:	Viresh Kumar <vireshk@kernel.org>
R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S:	Maintained
+F:	Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml
F:	Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
F:	drivers/dma/dw/
F:	include/dt-bindings/dma/dw-dmac.h
@@ -16,13 +16,17 @@
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
+#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>

+#define R9A06G032_SYSCTRL_DMAMUX 0xA0
+
struct r9a06g032_gate {
    u16 gate, reset, ready, midle,
        scon, mirack, mistat;

@@ -315,6 +319,30 @@ struct r9a06g032_priv {
    void __iomem *reg;
};

+static struct r9a06g032_priv *sysctrl_priv;
+
+/* Exported helper to access the DMAMUX register */
+int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val)
+{
+    unsigned long flags;
+    u32 dmamux;
+
+    if (!sysctrl_priv)
+        return -EPROBE_DEFER;
+
+    spin_lock_irqsave(&sysctrl_priv->lock, flags);
+
+    dmamux = readl(sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX);
+    dmamux &= ~mask;
+    dmamux |= val & mask;
+    writel(dmamux, sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX);
+
+    spin_unlock_irqrestore(&sysctrl_priv->lock, flags);
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(r9a06g032_sysctrl_set_dmamux);
+
/* register/bit pairs are encoded as an uint16_t */
static void
clk_rdesc_set(struct r9a06g032_priv *clocks,

@@ -963,7 +991,17 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
    if (error)
        return error;

-   return r9a06g032_add_clk_domain(dev);
+   error = r9a06g032_add_clk_domain(dev);
+   if (error)
+       return error;
+
+   sysctrl_priv = clocks;
+
+   error = of_platform_populate(np, NULL, NULL, dev);
+   if (error)
+       dev_err(dev, "Failed to populate children (%d)\n", error);
+
+   return 0;
}

static const struct of_device_id r9a06g032_match[] = {
@@ -163,7 +163,7 @@ config DMA_SUN4I

config DMA_SUN6I
    tristate "Allwinner A31 SoCs DMA support"
-   depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+   depends on ARCH_SUNXI || COMPILE_TEST
    depends on RESET_CONTROLLER
    select DMA_ENGINE
    select DMA_VIRTUAL_CHANNELS

@@ -629,6 +629,18 @@ config TXX9_DMAC
      Support the TXx9 SoC internal DMA controller.  This can be
      integrated in chips such as the Toshiba TX4927/38/39.

+config TEGRA186_GPC_DMA
+   tristate "NVIDIA Tegra GPC DMA support"
+   depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT
+   depends on IOMMU_API
+   select DMA_ENGINE
+   help
+     Support for the NVIDIA Tegra General Purpose Central DMA controller.
+     The DMA controller has multiple DMA channels which can be configured
+     for different peripherals like UART, SPI, etc which are on APB bus.
+     This DMA controller transfers data from memory to peripheral FIFO
+     or vice versa. It also supports memory to memory data transfer.
+
config TEGRA20_APB_DMA
    tristate "NVIDIA Tegra20 APB DMA support"
    depends on ARCH_TEGRA || COMPILE_TEST
@@ -72,6 +72,7 @@ obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
@@ -1535,14 +1535,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
    vchan_free_chan_resources(to_virt_chan(chan));
}

-static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
-        struct dma_chan *chan, unsigned long flags)
-{
-    struct dma_async_tx_descriptor *retval = NULL;
-
-    return retval;
-}
-
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function

@@ -2760,7 +2752,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
    pl08x->memcpy.dev = &adev->dev;
    pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
    pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
-   pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
    pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
    pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
    pl08x->memcpy.device_config = pl08x_config;

@@ -2787,8 +2778,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
    pl08x->slave.dev = &adev->dev;
    pl08x->slave.device_free_chan_resources =
        pl08x_free_chan_resources;
-   pl08x->slave.device_prep_dma_interrupt =
-       pl08x_prep_dma_interrupt;
    pl08x->slave.device_tx_status = pl08x_dma_tx_status;
    pl08x->slave.device_issue_pending = pl08x_issue_pending;
    pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
@@ -942,6 +942,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
    struct at_desc *desc;
    void __iomem *vaddr;
    dma_addr_t paddr;
+   char fill_pattern;

    dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
             &dest, value, len, flags);

@@ -963,7 +964,14 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
             __func__);
        return NULL;
    }
-   *(u32*)vaddr = value;
+
+   /* Only the first byte of value is to be used according to dmaengine */
+   fill_pattern = (char)value;
+
+   *(u32*)vaddr = (fill_pattern << 24) |
+                  (fill_pattern << 16) |
+                  (fill_pattern << 8) |
+                  fill_pattern;

    desc = atc_create_memset_desc(chan, paddr, dest, len);
    if (!desc) {
@@ -1202,6 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
    unsigned long flags;
    size_t ublen;
    u32 dwidth;
+   char pattern;
    /*
     * WARNING: The channel configuration is set here since there is no
     * dmaengine_slave_config call in this case. Moreover we don't know the

@@ -1244,10 +1245,16 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,

    chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

+   /* Only the first byte of value is to be used according to dmaengine */
+   pattern = (char)value;
+
    ublen = len >> dwidth;

    desc->lld.mbr_da = dst_addr;
-   desc->lld.mbr_ds = value;
+   desc->lld.mbr_ds = (pattern << 24) |
+                      (pattern << 16) |
+                      (pattern << 8) |
+                      pattern;
    desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
        | AT_XDMAC_MBR_UBC_NDEN
        | AT_XDMAC_MBR_UBC_NSEN
@@ -17,7 +17,9 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -8,6 +8,7 @@

#include <linux/clk.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>

@@ -911,6 +912,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)

    dd = &jzdma->dma_device;

+   /*
+    * The real segment size limit is dependent on the size unit selected
+    * for the transfer. Because the size unit is selected automatically
+    * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
+    * ensure the 24-bit transfer count in the descriptor cannot overflow.
+    */
+   dma_set_max_seg_size(dev, 0xffffff);
+
    dma_cap_set(DMA_MEMCPY, dd->cap_mask);
    dma_cap_set(DMA_SLAVE, dd->cap_mask);
    dma_cap_set(DMA_CYCLIC, dd->cap_mask);
@@ -1053,9 +1053,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
     * When the chan_id is a negative value, we are dynamically adding
     * the channel. Otherwise we are static enumerating.
     */
-   mutex_lock(&device->chan_mutex);
    chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
-   mutex_unlock(&device->chan_mutex);
    if (chan->chan_id < 0) {
        pr_err("%s: unable to alloc ida for chan: %d\n",
               __func__, chan->chan_id);

@@ -1078,9 +1076,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
    return 0;

 err_out_ida:
-   mutex_lock(&device->chan_mutex);
    ida_free(&device->chan_ida, chan->chan_id);
-   mutex_unlock(&device->chan_mutex);
 err_free_dev:
    kfree(chan->dev);
 err_free_local:

@@ -1113,9 +1109,7 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
    device->chancnt--;
    chan->dev->chan = NULL;
    mutex_unlock(&dma_list_mutex);
-   mutex_lock(&device->chan_mutex);
    ida_free(&device->chan_ida, chan->chan_id);
-   mutex_unlock(&device->chan_mutex);
    device_unregister(&chan->dev->device);
    free_percpu(chan->local);
}

@@ -1250,7 +1244,6 @@ int dma_async_device_register(struct dma_device *device)
    if (rc != 0)
        return rc;

-   mutex_init(&device->chan_mutex);
    ida_init(&device->chan_ida);

    /* represent channels in sysfs. Probably want devs too */
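[Editor's note] The mutex can go because the IDA API is internally
synchronized, so concurrent allocation and free need no caller-side locking;
for example, this stands alone (illustrative, not from the patch):

    static DEFINE_IDA(chan_ida);

    int id = ida_alloc(&chan_ida, GFP_KERNEL); /* thread-safe as-is */
    if (id >= 0)
        ida_free(&chan_ida, id);               /* likewise, no lock needed */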
@@ -675,10 +675,16 @@ static int dmatest_func(void *data)
    /*
     * src and dst buffers are freed by ourselves below
     */
-   if (params->polled)
+   if (params->polled) {
        flags = DMA_CTRL_ACK;
-   else
-       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+   } else {
+       if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
+           flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+       } else {
+           pr_err("Channel does not support interrupt!\n");
+           goto err_pq_array;
+       }
+   }

    ktime = ktime_get();
    while (!(kthread_should_stop() ||

@@ -906,6 +912,7 @@ static int dmatest_func(void *data)
    runtime = ktime_to_us(ktime);

    ret = 0;
+err_pq_array:
    kfree(dma_pq);
err_srcs_array:
    kfree(srcs);
@@ -16,6 +16,15 @@ config DW_DMAC
      Support the Synopsys DesignWare AHB DMA controller. This
      can be integrated in chips such as the Intel Cherrytrail.

+config RZN1_DMAMUX
+   tristate "Renesas RZ/N1 DMAMUX driver"
+   depends on DW_DMAC
+   depends on ARCH_RZN1 || COMPILE_TEST
+   help
+     Support the Renesas RZ/N1 DMAMUX which is located in front of
+     the Synopsys DesignWare AHB DMA controller located on Renesas
+     SoCs.
+
config DW_DMAC_PCI
    tristate "Synopsys DesignWare AHB DMA PCI driver"
    depends on PCI
@@ -9,3 +9,5 @@ dw_dmac-$(CONFIG_OF) += of.o

obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
dw_dmac_pci-y := pci.o
+
+obj-$(CONFIG_RZN1_DMAMUX) += rzn1-dmamux.o
@@ -137,6 +137,7 @@ static void dw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
    { .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
+   { .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata },
    {}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
@@ -0,0 +1,155 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Schneider-Electric
 * Author: Miquel Raynal <miquel.raynal@bootlin.com>
 * Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#include <linux/bitops.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/types.h>

#define RNZ1_DMAMUX_NCELLS 6
#define RZN1_DMAMUX_MAX_LINES 64
#define RZN1_DMAMUX_LINES_PER_CTLR 16

struct rzn1_dmamux_data {
    struct dma_router dmarouter;
    DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR);
};

struct rzn1_dmamux_map {
    unsigned int req_idx;
};

static void rzn1_dmamux_free(struct device *dev, void *route_data)
{
    struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev);
    struct rzn1_dmamux_map *map = route_data;

    dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx);

    clear_bit(map->req_idx, dmamux->used_chans);

    kfree(map);
}

static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
                                        struct of_dma *ofdma)
{
    struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
    struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev);
    struct rzn1_dmamux_map *map;
    unsigned int dmac_idx, chan, val;
    u32 mask;
    int ret;

    if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
        return ERR_PTR(-EINVAL);

    map = kzalloc(sizeof(*map), GFP_KERNEL);
    if (!map)
        return ERR_PTR(-ENOMEM);

    chan = dma_spec->args[0];
    map->req_idx = dma_spec->args[4];
    val = dma_spec->args[5];
    dma_spec->args_count -= 2;

    if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) {
        dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan);
        ret = -EINVAL;
        goto free_map;
    }

    if (map->req_idx >= RZN1_DMAMUX_MAX_LINES ||
        (map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) {
        dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx);
        ret = -EINVAL;
        goto free_map;
    }

    dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0;
    dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx);
    if (!dma_spec->np) {
        dev_err(&pdev->dev, "Can't get DMA master\n");
        ret = -EINVAL;
        goto free_map;
    }

    dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n",
            map->req_idx, dmac_idx, chan);

    if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
        ret = -EBUSY;
        goto free_map;
    }

    mask = BIT(map->req_idx);
    ret = r9a06g032_sysctrl_set_dmamux(mask, val ? mask : 0);
    if (ret)
        goto clear_bitmap;

    return map;

clear_bitmap:
    clear_bit(map->req_idx, dmamux->used_chans);
free_map:
    kfree(map);

    return ERR_PTR(ret);
}

static const struct of_device_id rzn1_dmac_match[] = {
    { .compatible = "renesas,rzn1-dma" },
    {}
};

static int rzn1_dmamux_probe(struct platform_device *pdev)
{
    struct device_node *mux_node = pdev->dev.of_node;
    const struct of_device_id *match;
    struct device_node *dmac_node;
    struct rzn1_dmamux_data *dmamux;

    dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
    if (!dmamux)
        return -ENOMEM;

    dmac_node = of_parse_phandle(mux_node, "dma-masters", 0);
    if (!dmac_node)
        return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n");

    match = of_match_node(rzn1_dmac_match, dmac_node);
    of_node_put(dmac_node);
    if (!match)
        return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n");

    dmamux->dmarouter.dev = &pdev->dev;
    dmamux->dmarouter.route_free = rzn1_dmamux_free;

    platform_set_drvdata(pdev, dmamux);

    return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate,
                                  &dmamux->dmarouter);
}

static const struct of_device_id rzn1_dmamux_match[] = {
    { .compatible = "renesas,rzn1-dmamux" },
    {}
};

static struct platform_driver rzn1_dmamux_driver = {
    .driver = {
        .name = "renesas,rzn1-dmamux",
        .of_match_table = rzn1_dmamux_match,
    },
    .probe = rzn1_dmamux_probe,
};
module_platform_driver(rzn1_dmamux_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Renesas RZ/N1 DMAMUX driver");
@@ -132,7 +132,7 @@ struct ep93xx_dma_desc {
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
- * @edma: pointer to to the engine device
+ * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
@@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
    ctx->wq = wq;
    filp->private_data = ctx;

-   if (device_pasid_enabled(idxd)) {
+   if (device_user_pasid_enabled(idxd)) {
        sva = iommu_sva_bind_device(dev, current->mm, NULL);
        if (IS_ERR(sva)) {
            rc = PTR_ERR(sva);

@@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
    if (wq_shared(wq)) {
        idxd_device_drain_pasid(idxd, ctx->pasid);
    } else {
-       if (device_pasid_enabled(idxd)) {
+       if (device_user_pasid_enabled(idxd)) {
            /* The wq disable in the disable pasid function will drain the wq */
            rc = idxd_wq_disable_pasid(wq);
            if (rc < 0)

@@ -314,7 +314,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)

    mutex_lock(&wq->wq_lock);
    wq->type = IDXD_WQT_USER;
-   rc = __drv_enable_wq(wq);
+   rc = drv_enable_wq(wq);
    if (rc < 0)
        goto err;

@@ -329,7 +329,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
    return 0;

 err_cdev:
-   __drv_disable_wq(wq);
+   drv_disable_wq(wq);
 err:
    wq->type = IDXD_WQT_NONE;
    mutex_unlock(&wq->wq_lock);

@@ -342,7 +342,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)

    mutex_lock(&wq->wq_lock);
    idxd_wq_del_cdev(wq);
-   __drv_disable_wq(wq);
+   drv_disable_wq(wq);
    wq->type = IDXD_WQT_NONE;
    mutex_unlock(&wq->wq_lock);
}

@@ -369,10 +369,16 @@ int idxd_cdev_register(void)
        rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
                                 ictx[i].name);
        if (rc)
-           return rc;
+           goto err_free_chrdev_region;
    }

    return 0;

+err_free_chrdev_region:
+   for (i--; i >= 0; i--)
+       unregister_chrdev_region(ictx[i].devt, MINORMASK);
+
+   return rc;
}

void idxd_cdev_remove(void)
@@ -184,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq)

    if (wq->state == IDXD_WQ_ENABLED) {
        dev_dbg(dev, "WQ %d already enabled\n", wq->id);
-       return -ENXIO;
+       return 0;
    }

    idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

@@ -299,24 +299,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
    }
}

-int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{
    struct idxd_device *idxd = wq->idxd;
-   int rc;
    union wqcfg wqcfg;
    unsigned int offset;

-   rc = idxd_wq_disable(wq, false);
-   if (rc < 0)
-       return rc;
+   offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
+   spin_lock(&idxd->dev_lock);
+   wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
+   wqcfg.priv = priv;
+   wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
+   iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
+   spin_unlock(&idxd->dev_lock);
+}
+
+static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
+{
+   struct idxd_device *idxd = wq->idxd;
+   union wqcfg wqcfg;
+   unsigned int offset;

    offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
    spin_lock(&idxd->dev_lock);
    wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
    wqcfg.pasid_en = 1;
    wqcfg.pasid = pasid;
+   wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
    iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
    spin_unlock(&idxd->dev_lock);
+}
+
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+{
+   int rc;
+
+   rc = idxd_wq_disable(wq, false);
+   if (rc < 0)
+       return rc;
+
+   __idxd_wq_set_pasid_locked(wq, pasid);

    rc = idxd_wq_enable(wq);
    if (rc < 0)

@@ -555,19 +577,15 @@ int idxd_device_disable(struct idxd_device *idxd)
        return -ENXIO;
    }

-   spin_lock(&idxd->dev_lock);
    idxd_device_clear_state(idxd);
-   idxd->state = IDXD_DEV_DISABLED;
-   spin_unlock(&idxd->dev_lock);
    return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
    idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
-   spin_lock(&idxd->dev_lock);
    idxd_device_clear_state(idxd);
-   idxd->state = IDXD_DEV_DISABLED;
+   spin_lock(&idxd->dev_lock);
    idxd_unmask_error_interrupts(idxd);
    spin_unlock(&idxd->dev_lock);
}

@@ -694,15 +712,16 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
    int i;

-   lockdep_assert_held(&idxd->dev_lock);
    for (i = 0; i < idxd->max_wqs; i++) {
        struct idxd_wq *wq = idxd->wqs[i];

+       mutex_lock(&wq->wq_lock);
        if (wq->state == IDXD_WQ_ENABLED) {
            idxd_wq_disable_cleanup(wq);
            wq->state = IDXD_WQ_DISABLED;
        }
        idxd_wq_device_reset_cleanup(wq);
+       mutex_unlock(&wq->wq_lock);
    }
}

@@ -711,9 +730,12 @@ void idxd_device_clear_state(struct idxd_device *idxd)
    if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
        return;

+   idxd_device_wqs_clear_state(idxd);
+   spin_lock(&idxd->dev_lock);
    idxd_groups_clear_state(idxd);
    idxd_engines_clear_state(idxd);
-   idxd_device_wqs_clear_state(idxd);
+   idxd->state = IDXD_DEV_DISABLED;
+   spin_unlock(&idxd->dev_lock);
}

@@ -799,7 +821,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
     */
    for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
        wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
-       wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+       wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
    }

    if (wq->size == 0 && wq->type != IDXD_WQT_NONE)

@@ -815,14 +837,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
    if (wq_dedicated(wq))
        wq->wqcfg->mode = 1;

-   if (device_pasid_enabled(idxd)) {
-       wq->wqcfg->pasid_en = 1;
-       if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
-           wq->wqcfg->pasid = idxd->pasid;
-   }
-
    /*
-    * Here the priv bit is set depending on the WQ type. priv = 1 if the
+    * The WQ priv bit is set depending on the WQ type. priv = 1 if the
     * WQ type is kernel to indicate privileged access. This setting only
     * matters for dedicated WQ. According to the DSA spec:
     * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the

@@ -832,7 +848,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
     * In the case of a dedicated kernel WQ that is not able to support
     * the PASID cap, then the configuration will be rejected.
     */
-   wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
    if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
        !idxd_device_pasid_priv_enabled(idxd) &&
        wq->type == IDXD_WQT_KERNEL) {

@@ -953,7 +968,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
        if (!wq->group)
            continue;

-       if (wq_shared(wq) && !device_swq_supported(idxd)) {
+       if (wq_shared(wq) && !wq_shared_supported(wq)) {
            idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
            dev_warn(dev, "No shared wq support but configured.\n");
            return -EINVAL;

@@ -1018,6 +1033,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq)

    wq->priority = wq->wqcfg->priority;

+   wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
+   wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+
    for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
        wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
        dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);

@@ -1161,7 +1179,9 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
    struct idxd_device *idxd = wq->idxd;
    struct idxd_irq_entry *ie = &wq->ie;

-   synchronize_irq(ie->vector);
+   if (wq->type != IDXD_WQT_KERNEL)
+       return;
+
    free_irq(ie->vector, ie);
    idxd_flush_pending_descs(ie);
    if (idxd->request_int_handles)

@@ -1180,6 +1200,9 @@ int idxd_wq_request_irq(struct idxd_wq *wq)
    struct idxd_irq_entry *ie;
    int rc;

+   if (wq->type != IDXD_WQT_KERNEL)
+       return 0;
+
    ie = &wq->ie;
    ie->vector = pci_irq_vector(pdev, ie->id);
    ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;

@@ -1211,7 +1234,7 @@ int idxd_wq_request_irq(struct idxd_wq *wq)
    return rc;
}

-int __drv_enable_wq(struct idxd_wq *wq)
+int drv_enable_wq(struct idxd_wq *wq)
{
    struct idxd_device *idxd = wq->idxd;
    struct device *dev = &idxd->pdev->dev;

@@ -1245,7 +1268,7 @@ int drv_enable_wq(struct idxd_wq *wq)

    /* Shared WQ checks */
    if (wq_shared(wq)) {
-       if (!device_swq_supported(idxd)) {
+       if (!wq_shared_supported(wq)) {
            idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
            dev_dbg(dev, "PASID not enabled and shared wq.\n");
            goto err;

@@ -1265,6 +1288,29 @@ int drv_enable_wq(struct idxd_wq *wq)
        }
    }

+   /*
+    * In the event that the WQ is configurable for pasid and priv bits.
+    * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
+    * However, for non-kernel wq, the driver should only set the pasid_en bit for
+    * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+    * pasid_en later on so there is no need to setup.
+    */
+   if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+       int priv = 0;
+
+       if (wq_pasid_enabled(wq)) {
+           if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
+               u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
+
+               __idxd_wq_set_pasid_locked(wq, pasid);
+           }
+       }
+
+       if (is_idxd_wq_kernel(wq))
+           priv = 1;
+       __idxd_wq_set_priv_locked(wq, priv);
+   }
+
    rc = 0;
    spin_lock(&idxd->dev_lock);
    if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

@@ -1289,8 +1335,36 @@ int drv_enable_wq(struct idxd_wq *wq)
    }

    wq->client_count = 0;

+   rc = idxd_wq_request_irq(wq);
+   if (rc < 0) {
+       idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+       dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+       goto err_irq;
+   }
+
+   rc = idxd_wq_alloc_resources(wq);
+   if (rc < 0) {
+       idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
+       dev_dbg(dev, "WQ resource alloc failed\n");
+       goto err_res_alloc;
+   }
+
+   rc = idxd_wq_init_percpu_ref(wq);
+   if (rc < 0) {
+       idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
+       dev_dbg(dev, "percpu_ref setup failed\n");
+       goto err_ref;
+   }
+
    return 0;

+err_ref:
+   idxd_wq_free_resources(wq);
+err_res_alloc:
+   idxd_wq_free_irq(wq);
+err_irq:
    idxd_wq_unmap_portal(wq);
err_map_portal:
    rc = idxd_wq_disable(wq, false);
    if (rc < 0)

@@ -1299,17 +1373,7 @@ int drv_enable_wq(struct idxd_wq *wq)
    return rc;
}

-int drv_enable_wq(struct idxd_wq *wq)
-{
-   int rc;
-
-   mutex_lock(&wq->wq_lock);
-   rc = __drv_enable_wq(wq);
-   mutex_unlock(&wq->wq_lock);
-   return rc;
-}
-
-void __drv_disable_wq(struct idxd_wq *wq)
+void drv_disable_wq(struct idxd_wq *wq)
{
    struct idxd_device *idxd = wq->idxd;
    struct device *dev = &idxd->pdev->dev;

@@ -1320,21 +1384,16 @@ void drv_disable_wq(struct idxd_wq *wq)
        dev_warn(dev, "Clients has claim on wq %d: %d\n",
                 wq->id, idxd_wq_refcount(wq));

+   idxd_wq_free_resources(wq);
    idxd_wq_unmap_portal(wq);
    idxd_wq_drain(wq);
+   idxd_wq_free_irq(wq);
    idxd_wq_reset(wq);
+   percpu_ref_exit(&wq->wq_active);
    wq->type = IDXD_WQT_NONE;
    wq->client_count = 0;
}

-void drv_disable_wq(struct idxd_wq *wq)
-{
-   mutex_lock(&wq->wq_lock);
-   __drv_disable_wq(wq);
-   mutex_unlock(&wq->wq_lock);
-}
-
int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
    struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
|
@ -87,6 +87,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
|
|||
hw->completion_addr = compl;
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
|
||||
{
|
||||
struct idxd_wq *wq = to_idxd_wq(c);
|
||||
u32 desc_flags;
|
||||
struct idxd_desc *desc;
|
||||
|
||||
if (wq->state != IDXD_WQ_ENABLED)
|
||||
return NULL;
|
||||
|
||||
op_flag_setup(flags, &desc_flags);
|
||||
desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
|
||||
if (IS_ERR(desc))
|
||||
return NULL;
|
||||
|
||||
idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
|
||||
0, 0, 0, desc->compl_dma, desc_flags);
|
||||
desc->txd.flags = flags;
|
||||
return &desc->txd;
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
|
||||
dma_addr_t dma_src, size_t len, unsigned long flags)
|
||||
|
@ -193,10 +214,12 @@ int idxd_register_dma_device(struct idxd_device *idxd)
|
|||
INIT_LIST_HEAD(&dma->channels);
|
||||
dma->dev = dev;
|
||||
|
||||
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
|
||||
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
|
||||
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
|
||||
dma->device_release = idxd_dma_release;
|
||||
|
||||
dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
|
||||
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
|
||||
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
|
||||
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
|
||||
|
@ -227,7 +250,7 @@ void idxd_unregister_dma_device(struct idxd_device *idxd)
|
|||
dma_async_device_unregister(&idxd->idxd_dma->dma);
|
||||
}
|
||||
|
||||
int idxd_register_dma_channel(struct idxd_wq *wq)
|
||||
static int idxd_register_dma_channel(struct idxd_wq *wq)
|
||||
{
|
||||
struct idxd_device *idxd = wq->idxd;
|
||||
struct dma_device *dma = &idxd->idxd_dma->dma;
|
||||
|
@ -264,7 +287,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void idxd_unregister_dma_channel(struct idxd_wq *wq)
|
||||
static void idxd_unregister_dma_channel(struct idxd_wq *wq)
|
||||
{
|
||||
struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
|
||||
struct dma_chan *chan = &idxd_chan->chan;
|
||||
|
@ -290,34 +313,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
|
|||
mutex_lock(&wq->wq_lock);
|
||||
wq->type = IDXD_WQT_KERNEL;
|
||||
|
||||
rc = idxd_wq_request_irq(wq);
|
||||
if (rc < 0) {
|
||||
idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
|
||||
dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
|
||||
goto err_irq;
|
||||
}
|
||||
|
||||
rc = __drv_enable_wq(wq);
|
||||
rc = drv_enable_wq(wq);
|
||||
if (rc < 0) {
|
||||
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
|
||||
rc = -ENXIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
rc = idxd_wq_alloc_resources(wq);
|
||||
if (rc < 0) {
|
||||
idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
|
||||
dev_dbg(dev, "WQ resource alloc failed\n");
|
||||
goto err_res_alloc;
|
||||
}
|
||||
|
||||
rc = idxd_wq_init_percpu_ref(wq);
|
||||
if (rc < 0) {
|
||||
idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
|
||||
dev_dbg(dev, "percpu_ref setup failed\n");
|
||||
goto err_ref;
|
||||
}
|
||||
|
||||
rc = idxd_register_dma_channel(wq);
|
||||
if (rc < 0) {
|
||||
idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
|
||||
|
@ -330,15 +332,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
|
|||
return 0;
|
||||
|
||||
err_dma:
|
||||
__idxd_wq_quiesce(wq);
|
||||
percpu_ref_exit(&wq->wq_active);
|
||||
err_ref:
|
||||
idxd_wq_free_resources(wq);
|
||||
err_res_alloc:
|
||||
__drv_disable_wq(wq);
|
||||
drv_disable_wq(wq);
|
||||
err:
|
||||
idxd_wq_free_irq(wq);
|
||||
err_irq:
|
||||
wq->type = IDXD_WQT_NONE;
|
||||
mutex_unlock(&wq->wq_lock);
|
||||
return rc;
|
||||
|
@ -351,11 +346,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
|
|||
mutex_lock(&wq->wq_lock);
|
||||
__idxd_wq_quiesce(wq);
|
||||
idxd_unregister_dma_channel(wq);
|
||||
idxd_wq_free_resources(wq);
|
||||
__drv_disable_wq(wq);
|
||||
percpu_ref_exit(&wq->wq_active);
|
||||
idxd_wq_free_irq(wq);
|
||||
wq->type = IDXD_WQT_NONE;
|
||||
drv_disable_wq(wq);
|
||||
mutex_unlock(&wq->wq_lock);
|
||||
}
|
||||
|
||||
|
|
|
@@ -239,6 +239,7 @@ enum idxd_device_flag {
    IDXD_FLAG_CONFIGURABLE = 0,
    IDXD_FLAG_CMD_RUNNING,
    IDXD_FLAG_PASID_ENABLED,
+   IDXD_FLAG_USER_PASID_ENABLED,
};

struct idxd_dma_dev {

@@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd)
    return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

-static inline bool device_swq_supported(struct idxd_device *idxd)
+static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
-   return (support_enqcmd && device_pasid_enabled(idxd));
+   return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+}
+
+static inline bool wq_pasid_enabled(struct idxd_wq *wq)
+{
+   return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
+          (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
+}
+
+static inline bool wq_shared_supported(struct idxd_wq *wq)
+{
+   return (support_enqcmd && wq_pasid_enabled(wq));
}

enum idxd_portal_prot {

@@ -559,9 +571,7 @@ void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
-int __drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
-void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);

@@ -602,8 +612,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-int idxd_register_dma_channel(struct idxd_wq *wq);
-void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type, bool free_desc);
@@ -512,18 +512,15 @@ static int idxd_probe(struct idxd_device *idxd)
    dev_dbg(dev, "IDXD reset complete\n");

    if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-       rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
-       if (rc == 0) {
-           rc = idxd_enable_system_pasid(idxd);
-           if (rc < 0) {
-               iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
-               dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
-           } else {
-               set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
-           }
-       } else {
-           dev_warn(dev, "Unable to turn on SVA feature.\n");
-       }
+       if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+           dev_warn(dev, "Unable to turn on user SVA feature.\n");
+       else
+           set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+
+       if (idxd_enable_system_pasid(idxd))
+           dev_warn(dev, "No in-kernel DMA with PASID.\n");
+       else
+           set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
    } else if (!sva) {
        dev_warn(dev, "User forced SVA off via module param.\n");
    }

@@ -561,7 +558,8 @@ static int idxd_probe(struct idxd_device *idxd)
 err:
    if (device_pasid_enabled(idxd))
        idxd_disable_system_pasid(idxd);
-   iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+   if (device_user_pasid_enabled(idxd))
+       iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
    return rc;
}

@@ -574,7 +572,8 @@ static void idxd_cleanup(struct idxd_device *idxd)
    idxd_cleanup_internals(idxd);
    if (device_pasid_enabled(idxd))
        idxd_disable_system_pasid(idxd);
-   iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+   if (device_user_pasid_enabled(idxd))
+       iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)

@@ -691,7 +690,8 @@ static void idxd_remove(struct pci_dev *pdev)
    free_irq(irq_entry->vector, irq_entry);
    pci_free_irq_vectors(pdev);
    pci_iounmap(pdev, idxd->reg_base);
-   iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+   if (device_user_pasid_enabled(idxd))
+       iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
    pci_disable_device(pdev);
    destroy_workqueue(idxd->wq);
    perfmon_pmu_remove(idxd);
|
@ -353,6 +353,7 @@ union wqcfg {
|
|||
} __packed;
|
||||
|
||||
#define WQCFG_PASID_IDX 2
|
||||
#define WQCFG_PRIVL_IDX 2
|
||||
#define WQCFG_OCCUP_IDX 6
|
||||
|
||||
#define WQCFG_OCCUP_MASK 0xffff
|
||||
|
|
|
@@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev,
    if (sysfs_streq(buf, "dedicated")) {
        set_bit(WQ_FLAG_DEDICATED, &wq->flags);
        wq->threshold = 0;
-   } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
+   } else if (sysfs_streq(buf, "shared")) {
        clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
    } else {
        return -EINVAL;

@@ -832,6 +832,7 @@ static ssize_t wq_name_store(struct device *dev,
                             size_t count)
{
    struct idxd_wq *wq = confdev_to_wq(dev);
+   char *input, *pos;

    if (wq->state != IDXD_WQ_DISABLED)
        return -EPERM;

@@ -846,9 +847,14 @@ static ssize_t wq_name_store(struct device *dev,
    if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
        return -EOPNOTSUPP;

+   input = kstrndup(buf, count, GFP_KERNEL);
+   if (!input)
+       return -ENOMEM;
+
+   pos = strim(input);
    memset(wq->name, 0, WQ_NAME_SIZE + 1);
-   strncpy(wq->name, buf, WQ_NAME_SIZE);
-   strreplace(wq->name, '\n', '\0');
+   sprintf(wq->name, "%s", pos);
+   kfree(input);
    return count;
}
@@ -751,7 +751,6 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
    struct mtk_cqdma_device *cqdma;
    struct mtk_cqdma_vchan *vc;
    struct dma_device *dd;
-   struct resource *res;
    int err;
    u32 i;

@@ -824,13 +823,10 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
            return PTR_ERR(cqdma->pc[i]->base);

        /* allocate IRQ resource */
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-       if (!res) {
-           dev_err(&pdev->dev, "No irq resource for %s\n",
-                   dev_name(&pdev->dev));
-           return -EINVAL;
-       }
-       cqdma->pc[i]->irq = res->start;
+       err = platform_get_irq(pdev, i);
+       if (err < 0)
+           return err;
+       cqdma->pc[i]->irq = err;

        err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
                               mtk_cqdma_irq, 0, dev_name(&pdev->dev),
@@ -601,7 +601,7 @@ static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
 			cb->flag = 0;
 		}

-		cb->vd = 0;
+		cb->vd = NULL;

 		/*
 		 * Recycle the RXD with the helper WRITE_ONCE that can ensure

@@ -923,13 +923,10 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
 		return PTR_ERR(hsdma->clk);
 	}

-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "No irq resource for %s\n",
-			dev_name(&pdev->dev));
-		return -EINVAL;
-	}
-	hsdma->irq = res->start;
+	err = platform_get_irq(pdev, 0);
+	if (err < 0)
+		return err;
+	hsdma->irq = err;

 	refcount_set(&hsdma->pc_refcnt, 0);
 	spin_lock_init(&hsdma->lock);
@@ -1043,13 +1043,17 @@ static int mmp_pdma_probe(struct platform_device *op)
 		return PTR_ERR(pdev->base);

 	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
-	if (of_id)
-		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
-				     &dma_channels);
-	else if (pdata && pdata->dma_channels)
+	if (of_id) {
+		/* Parse new and deprecated dma-channels properties */
+		if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
+					 &dma_channels))
+			of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+					     &dma_channels);
+	} else if (pdata && pdata->dma_channels) {
 		dma_channels = pdata->dma_channels;
-	else
+	} else {
 		dma_channels = 32;	/* default 32 channel */
+	}
 	pdev->dma_channels = dma_channels;

 	for (i = 0; i < dma_channels; i++) {
@@ -591,14 +591,14 @@ static void mv_xor_v2_tasklet(struct tasklet_struct *t)
 		dma_run_dependencies(&next_pending_sw_desc->async_tx);

 		/* Lock the channel */
-		spin_lock_bh(&xor_dev->lock);
+		spin_lock(&xor_dev->lock);

 		/* add the SW descriptor to the free descriptors list */
 		list_add(&next_pending_sw_desc->free_list,
 			 &xor_dev->free_sw_desc);

 		/* Release the channel */
-		spin_unlock_bh(&xor_dev->lock);
+		spin_unlock(&xor_dev->lock);

 		/* increment the next descriptor */
 		pending_ptr++;
@@ -1294,7 +1294,7 @@ static int nbpf_probe(struct platform_device *pdev)
 	struct device_node *np = dev->of_node;
 	struct nbpf_device *nbpf;
 	struct dma_device *dma_dev;
-	struct resource *iomem, *irq_res;
+	struct resource *iomem;
 	const struct nbpf_config *cfg;
 	int num_channels;
 	int ret, irq, eirq, i;

@@ -1335,13 +1335,11 @@ static int nbpf_probe(struct platform_device *pdev)
 	nbpf->config = cfg;

 	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
-		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-		if (!irq_res)
-			break;
-
-		for (irq = irq_res->start; irq <= irq_res->end;
-		     irq++, irqs++)
-			irqbuf[irqs] = irq;
+		irq = platform_get_irq_optional(pdev, i);
+		if (irq < 0 && irq != -ENXIO)
+			return irq;
+		if (irq > 0)
+			irqbuf[irqs++] = irq;
 	}

 	/*
@@ -137,7 +137,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
 	struct plx_dma_desc *desc;
 	u32 flags;

-	spin_lock_bh(&plxdev->ring_lock);
+	spin_lock(&plxdev->ring_lock);

 	while (plxdev->tail != plxdev->head) {
 		desc = plx_dma_get_desc(plxdev, plxdev->tail);

@@ -165,7 +165,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
 		plxdev->tail++;
 	}

-	spin_unlock_bh(&plxdev->ring_lock);
+	spin_unlock(&plxdev->ring_lock);
 }

 static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
@@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
 			     struct pt_passthru_engine *pt_engine)
 {
 	struct ptdma_desc desc;
+	struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);

 	cmd_q->cmd_error = 0;
 	cmd_q->total_pt_ops++;

@@ -111,19 +112,14 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
 	desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
 	desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);

+	if (cmd_q->int_en)
+		pt_core_enable_queue_interrupts(pt);
+	else
+		pt_core_disable_queue_interrupts(pt);
+
 	return pt_core_execute_cmd(&desc, cmd_q);
 }

-static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
-{
-	iowrite32(0, pt->cmd_q.reg_control + 0x000C);
-}
-
-static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
-{
-	iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
-}
-
 static void pt_do_cmd_complete(unsigned long data)
 {
 	struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;

@@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data)
 	cmd->pt_cmd_callback(cmd->data, cmd->ret);
 }

-static irqreturn_t pt_core_irq_handler(int irq, void *data)
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
 {
-	struct pt_device *pt = data;
-	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
 	u32 status;

-	pt_core_disable_queue_interrupts(pt);
-	pt->total_interrupts++;
 	status = ioread32(cmd_q->reg_control + 0x0010);
 	if (status) {
 		cmd_q->int_status = status;

@@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data)
 		if ((status & INT_ERROR) && !cmd_q->cmd_error)
 			cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

-		/* Acknowledge the interrupt */
+		/* Acknowledge the completion */
 		iowrite32(status, cmd_q->reg_control + 0x0010);
-		pt_core_enable_queue_interrupts(pt);
 		pt_do_cmd_complete((ulong)&pt->tdata);
 	}
 }

+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+	struct pt_device *pt = data;
+	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+	pt_core_disable_queue_interrupts(pt);
+	pt->total_interrupts++;
+	pt_check_status_trans(pt, cmd_q);
+	pt_core_enable_queue_interrupts(pt);
+	return IRQ_HANDLED;
+}
@@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
 	vchan_tx_prep(&chan->vc, &desc->vd, flags);

 	desc->pt = chan->pt;
+	desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
 	desc->issued_to_hw = 0;
 	desc->status = DMA_IN_PROGRESS;

@@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
 		pt_cmd_callback(desc, 0);
 }

+static enum dma_status
+pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+	     struct dma_tx_state *txstate)
+{
+	struct pt_device *pt = to_pt_chan(c)->pt;
+	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+	pt_check_status_trans(pt, cmd_q);
+	return dma_cookie_status(c, cookie, txstate);
+}
+
 static int pt_pause(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

@@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
 	unsigned long flags;
+	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
 	LIST_HEAD(head);

+	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
 	spin_lock_irqsave(&chan->vc.lock, flags);
 	vchan_get_all_descriptors(&chan->vc, &head);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);

@@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt)
 	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
 	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
 	dma_dev->device_issue_pending = pt_issue_pending;
-	dma_dev->device_tx_status = dma_cookie_status;
+	dma_dev->device_tx_status = pt_tx_status;
 	dma_dev->device_pause = pt_pause;
 	dma_dev->device_resume = pt_resume;
 	dma_dev->device_terminate_all = pt_terminate_all;
@@ -206,6 +206,9 @@ struct pt_cmd_queue {
 	unsigned int active;
 	unsigned int suspended;

+	/* Interrupt flag */
+	bool int_en;
+
 	/* Register addresses for queue */
 	void __iomem *reg_control;
 	u32 qcontrol; /* Cached control register */

@@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt);
 int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
 			     struct pt_passthru_engine *pt_engine);

+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q);
 void pt_start_queue(struct pt_cmd_queue *cmd_q);
 void pt_stop_queue(struct pt_cmd_queue *cmd_q);

+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+	iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+	iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
 #endif
@@ -1365,10 +1365,17 @@ static int pxad_probe(struct platform_device *op)

 	of_id = of_match_device(pxad_dt_ids, &op->dev);
 	if (of_id) {
-		of_property_read_u32(op->dev.of_node, "#dma-channels",
-				     &dma_channels);
-		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+		/* Parse new and deprecated dma-channels properties */
+		if (of_property_read_u32(op->dev.of_node, "dma-channels",
+					 &dma_channels))
+			of_property_read_u32(op->dev.of_node, "#dma-channels",
+					     &dma_channels);
+		/* Parse new and deprecated dma-requests properties */
+		ret = of_property_read_u32(op->dev.of_node, "dma-requests",
 					   &nb_requestors);
+		if (ret)
+			ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+						   &nb_requestors);
 		if (ret) {
 			dev_warn(pdev->slave.dev,
 				 "#dma-requests set to default 32 as missing in OF: %d",
@@ -1754,10 +1754,14 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
 		tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);

 		tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
-		if (spi->cmd == SPI_RX)
+		if (spi->cmd == SPI_RX) {
 			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
-		else
+		} else if (spi->cmd == SPI_TX) {
 			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+		} else { /* SPI_DUPLEX */
+			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+		}
 	}

 	/* create the dma tre */

@@ -2148,6 +2152,7 @@ static int gpi_probe(struct platform_device *pdev)
 {
 	struct gpi_dev *gpi_dev;
 	unsigned int i;
+	u32 ee_offset;
 	int ret;

 	gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);

@@ -2175,6 +2180,9 @@ static int gpi_probe(struct platform_device *pdev)
 		return ret;
 	}

+	ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev);
+	gpi_dev->ee_base = gpi_dev->ee_base - ee_offset;
+
 	gpi_dev->ev_factor = EV_FACTOR;

 	ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));

@@ -2278,9 +2286,12 @@ static int gpi_probe(struct platform_device *pdev)
 }

 static const struct of_device_id gpi_of_match[] = {
-	{ .compatible = "qcom,sdm845-gpi-dma" },
-	{ .compatible = "qcom,sm8150-gpi-dma" },
-	{ .compatible = "qcom,sm8250-gpi-dma" },
+	{ .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
+	{ .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
+	{ .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
+	{ .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
+	{ .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
+	{ .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, gpi_of_match);
@@ -431,6 +431,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
 	struct hidma_desc *mdesc = NULL;
 	struct hidma_dev *mdma = mchan->dmadev;
 	unsigned long irqflags;
+	u64 byte_pattern, fill_pattern;

 	/* Get free descriptor */
 	spin_lock_irqsave(&mchan->lock, irqflags);

@@ -443,9 +444,19 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
 	if (!mdesc)
 		return NULL;

+	byte_pattern = (char)value;
+	fill_pattern =	(byte_pattern << 56) |
+			(byte_pattern << 48) |
+			(byte_pattern << 40) |
+			(byte_pattern << 32) |
+			(byte_pattern << 24) |
+			(byte_pattern << 16) |
+			(byte_pattern << 8) |
+			byte_pattern;
+
 	mdesc->desc.flags = flags;
 	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
-				     value, dest, len, flags,
+				     fill_pattern, dest, len, flags,
 				     HIDMA_TRE_MEMSET);

 	/* Place descriptor in prepared list */
@@ -482,23 +482,30 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
 static int sf_pdma_probe(struct platform_device *pdev)
 {
 	struct sf_pdma *pdma;
-	struct sf_pdma_chan *chan;
 	struct resource *res;
-	int len, chans;
-	int ret;
+	int ret, n_chans;
 	const enum dma_slave_buswidth widths =
 		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
 		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
 		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
 		DMA_SLAVE_BUSWIDTH_64_BYTES;

-	chans = PDMA_NR_CH;
-	len = sizeof(*pdma) + sizeof(*chan) * chans;
-	pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans);
+	if (ret) {
+		/* backwards-compatibility for no dma-channels property */
+		dev_dbg(&pdev->dev, "set number of channels to default value: 4\n");
+		n_chans = PDMA_MAX_NR_CH;
+	} else if (n_chans > PDMA_MAX_NR_CH) {
+		dev_err(&pdev->dev, "the number of channels exceeds the maximum\n");
+		return -EINVAL;
+	}
+
+	pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans),
+			    GFP_KERNEL);
 	if (!pdma)
 		return -ENOMEM;

-	pdma->n_chans = chans;
+	pdma->n_chans = n_chans;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pdma->membase = devm_ioremap_resource(&pdev->dev, res);

@@ -556,7 +563,7 @@ static int sf_pdma_remove(struct platform_device *pdev)
 	struct sf_pdma_chan *ch;
 	int i;

-	for (i = 0; i < PDMA_NR_CH; i++) {
+	for (i = 0; i < pdma->n_chans; i++) {
 		ch = &pdma->chans[i];

 		devm_free_irq(&pdev->dev, ch->txirq, ch);

@@ -574,6 +581,7 @@ static int sf_pdma_remove(struct platform_device *pdev)

 static const struct of_device_id sf_pdma_dt_ids[] = {
 	{ .compatible = "sifive,fu540-c000-pdma" },
+	{ .compatible = "sifive,pdma0" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
@@ -22,11 +22,7 @@
 #include "../dmaengine.h"
 #include "../virt-dma.h"

-#define PDMA_NR_CH				4
-
-#if (PDMA_NR_CH != 4)
-#error "Please define PDMA_NR_CH to 4"
-#endif
+#define PDMA_MAX_NR_CH				4

 #define PDMA_BASE_ADDR				0x3000000
 #define PDMA_CHAN_OFFSET			0x1000

@@ -118,7 +114,7 @@ struct sf_pdma {
 	void __iomem	*membase;
 	void __iomem	*mappedbase;
 	u32		n_chans;
-	struct sf_pdma_chan	chans[PDMA_NR_CH];
+	struct sf_pdma_chan	chans[];
 };

 #endif /* _SF_PDMA_H */
@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC

 config RZ_DMAC
 	tristate "Renesas RZ/{G2L,V2L} DMA Controller"
-	depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST
+	depends on ARCH_RZG2L || COMPILE_TEST
 	select RENESAS_DMA
 	select DMA_VIRTUAL_CHANNELS
 	help
@@ -1117,7 +1117,11 @@ static int sprd_dma_probe(struct platform_device *pdev)
 	u32 chn_count;
 	int ret, i;

-	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
+	/* Parse new and deprecated dma-channels properties */
+	ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
+	if (ret)
+		ret = device_property_read_u32(&pdev->dev, "#dma-channels",
+					       &chn_count);
 	if (ret) {
 		dev_err(&pdev->dev, "get dma channels count failed\n");
 		return ret;
@@ -208,6 +208,7 @@ struct stm32_dma_chan {
 	u32 threshold;
 	u32 mem_burst;
 	u32 mem_width;
+	enum dma_status status;
 };

 struct stm32_dma_device {

@@ -485,6 +486,7 @@ static void stm32_dma_stop(struct stm32_dma_chan *chan)
 	}

 	chan->busy = false;
+	chan->status = DMA_COMPLETE;
 }

 static int stm32_dma_terminate_all(struct dma_chan *c)

@@ -535,6 +537,13 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
 	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
 }

+static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
+{
+	chan->next_sg++;
+	if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
+		chan->next_sg = 0;
+}
+
 static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

 static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
@@ -575,7 +584,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
 	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

-	chan->next_sg++;
+	stm32_dma_sg_inc(chan);

 	/* Clear interrupt status if it is there */
 	status = stm32_dma_irq_status(chan);

@@ -588,11 +597,11 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 	stm32_dma_dump_reg(chan);

 	/* Start DMA */
+	chan->busy = true;
+	chan->status = DMA_IN_PROGRESS;
 	reg->dma_scr |= STM32_DMA_SCR_EN;
 	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

-	chan->busy = true;
-
 	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
 }
@@ -605,41 +614,131 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
 	id = chan->id;
 	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

 	if (dma_scr & STM32_DMA_SCR_DBM) {
-		if (chan->next_sg == chan->desc->num_sgs)
-			chan->next_sg = 0;
+		sg_req = &chan->desc->sg_req[chan->next_sg];

-		sg_req = &chan->desc->sg_req[chan->next_sg];
-
-		if (dma_scr & STM32_DMA_SCR_CT) {
-			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
-			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
-			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
-				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
-		} else {
-			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
-			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
-			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
-				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
-		}
+		if (dma_scr & STM32_DMA_SCR_CT) {
+			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
+			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
+			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
+				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
+		} else {
+			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
+			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
+			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
+				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
+		}
 	}
 }

-static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
 {
-	if (chan->desc) {
-		if (chan->desc->cyclic) {
-			vchan_cyclic_callback(&chan->desc->vdesc);
-			chan->next_sg++;
+	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+	u32 dma_scr;
+
+	/*
+	 * Read and store current remaining data items and peripheral/memory addresses to be
+	 * updated on resume
+	 */
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+	/*
+	 * Transfer can be paused while between a previous resume and reconfiguration on transfer
+	 * complete. If transfer is cyclic and CIRC and DBM have been deactivated for resume, need
+	 * to set it here in SCR backup to ensure a good reconfiguration on transfer complete.
+	 */
+	if (chan->desc && chan->desc->cyclic) {
+		if (chan->desc->num_sgs == 1)
+			dma_scr |= STM32_DMA_SCR_CIRC;
+		else
+			dma_scr |= STM32_DMA_SCR_DBM;
+	}
+	chan->chan_reg.dma_scr = dma_scr;
+
+	/*
+	 * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise
+	 * on resume NDTR autoreload value will be wrong (lower than the initial period length)
+	 */
+	if (chan->desc && chan->desc->cyclic) {
+		dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
+		stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+	}
+
+	chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+	dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+}
+
+static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+	struct stm32_dma_sg_req *sg_req;
+	u32 dma_scr, status, id;
+
+	id = chan->id;
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+	/* Clear interrupt status if it is there */
+	status = stm32_dma_irq_status(chan);
+	if (status)
+		stm32_dma_irq_clear(chan, status);
+
+	if (!chan->next_sg)
+		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+	else
+		sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+	/* Reconfigure NDTR with the initial value */
+	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);
+
+	/* Restore SPAR */
+	stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);
+
+	/* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */
+	stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
+	stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);
+
+	/* Reactivate CIRC/DBM if needed */
+	if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
+		dma_scr |= STM32_DMA_SCR_DBM;
+		/* Restore CT */
+		if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
+			dma_scr &= ~STM32_DMA_SCR_CT;
+		else
+			dma_scr |= STM32_DMA_SCR_CT;
+	} else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
+		dma_scr |= STM32_DMA_SCR_CIRC;
+	}
+	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+	stm32_dma_configure_next_sg(chan);
+
+	stm32_dma_dump_reg(chan);
+
+	dma_scr |= STM32_DMA_SCR_EN;
+	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+	dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
+}
+
+static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
+{
+	if (!chan->desc)
+		return;
+
+	if (chan->desc->cyclic) {
+		vchan_cyclic_callback(&chan->desc->vdesc);
+		stm32_dma_sg_inc(chan);
+		/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
+		if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
+			stm32_dma_post_resume_reconfigure(chan);
+		else if (scr & STM32_DMA_SCR_DBM)
+			stm32_dma_configure_next_sg(chan);
-		} else {
-			chan->busy = false;
-			if (chan->next_sg == chan->desc->num_sgs) {
-				vchan_cookie_complete(&chan->desc->vdesc);
-				chan->desc = NULL;
-			}
-			stm32_dma_start_transfer(chan);
-		}
+	} else {
+		chan->busy = false;
+		chan->status = DMA_COMPLETE;
+		if (chan->next_sg == chan->desc->num_sgs) {
+			vchan_cookie_complete(&chan->desc->vdesc);
+			chan->desc = NULL;
+		}
+		stm32_dma_start_transfer(chan);
+	}
 }
@@ -675,8 +774,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)

 	if (status & STM32_DMA_TCI) {
 		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
-		if (scr & STM32_DMA_SCR_TCIE)
-			stm32_dma_handle_chan_done(chan);
+		if (scr & STM32_DMA_SCR_TCIE) {
+			if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
+				stm32_dma_handle_chan_paused(chan);
+			else
+				stm32_dma_handle_chan_done(chan, scr);
+		}
 		status &= ~STM32_DMA_TCI;
 	}
@@ -711,6 +814,107 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }

+static int stm32_dma_pause(struct dma_chan *c)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	unsigned long flags;
+	int ret;
+
+	if (chan->status != DMA_IN_PROGRESS)
+		return -EPERM;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	ret = stm32_dma_disable_chan(chan);
+	/*
+	 * A transfer complete flag is set to indicate the end of transfer due to the stream
+	 * interruption, so wait for interrupt
+	 */
+	if (!ret)
+		chan->status = DMA_PAUSED;
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return ret;
+}
+
+static int stm32_dma_resume(struct dma_chan *c)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+	struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
+	u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
+	struct stm32_dma_sg_req *sg_req;
+	unsigned long flags;
+
+	if (chan->status != DMA_PAUSED)
+		return -EPERM;
+
+	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+	if (WARN_ON(scr & STM32_DMA_SCR_EN))
+		return -EPERM;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+
+	/* sg_reg[prev_sg] contains original ndtr, sm0ar and sm1ar before pausing the transfer */
+	if (!chan->next_sg)
+		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+	else
+		sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+	ndtr = sg_req->chan_reg.dma_sndtr;
+	offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
+	spar = sg_req->chan_reg.dma_spar;
+	sm0ar = sg_req->chan_reg.dma_sm0ar;
+	sm1ar = sg_req->chan_reg.dma_sm1ar;
+
+	/*
+	 * The peripheral and/or memory addresses have to be updated in order to adjust the
+	 * address pointers. Need to check increment.
+	 */
+	if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
+		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
+	else
+		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);
+
+	if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
+		offset = 0;
+
+	/*
+	 * In case of DBM, the current target could be SM1AR.
+	 * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so
+	 * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1.
+	 */
+	if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
+		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
+	else
+		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);
+
+	/* NDTR must be restored otherwise internal HW counter won't be correctly reset */
+	stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);
+
+	/*
+	 * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt,
+	 * otherwise NDTR autoreload value will be wrong (lower than the initial period length)
+	 */
+	if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
+		chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);
+
+	if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
+		stm32_dma_configure_next_sg(chan);
+
+	stm32_dma_dump_reg(chan);
+
+	/* The stream may then be re-enabled to restart transfer from the point it was stopped */
+	chan->status = DMA_IN_PROGRESS;
+	chan_reg.dma_scr |= STM32_DMA_SCR_EN;
+	stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);
+
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+
+	return 0;
+}
+
 static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 				    enum dma_transfer_direction direction,
 				    enum dma_slave_buswidth *buswidth,
@@ -978,10 +1182,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
 	}

 	/* Enable Circular mode or double buffer mode */
-	if (buf_len == period_len)
+	if (buf_len == period_len) {
 		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
-	else
+	} else {
 		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+	}

 	/* Clear periph ctrl if client set it */
 	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

@@ -1091,24 +1297,36 @@ static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
 {
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
 	struct stm32_dma_sg_req *sg_req;
-	u32 dma_scr, dma_smar, id;
+	u32 dma_scr, dma_smar, id, period_len;

 	id = chan->id;
 	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

+	/* In cyclic CIRC but not DBM, CT is not used */
 	if (!(dma_scr & STM32_DMA_SCR_DBM))
 		return true;

 	sg_req = &chan->desc->sg_req[chan->next_sg];
+	period_len = sg_req->len;

+	/* DBM - take care of a previous pause/resume not yet post reconfigured */
 	if (dma_scr & STM32_DMA_SCR_CT) {
 		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
-		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+		/*
+		 * If transfer has been pause/resumed,
+		 * SM0AR is in the range of [SM0AR:SM0AR+period_len]
+		 */
+		return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
+			dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
 	}

 	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));

-	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+	/*
+	 * If transfer has been pause/resumed,
+	 * SM1AR is in the range of [SM1AR:SM1AR+period_len]
+	 */
+	return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
+		dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
 }

 static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
@@ -1148,7 +1366,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,

 	residue = stm32_dma_get_remaining_bytes(chan);

-	if (!stm32_dma_is_current_sg(chan)) {
+	if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
 		n_sg++;
 		if (n_sg == chan->desc->num_sgs)
 			n_sg = 0;

@@ -1188,7 +1406,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 	u32 residue = 0;

 	status = dma_cookie_status(c, cookie, state);
-	if (status == DMA_COMPLETE || !state)
+	if (status == DMA_COMPLETE)
+		return status;
+
+	status = chan->status;
+
+	if (!state)
 		return status;

 	spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1377,6 +1600,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
 	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
 	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
 	dd->device_config = stm32_dma_slave_config;
+	dd->device_pause = stm32_dma_pause;
+	dd->device_resume = stm32_dma_resume;
 	dd->device_terminate_all = stm32_dma_terminate_all;
 	dd->device_synchronize = stm32_dma_synchronize;
 	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |

@@ -1482,7 +1707,7 @@ static int stm32_dma_runtime_resume(struct device *dev)
 #endif

 #ifdef CONFIG_PM_SLEEP
-static int stm32_dma_suspend(struct device *dev)
+static int stm32_dma_pm_suspend(struct device *dev)
 {
 	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
 	int id, ret, scr;

@@ -1506,14 +1731,14 @@ static int stm32_dma_pm_suspend(struct device *dev)
 	return 0;
 }

-static int stm32_dma_resume(struct device *dev)
+static int stm32_dma_pm_resume(struct device *dev)
 {
 	return pm_runtime_force_resume(dev);
 }
 #endif

 static const struct dev_pm_ops stm32_dma_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
 	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
 			   stm32_dma_runtime_resume, NULL)
 };
@@ -267,7 +267,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 		ret = PTR_ERR(rst);
 		if (ret == -EPROBE_DEFER)
 			goto err_clk;
-	} else {
+	} else if (count > 1) { /* Don't reset if there is only one dma-master */
 		reset_control_assert(rst);
 		udelay(2);
 		reset_control_deassert(rst);
@@ -34,7 +34,6 @@
 #include "virt-dma.h"

 #define STM32_MDMA_GISR0		0x0000 /* MDMA Int Status Reg 1 */
-#define STM32_MDMA_GISR1		0x0004 /* MDMA Int Status Reg 2 */

 /* MDMA Channel x interrupt/status register */
 #define STM32_MDMA_CISR(x)		(0x40 + 0x40 * (x)) /* x = 0..62 */

@@ -73,6 +72,7 @@
 #define STM32_MDMA_CCR_WEX		BIT(14)
 #define STM32_MDMA_CCR_HEX		BIT(13)
 #define STM32_MDMA_CCR_BEX		BIT(12)
+#define STM32_MDMA_CCR_SM		BIT(8)
 #define STM32_MDMA_CCR_PL_MASK		GENMASK(7, 6)
 #define STM32_MDMA_CCR_PL(n)		FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
 #define STM32_MDMA_CCR_TCIE		BIT(5)

@@ -168,7 +168,7 @@

 #define STM32_MDMA_MAX_BUF_LEN		128
 #define STM32_MDMA_MAX_BLOCK_LEN	65536
-#define STM32_MDMA_MAX_CHANNELS		63
+#define STM32_MDMA_MAX_CHANNELS		32
 #define STM32_MDMA_MAX_REQUESTS		256
 #define STM32_MDMA_MAX_BURST		128
 #define STM32_MDMA_VERY_HIGH_PRIORITY	0x3

@@ -248,6 +248,7 @@ struct stm32_mdma_device {
 	u32 nr_channels;
 	u32 nr_requests;
 	u32 nr_ahb_addr_masks;
+	u32 chan_reserved;
 	struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
 	u32 ahb_addr_masks[];
 };
@@ -1317,26 +1318,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
 static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 {
 	struct stm32_mdma_device *dmadev = devid;
-	struct stm32_mdma_chan *chan = devid;
+	struct stm32_mdma_chan *chan;
 	u32 reg, id, ccr, ien, status;

 	/* Find out which channel generates the interrupt */
 	status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
-	if (status) {
-		id = __ffs(status);
-	} else {
-		status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
-		if (!status) {
-			dev_dbg(mdma2dev(dmadev), "spurious it\n");
-			return IRQ_NONE;
-		}
-		id = __ffs(status);
-		/*
-		 * As GISR0 provides status for channel id from 0 to 31,
-		 * so GISR1 provides status for channel id from 32 to 62
-		 */
-		id += 32;
+	if (!status) {
+		dev_dbg(mdma2dev(dmadev), "spurious it\n");
+		return IRQ_NONE;
 	}
+	id = __ffs(status);

 	chan = &dmadev->chan[id];
 	if (!chan) {

@@ -1354,9 +1345,12 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)

 	if (!(status & ien)) {
 		spin_unlock(&chan->vchan.lock);
-		dev_warn(chan2dev(chan),
-			 "spurious it (status=0x%04x, ien=0x%04x)\n",
-			 status, ien);
+		if (chan->busy)
+			dev_warn(chan2dev(chan),
+				 "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
+		else
+			dev_dbg(chan2dev(chan),
+				"spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
 		return IRQ_NONE;
 	}
@@ -1456,10 +1450,23 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c)
 	chan->desc_pool = NULL;
 }

+static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param)
+{
+	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+
+	/* Check if chan is marked Secure */
+	if (dmadev->chan_reserved & BIT(chan->id))
+		return false;
+
+	return true;
+}
+
 static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
 					    struct of_dma *ofdma)
 {
 	struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
+	dma_cap_mask_t mask = dmadev->ddev.cap_mask;
 	struct stm32_mdma_chan *chan;
 	struct dma_chan *c;
 	struct stm32_mdma_chan_config config;

@@ -1485,7 +1492,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
 		return NULL;
 	}

-	c = dma_get_any_slave_channel(&dmadev->ddev);
+	c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node);
 	if (!c) {
 		dev_err(mdma2dev(dmadev), "No more channels available\n");
 		return NULL;

@@ -1615,6 +1622,10 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	for (i = 0; i < dmadev->nr_channels; i++) {
 		chan = &dmadev->chan[i];
 		chan->id = i;
+
+		if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM)
+			dmadev->chan_reserved |= BIT(i);
+
 		chan->vchan.desc_free = stm32_mdma_desc_free;
 		vchan_init(&chan->vchan, dd);
 	}
@@ -90,6 +90,14 @@

 #define DMA_CHAN_CUR_PARA	0x1c

+/*
+ * LLI address mangling
+ *
+ * The LLI link physical address is also mangled, but we avoid dealing
+ * with that by allocating LLIs from the DMA32 zone.
+ */
+#define SRC_HIGH_ADDR(x)	(((x) & 0x3U) << 16)
+#define DST_HIGH_ADDR(x)	(((x) & 0x3U) << 18)

 /*
  * Various hardware related defines

@@ -132,6 +140,7 @@ struct sun6i_dma_config {
 	u32 dst_burst_lengths;
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
+	bool has_high_addr;
 	bool has_mbus_clk;
 };

@@ -241,9 +250,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
 static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
 					    struct sun6i_pchan *pchan)
 {
-	phys_addr_t reg = virt_to_phys(pchan->base);
-
-	dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
+	dev_dbg(sdev->slave.dev, "Chan %d reg:\n"
 		"\t___en(%04x): \t0x%08x\n"
 		"\tpause(%04x): \t0x%08x\n"
 		"\tstart(%04x): \t0x%08x\n"

@@ -252,7 +259,7 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
 		"\t__dst(%04x): \t0x%08x\n"
 		"\tcount(%04x): \t0x%08x\n"
 		"\t_para(%04x): \t0x%08x\n\n",
-		pchan->idx, &reg,
+		pchan->idx,
 		DMA_CHAN_ENABLE,
 		readl(pchan->base + DMA_CHAN_ENABLE),
 		DMA_CHAN_PAUSE,
@@ -385,17 +392,16 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
 }

 static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
-				      struct sun6i_dma_lli *lli)
+				      struct sun6i_dma_lli *v_lli,
+				      dma_addr_t p_lli)
 {
-	phys_addr_t p_lli = virt_to_phys(lli);
-
 	dev_dbg(chan2dev(&vchan->vc.chan),
-		"\n\tdesc: p - %pa v - 0x%p\n"
+		"\n\tdesc:\tp - %pad v - 0x%p\n"
 		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
 		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
-		&p_lli, lli,
-		lli->cfg, lli->src, lli->dst,
-		lli->len, lli->para, lli->p_lli_next);
+		&p_lli, v_lli,
+		v_lli->cfg, v_lli->src, v_lli->dst,
+		v_lli->len, v_lli->para, v_lli->p_lli_next);
 }

 static void sun6i_dma_free_desc(struct virt_dma_desc *vd)

@@ -445,7 +451,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
 	pchan->desc = to_sun6i_desc(&desc->tx);
 	pchan->done = NULL;

-	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);
+	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli);

 	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
 	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
@@ -626,6 +632,18 @@ static int set_config(struct sun6i_dma_dev *sdev,
 	return 0;
 }

+static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev,
+				      struct sun6i_dma_lli *v_lli,
+				      dma_addr_t src, dma_addr_t dst)
+{
+	v_lli->src = lower_32_bits(src);
+	v_lli->dst = lower_32_bits(dst);
+
+	if (sdev->cfg->has_high_addr)
+		v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) |
+			       DST_HIGH_ADDR(upper_32_bits(dst));
+}
+
 static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
 	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	size_t len, unsigned long flags)

@@ -648,16 +666,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
 	if (!txd)
 		return NULL;

-	v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+	v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
 	if (!v_lli) {
 		dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
 		goto err_txd_free;
 	}

-	v_lli->src = src;
-	v_lli->dst = dest;
 	v_lli->len = len;
 	v_lli->para = NORMAL_WAIT;
+	sun6i_dma_set_addr(sdev, v_lli, src, dest);

 	burst = convert_burst(8);
 	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);

@@ -670,7 +687,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(

 	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);

-	sun6i_dma_dump_lli(vchan, v_lli);
+	sun6i_dma_dump_lli(vchan, v_lli, p_lli);

 	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@@ -708,7 +725,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
 		return NULL;

 	for_each_sg(sgl, sg, sg_len, i) {
-		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+		v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
 		if (!v_lli)
 			goto err_lli_free;

@@ -716,8 +733,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
 		v_lli->para = NORMAL_WAIT;

 		if (dir == DMA_MEM_TO_DEV) {
-			v_lli->src = sg_dma_address(sg);
-			v_lli->dst = sconfig->dst_addr;
+			sun6i_dma_set_addr(sdev, v_lli,
+					   sg_dma_address(sg),
+					   sconfig->dst_addr);
 			v_lli->cfg = lli_cfg;
 			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
 			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);

@@ -729,8 +747,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
 				sg_dma_len(sg), flags);

 		} else {
-			v_lli->src = sconfig->src_addr;
-			v_lli->dst = sg_dma_address(sg);
+			sun6i_dma_set_addr(sdev, v_lli,
+					   sconfig->src_addr,
+					   sg_dma_address(sg));
 			v_lli->cfg = lli_cfg;
 			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
 			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);

@@ -746,14 +765,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
 	}

 	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
-	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
-		sun6i_dma_dump_lli(vchan, prev);
+	for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+	     p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+		sun6i_dma_dump_lli(vchan, v_lli, p_lli);

 	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

 err_lli_free:
-	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
-		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+	for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+	     p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+		dma_pool_free(sdev->pool, v_lli, p_lli);
 	kfree(txd);
 	return NULL;
 }
@@ -787,7 +808,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
 		return NULL;

 	for (i = 0; i < periods; i++) {
-		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+		v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
 		if (!v_lli) {
 			dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
 			goto err_lli_free;

@@ -797,14 +818,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
 		v_lli->para = NORMAL_WAIT;

 		if (dir == DMA_MEM_TO_DEV) {
-			v_lli->src = buf_addr + period_len * i;
-			v_lli->dst = sconfig->dst_addr;
+			sun6i_dma_set_addr(sdev, v_lli,
+					   buf_addr + period_len * i,
+					   sconfig->dst_addr);
 			v_lli->cfg = lli_cfg;
 			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
 			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
 		} else {
-			v_lli->src = sconfig->src_addr;
-			v_lli->dst = buf_addr + period_len * i;
+			sun6i_dma_set_addr(sdev, v_lli,
+					   sconfig->src_addr,
+					   buf_addr + period_len * i);
 			v_lli->cfg = lli_cfg;
 			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
 			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);

@@ -820,8 +843,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
 	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

 err_lli_free:
-	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
-		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+	for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+	     p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+		dma_pool_free(sdev->pool, v_lli, p_lli);
 	kfree(txd);
 	return NULL;
 }
@@ -1174,8 +1198,6 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
 };

 /*
- * TODO: Add support for more than 4g physical addressing.
- *
  * The A100 binding uses the number of dma channels from the
  * device tree node.
  */

@@ -1194,6 +1216,7 @@ static struct sun6i_dma_config sun50i_a100_dma_cfg = {
 			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
 			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
 			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+	.has_high_addr = true,
 	.has_mbus_clk = true,
 };

@@ -1248,6 +1271,7 @@ static const struct of_device_id sun6i_dma_match[] = {
 	{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
 	{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
 	{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
+	{ .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg },
 	{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
 	{ .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg },
 	{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
@@ -1105,8 +1105,12 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
 	cdd->first_completion_queue = glue_info->first_completion_queue;

+	/* Parse new and deprecated dma-channels properties */
 	ret = of_property_read_u32(dev->of_node,
-				   "#dma-channels", &cdd->n_chans);
+				   "dma-channels", &cdd->n_chans);
+	if (ret)
+		ret = of_property_read_u32(dev->of_node,
+					   "#dma-channels", &cdd->n_chans);
 	if (ret)
 		goto err_get_n_chans;
@@ -70,10 +70,10 @@
 /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
 static struct psil_ep am62_src_ep_map[] = {
 	/* SAUL */
-	PSIL_SAUL(0x7500, 20, 35, 8, 35, 0),
-	PSIL_SAUL(0x7501, 21, 35, 8, 36, 0),
-	PSIL_SAUL(0x7502, 22, 43, 8, 43, 0),
-	PSIL_SAUL(0x7503, 23, 43, 8, 44, 0),
+	PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+	PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+	PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+	PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
 	/* PDMA_MAIN0 - SPI0-3 */
 	PSIL_PDMA_XY_PKT(0x4302),
 	PSIL_PDMA_XY_PKT(0x4303),
@@ -229,7 +229,7 @@ struct zynqmp_dma_chan {
 	bool is_dmacoherent;
 	struct tasklet_struct tasklet;
 	bool idle;
-	u32 desc_size;
+	size_t desc_size;
 	bool err;
 	u32 bus_width;
 	u32 src_burst_len;

@@ -486,7 +486,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
 	}

 	chan->desc_pool_v = dma_alloc_coherent(chan->dev,
-					       (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+					       (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
+					       ZYNQMP_DMA_NUM_DESCS),
 					       &chan->desc_pool_p, GFP_KERNEL);
 	if (!chan->desc_pool_v)
 		return -ENOMEM;

@@ -1077,7 +1078,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
 	pm_runtime_use_autosuspend(zdev->dev);
 	pm_runtime_enable(zdev->dev);
-	pm_runtime_get_sync(zdev->dev);
+	ret = pm_runtime_resume_and_get(zdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "device wakeup failed.\n");
+		pm_runtime_disable(zdev->dev);
+	}
 	if (!pm_runtime_enabled(zdev->dev)) {
 		ret = zynqmp_dma_runtime_resume(zdev->dev);
 		if (ret)

@@ -1093,7 +1098,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
 	p->src_addr_widths = BIT(zdev->chan->bus_width / 8);

-	dma_async_device_register(&zdev->common);
+	ret = dma_async_device_register(&zdev->common);
+	if (ret) {
+		dev_err(zdev->dev, "failed to register the dma device\n");
+		goto free_chan_resources;
+	}

 	ret = of_dma_controller_register(pdev->dev.of_node,
 					 of_zynqmp_dma_xlate, zdev);
@@ -870,7 +870,6 @@ struct dma_device {
 	struct device *dev;
 	struct module *owner;
 	struct ida chan_ida;
-	struct mutex chan_mutex;	/* to protect chan_ida */

 	u32 src_addr_widths;
 	u32 dst_addr_widths;

@@ -1031,6 +1030,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }

+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags)
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+
+#ifdef CONFIG_CLK_R9A06G032
+int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val);
+#else
+static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; }
+#endif
+
+#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */
@@ -53,6 +53,11 @@ enum idxd_scmd_stat {

 /* IAX */
 #define IDXD_OP_FLAG_RD_SRC2_AECS	0x010000
+#define IDXD_OP_FLAG_RD_SRC2_2ND	0x020000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP	0x040000
+#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL	0x080000
+#define IDXD_OP_FLAG_SRC2_STS		0x100000
+#define IDXD_OP_FLAG_CRC_RFC3720	0x200000

 /* Opcode */
 enum dsa_opcode {

@@ -81,6 +86,18 @@ enum iax_opcode {
 	IAX_OPCODE_MEMMOVE,
 	IAX_OPCODE_DECOMPRESS = 0x42,
 	IAX_OPCODE_COMPRESS,
+	IAX_OPCODE_CRC64,
+	IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
+	IAX_OPCODE_ZERO_DECOMP_16,
+	IAX_OPCODE_DECOMP_32 = 0x4c,
+	IAX_OPCODE_DECOMP_16,
+	IAX_OPCODE_SCAN = 0x50,
+	IAX_OPCODE_SET_MEMBER,
+	IAX_OPCODE_EXTRACT,
+	IAX_OPCODE_SELECT,
+	IAX_OPCODE_RLE_BURST,
+	IAX_OPCDE_FIND_UNIQUE,
+	IAX_OPCODE_EXPAND,
 };

 /* Completion record status */

@@ -120,6 +137,7 @@ enum iax_completion_status {
 	IAX_COMP_NONE = 0,
 	IAX_COMP_SUCCESS,
 	IAX_COMP_PAGE_FAULT_IR = 0x04,
+	IAX_COMP_ANALYTICS_ERROR = 0x0a,
 	IAX_COMP_OUTBUF_OVERFLOW,
 	IAX_COMP_BAD_OPCODE = 0x10,
 	IAX_COMP_INVALID_FLAGS,

@@ -140,7 +158,10 @@ enum iax_completion_status {
 	IAX_COMP_WATCHDOG,
 	IAX_COMP_INVALID_COMP_FLAG = 0x30,
 	IAX_COMP_INVALID_FILTER_FLAG,
-	IAX_COMP_INVALID_NUM_ELEMS = 0x33,
+	IAX_COMP_INVALID_INPUT_SIZE,
+	IAX_COMP_INVALID_NUM_ELEMS,
 	IAX_COMP_INVALID_SRC1_WIDTH,
 	IAX_COMP_INVALID_INVERT_OUT,
 };

 #define DSA_COMP_STATUS_MASK		0x7f

@@ -319,8 +340,12 @@ struct iax_completion_record {
 	uint32_t	output_size;
 	uint8_t		output_bits;
 	uint8_t		rsvd3;
-	uint16_t	rsvd4;
-	uint64_t	rsvd5[4];
+	uint16_t	xor_csum;
+	uint32_t	crc;
+	uint32_t	min;
+	uint32_t	max;
+	uint32_t	sum;
+	uint64_t	rsvd4[2];
 } __attribute__((packed));

 struct iax_raw_completion_record {