mirror of https://gitee.com/openkylin/linux.git
dmaengine updates for 4.13-rc1

- removal of AVR32 support in dw driver as AVR32 is gone
- new driver for Broadcom stream buffer accelerator (SBA) RAID driver
- add support for Faraday Technology FTDMAC020 in amba-pl08x driver
- IOMMU support in pl330 driver
- updates to bunch of drivers

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJZYG73AAoJEHwUBw8lI4NH+DoP/1f0TsYrQFCjNqa4nybjU1Sd
bbqnpouuJscwt8Qk2LGuSimi0QG91gQOLvrmueFbXtEg86nPOfa0RnWGNF4qwYFK
oliDlXF2PnV65J5kl7CvqXCj6bFiXCULVdO9JD2HFoFB1+lzXN9JQqOG5ne29BQ6
g3HNRlUTXNzQXWisgbAOLxuuvyfv68Zo3wCLYLkd4vC/C4zmxM+KXUG8+s0hS7t3
AOUpYW6F/C+y1Ax+SiACm0QGNZ4rc6/+ZUIIXUO5CfTYGjv6QUdzxiLHtc4br25l
2CoN9IP4V/OxHaW9T1jA61TeAAFr63oXYfDMBBzclzVryZRAIU72ups31uRQXpFz
99zUQ0OsdOCvy0oPInhNd8u+cpyh/4e2RDgSZ9rxw3xVaKFh8lsw5OtcCBQzCMeI
xgFCUBHsLjEi4uafJcl6n2T7+Y4Y0KgOmxPHZo3tpq/2a5M6tVy8k68m3afCQylF
1SOxzVZdDRUutPpviQWop6RgP0EcVuzaUJ0vO4nat4j77vuimaPqdk+oLV46XP2d
5I52kcvbVI4BbJavTjVs3FRdcez0pW37iOw+5MOxHE3dnBp4X/3btFzBY4aOsdg0
wVut3B+9U4WHDBF2ConBxxMvGqMYmcssOQ096GdC6oBHHS7x6n7tEVPiZ5iUacn5
LB8k9AZtpBC7nUWPH7FS
=srPZ
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-4.13-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - removal of AVR32 support in dw driver as AVR32 is gone
 - new driver for Broadcom stream buffer accelerator (SBA) RAID driver
 - add support for Faraday Technology FTDMAC020 in amba-pl08x driver
 - IOMMU support in pl330 driver
 - updates to bunch of drivers

* tag 'dmaengine-4.13-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (36 commits)
  dmaengine: qcom_hidma: correct API violation for submit
  dmaengine: zynqmp_dma: Remove max len check in zynqmp_dma_prep_memcpy
  dmaengine: tegra-apb: Really fix runtime-pm usage
  dmaengine: fsl_raid: make of_device_ids const.
  dmaengine: qcom_hidma: allow ACPI/DT parameters to be overridden
  dmaengine: fsldma: set BWC, DAHTS and SAHTS values correctly
  dmaengine: Kconfig: Simplify the help text for MXS_DMA
  dmaengine: pl330: Delete unused functions
  dmaengine: Replace WARN_TAINT_ONCE() with pr_warn_once()
  dmaengine: Kconfig: Extend the dependency for MXS_DMA
  dmaengine: mxs: Use %zu for printing a size_t variable
  dmaengine: ste_dma40: Cleanup scatterlist layering violations
  dmaengine: imx-dma: cleanup scatterlist layering violations
  dmaengine: use proper name for the R-Car SoC
  dmaengine: imx-sdma: Fix compilation warning.
  dmaengine: imx-sdma: Handle return value of clk_prepare_enable
  dmaengine: pl330: Add IOMMU support to slave tranfers
  dmaengine: DW DMAC: Handle return value of clk_prepare_enable
  dmaengine: pl08x: use GENMASK() to create bitmasks
  dmaengine: pl08x: Add support for Faraday Technology FTDMAC020
  ...
commit 2ceedf97ae
@@ -3,6 +3,11 @@
Required properties:
- compatible: "arm,pl080", "arm,primecell";
  "arm,pl081", "arm,primecell";
  "faraday,ftdmac020", "arm,primecell"
- arm,primecell-periphid: on the FTDMAC020 the primecell ID is not hard-coded
  in the hardware and must be specified here as <0x0003b080>. This number
  follows the PrimeCell standard numbering using the JEP106 vendor code 0x38
  for Faraday Technology.
- reg: Address range of the PL08x registers
- interrupt: The PL08x interrupt number
- clocks: The clock running the IP core clock

@@ -20,8 +25,8 @@ Optional properties:
- dma-requests: contains the total number of DMA requests supported by the DMAC
- memcpy-burst-size: the size of the bursts for memcpy: 1, 4, 8, 16, 32
  64, 128 or 256 bytes are legal values
- memcpy-bus-width: the bus width used for memcpy: 8, 16 or 32 are legal
  values
- memcpy-bus-width: the bus width used for memcpy in bits: 8, 16 or 32 are legal
  values, the Faraday FTDMAC020 can also accept 64 bits

Clients
Required properties:
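Example (a minimal, illustrative node only; the unit address, interrupt number
and clock phandle below are made up for this sketch and are not taken from the
binding text above):

	dmac: dma-controller@92000000 {
		compatible = "faraday,ftdmac020", "arm,primecell";
		arm,primecell-periphid = <0x0003b080>;
		reg = <0x92000000 0x1000>;
		interrupts = <24>;
		clocks = <&ahb_clk>;
		memcpy-burst-size = <256>;
		memcpy-bus-width = <32>;
	};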
@@ -0,0 +1,29 @@
* Broadcom SBA RAID engine

Required properties:
- compatible: Should be one of the following
              "brcm,iproc-sba"
              "brcm,iproc-sba-v2"
  The "brcm,iproc-sba" has support for only 6 PQ coefficients
  The "brcm,iproc-sba-v2" has support for only 30 PQ coefficients
- mboxes: List of phandle and mailbox channel specifiers

Example:

	raid_mbox: mbox@67400000 {
		...
		#mbox-cells = <3>;
		...
	};

	raid0 {
		compatible = "brcm,iproc-sba-v2";
		mboxes = <&raid_mbox 0 0x1 0xffff>,
			 <&raid_mbox 1 0x1 0xffff>,
			 <&raid_mbox 2 0x1 0xffff>,
			 <&raid_mbox 3 0x1 0xffff>,
			 <&raid_mbox 4 0x1 0xffff>,
			 <&raid_mbox 5 0x1 0xffff>,
			 <&raid_mbox 6 0x1 0xffff>,
			 <&raid_mbox 7 0x1 0xffff>;
	};
@@ -30,8 +30,9 @@ Required Properties:

- interrupts: interrupt specifiers for the DMAC, one for each entry in
  interrupt-names.
- interrupt-names: one entry per channel, named "ch%u", where %u is the
  channel number ranging from zero to the number of channels minus one.
- interrupt-names: one entry for the error interrupt, named "error", plus one
  entry per channel, named "ch%u", where %u is the channel number ranging from
  zero to the number of channels minus one.

- clock-names: "fck" for the functional clock
- clocks: a list of phandle + clock-specifier pairs, one for each entry
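For illustration, with the added "error" entry the interrupt-names property of
an 8-channel DMAC would look roughly like this (the channel count is
hypothetical; only the property shape follows the text above):

	interrupt-names = "error",
			  "ch0", "ch1", "ch2", "ch3",
			  "ch4", "ch5", "ch6", "ch7";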
@@ -1,6 +1,6 @@
* SHDMA Device Tree bindings

Sh-/r-mobile and r-car systems often have multiple identical DMA controller
Sh-/r-mobile and R-Car systems often have multiple identical DMA controller
instances, capable of serving any of a common set of DMA slave devices, using
the same configuration. To describe this topology we require all compatible
SHDMA DT nodes to be placed under a DMA multiplexer node. All such compatible
@@ -137,6 +137,9 @@ static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch)
}

static struct pl08x_platform_data pl08x_pd = {
	/* Some reasonable memcpy defaults */
	.memcpy_burst_size = PL08X_BURST_SZ_256,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.slave_channels = &pl08x_slave_channels[0],
	.num_slave_channels = ARRAY_SIZE(pl08x_slave_channels),
	.get_xfer_signal = pl08x_get_signal,
@@ -137,16 +137,10 @@ static const struct dma_slave_map s3c64xx_dma0_slave_map[] = {
};

struct pl08x_platform_data s3c64xx_dma0_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
		.cctl_memcpy =
			(PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
			PL080_CONTROL_PROT_SYS),
	},
	.memcpy_burst_size = PL08X_BURST_SZ_4,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.memcpy_prot_buff = true,
	.memcpy_prot_cache = true,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
	.get_xfer_signal = pl08x_get_xfer_signal,

@@ -238,16 +232,10 @@ static const struct dma_slave_map s3c64xx_dma1_slave_map[] = {
};

struct pl08x_platform_data s3c64xx_dma1_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
		.cctl_memcpy =
			(PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
			PL080_CONTROL_PROT_SYS),
	},
	.memcpy_burst_size = PL08X_BURST_SZ_4,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.memcpy_prot_buff = true,
	.memcpy_prot_cache = true,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
	.get_xfer_signal = pl08x_get_xfer_signal,
@@ -44,16 +44,10 @@ struct pl022_ssp_controller pl022_plat_data = {

/* dmac device registration */
struct pl08x_platform_data pl080_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
		.cctl_memcpy =
			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | \
			PL080_CONTROL_PROT_SYS),
	},
	.memcpy_burst_size = PL08X_BURST_SZ_16,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.memcpy_prot_buff = true,
	.memcpy_prot_cache = true,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
	.get_xfer_signal = pl080_get_signal,
@@ -322,16 +322,10 @@ static struct pl08x_channel_data spear600_dma_info[] = {
};

static struct pl08x_platform_data spear6xx_pl080_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
		.cctl_memcpy =
			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | \
			PL080_CONTROL_PROT_SYS),
	},
	.memcpy_burst_size = PL08X_BURST_SZ_16,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.memcpy_prot_buff = true,
	.memcpy_prot_cache = true,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
	.get_xfer_signal = pl080_get_signal,
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
	dma_addr_t dma_dest[2];
	int src_off = 0;

	if (submit->flags & ASYNC_TX_FENCE)
		dma_flags |= DMA_PREP_FENCE;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));

@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they can not provide
		 * a descriptor
@@ -62,8 +62,10 @@ config AMBA_PL08X
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Platform has a PL08x DMAC device
	  which can provide DMA engine support
	  Say yes if your platform has a PL08x DMAC device which can
	  provide DMA engine support. This includes the original ARM
	  PL080 and PL081, Samsungs PL080 derivative and Faraday
	  Technology's FTDMAC020 PL080 derivative.

config AMCC_PPC440SPE_ADMA
	tristate "AMCC PPC440SPe ADMA support"

@@ -99,6 +101,21 @@ config AXI_DMAC
	  controller is often used in Analog Device's reference designs for FPGA
	  platforms.

config BCM_SBA_RAID
	tristate "Broadcom SBA RAID engine support"
	depends on ARM64 || COMPILE_TEST
	depends on MAILBOX && RAID6_PQ
	select DMA_ENGINE
	select DMA_ENGINE_RAID
	select ASYNC_TX_DISABLE_XOR_VAL_DMA
	select ASYNC_TX_DISABLE_PQ_VAL_DMA
	default ARCH_BCM_IPROC
	help
	  Enable support for Broadcom SBA RAID Engine. The SBA RAID
	  engine is available on most of the Broadcom iProc SoCs. It
	  has the capability to offload memcpy, xor and pq computation
	  for raid5/6.

config COH901318
	bool "ST-Ericsson COH901318 DMA support"
	select DMA_ENGINE

@@ -354,13 +371,12 @@ config MV_XOR_V2

config MXS_DMA
	bool "MXS DMA support"
	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
	depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
	select STMP_DEVICE
	select DMA_ENGINE
	help
	  Support the MXS DMA engine. This engine including APBH-DMA
	  and APBX-DMA is integrated into Freescale
	  i.MX23/28/MX6Q/MX6DL/MX6UL chips.
	  and APBX-DMA is integrated into some Freescale chips.

config MX3_IPU
	bool "MX3x Image Processing Unit support"
@@ -17,6 +17,7 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -306,8 +306,12 @@ static int dw_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	clk_prepare_enable(chip->clk);
	return dw_dma_enable(chip);
}
@@ -877,7 +877,7 @@ static int fsl_re_remove(struct platform_device *ofdev)
	return 0;
}

static struct of_device_id fsl_re_ids[] = {
static const struct of_device_id fsl_re_ids[] = {
	{ .compatible = "fsl,raideng-v1.0", },
	{}
};
@@ -269,6 +269,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_SAHTS_MASK;
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

@@ -301,6 +302,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_DAHTS_MASK;
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

@@ -327,7 +329,8 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
	BUG_ON(size > 1024);

	mode = get_mr(chan);
	mode |= (__ilog2(size) << 24) & 0x0f000000;
	mode &= ~FSL_DMA_MR_BWC_MASK;
	mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;

	set_mr(chan, mode);
}
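The BWC hunk above packs __ilog2(size) into bits 24..27 of the mode register,
now masked with FSL_DMA_MR_BWC_MASK instead of a bare 0x0f000000. A minimal
user-space sketch of that encoding follows; ilog2() is re-implemented locally
as a stand-in for the kernel's __ilog2(), and the mask value is copied from the
fsldma.h hunk further down.

	#include <assert.h>
	#include <stdint.h>

	#define FSL_DMA_MR_BWC_MASK 0x0f000000u

	/* Integer log2, same result as the kernel's __ilog2() for powers of two. */
	static unsigned int ilog2(unsigned int v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		uint32_t mode = 0xdeadbeef;	/* pretend current MR value */

		/* Encode a 1024-byte bandwidth count exactly as the driver now does. */
		mode &= ~FSL_DMA_MR_BWC_MASK;
		mode |= (ilog2(1024) << 24) & FSL_DMA_MR_BWC_MASK;

		/* __ilog2(1024) == 10, so the BWC field becomes 0xA. */
		assert((mode & FSL_DMA_MR_BWC_MASK) == 0x0a000000);
		return 0;
	}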
@@ -36,6 +36,10 @@
#define FSL_DMA_MR_DAHE		0x00002000
#define FSL_DMA_MR_SAHE		0x00001000

#define FSL_DMA_MR_SAHTS_MASK	0x0000C000
#define FSL_DMA_MR_DAHTS_MASK	0x00030000
#define FSL_DMA_MR_BWC_MASK	0x0f000000

/*
 * Bandwidth/pause control determines how many bytes a given
 * channel is allowed to transfer before the DMA engine pauses
@@ -888,7 +888,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;

@@ -896,10 +896,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
@@ -1323,7 +1323,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
	}

	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
			channel, period_len, 0xffff);
		goto err_out;
	}

@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
			i, period_len, (u64)dma_addr,
			param & BD_WRAP ? "wrap" : "",
			param & BD_INTR ? " intr" : "");

@@ -1755,19 +1755,26 @@ static int sdma_probe(struct platform_device *pdev)
	if (IS_ERR(sdma->clk_ahb))
		return PTR_ERR(sdma->clk_ahb);

	clk_prepare(sdma->clk_ipg);
	clk_prepare(sdma->clk_ahb);
	ret = clk_prepare(sdma->clk_ipg);
	if (ret)
		return ret;

	ret = clk_prepare(sdma->clk_ahb);
	if (ret)
		goto err_clk;

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
				sdma);
	if (ret)
		return ret;
		goto err_irq;

	sdma->irq = irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs)
		return -ENOMEM;
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_irq;
	}

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;

@@ -1882,6 +1889,10 @@ static int sdma_probe(struct platform_device *pdev)
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
err_irq:
	clk_unprepare(sdma->clk_ahb);
err_clk:
	clk_unprepare(sdma->clk_ipg);
	return ret;
}

@@ -1893,6 +1904,8 @@ static int sdma_remove(struct platform_device *pdev)
	devm_free_irq(&pdev->dev, sdma->irq, sdma);
	dma_async_device_unregister(&sdma->dma_device);
	kfree(sdma->script_addrs);
	clk_unprepare(sdma->clk_ahb);
	clk_unprepare(sdma->clk_ipg);
	/* Kill the tasklet */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];
@@ -336,8 +336,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
	}

	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
			"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pr_warn_once("%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
			     dev_driver_string(&pdev->dev),
			     dev_name(&pdev->dev));
		free_dca_provider(dca);
@@ -42,6 +42,7 @@
#define MV_XOR_V2_DMA_IMSG_THRD_OFF			0x018
#define   MV_XOR_V2_DMA_IMSG_THRD_MASK			0x7FFF
#define   MV_XOR_V2_DMA_IMSG_THRD_SHIFT			0x0
#define   MV_XOR_V2_DMA_IMSG_TIMER_EN			BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF			0x01C
  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF			0x04C

@@ -55,6 +56,9 @@
#define MV_XOR_V2_DMA_DESQ_STOP_OFF			0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF			0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF			0x808
#define MV_XOR_V2_DMA_IMSG_TMOT				0x810
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK		0x1FFF
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT		0

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL				0x4

@@ -90,6 +94,13 @@
 */
#define MV_XOR_V2_DESC_NUM				1024

/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD			0x14
#define MV_XOR_V2_TIMER_THRD				0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.

@@ -246,6 +257,29 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
	return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold
 */
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* Configure threshold of number of descriptors, and enable timer */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
	reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

	/* Configure Timer Threshold */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
	reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
		MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}

static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
	struct mv_xor_v2_device *xor_dev = data;

@@ -501,9 +535,6 @@ static void mv_xor_v2_issue_pending(struct dma_chan *chan)
	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
	xor_dev->npendings = 0;

	/* Activate the channel */
	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	spin_unlock_bh(&xor_dev->lock);
}

@@ -665,6 +696,27 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
	return 0;
}

static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	/* Set this bit to disable to stop the XOR unit. */
	writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	mv_xor_v2_set_desc_size(xor_dev);
	mv_xor_v2_enable_imsg_thrd(xor_dev);
	mv_xor_v2_descq_init(xor_dev);

	return 0;
}

static int mv_xor_v2_probe(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev;

@@ -795,6 +847,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
	list_add_tail(&xor_dev->dmachan.device_node,
		      &dma_dev->channels);

	mv_xor_v2_enable_imsg_thrd(xor_dev);

	mv_xor_v2_descq_init(xor_dev);

	ret = dma_async_device_register(dma_dev);

@@ -844,6 +898,8 @@ MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);

static struct platform_driver mv_xor_v2_driver = {
	.probe		= mv_xor_v2_probe,
	.suspend	= mv_xor_v2_suspend,
	.resume		= mv_xor_v2_resume,
	.remove		= mv_xor_v2_remove,
	.driver		= {
		.name	= "mv_xor_v2",
@@ -617,7 +617,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum period size exceeded: %d > %d\n",
			"maximum period size exceeded: %zu > %d\n",
			period_len, MAX_XFER_BYTES);
		goto err_out;
	}
@@ -443,7 +443,10 @@ struct dma_pl330_chan {
	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;
	phys_addr_t fifo_addr;
	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
	dma_addr_t fifo_dma;
	enum dma_data_direction dir;

	/* for cyclic capability */
	bool cyclic;

@@ -538,11 +541,6 @@ struct _xfer_spec {
	struct dma_pl330_desc *desc;
};

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;

@@ -564,23 +562,6 @@ static inline u32 get_revision(u32 periph_id)
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	buf[1] = val;
	buf[2] = val >> 8;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)

@@ -738,18 +719,6 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)

@@ -817,39 +786,6 @@ static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{

@@ -2120,11 +2056,60 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
	return 1;
}

/*
 * We need the data direction between the DMAC (the dma-mapping "device") and
 * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
 */
static enum dma_data_direction
pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:
		return DMA_FROM_DEVICE;
	case DMA_DEV_TO_MEM:
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_DEV:
		return DMA_BIDIRECTIONAL;
	default:
		return DMA_NONE;
	}
}

static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
{
	if (pch->dir != DMA_NONE)
		dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
				   1 << pch->burst_sz, pch->dir, 0);
	pch->dir = DMA_NONE;
}

static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
				  enum dma_transfer_direction dir)
{
	struct device *dev = pch->chan.device->dev;
	enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);

	/* Already mapped for this config? */
	if (pch->dir == dma_dir)
		return true;

	pl330_unprep_slave_fifo(pch);
	pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
					 1 << pch->burst_sz, dma_dir, 0);
	if (dma_mapping_error(dev, pch->fifo_dma))
		return false;

	pch->dir = dma_dir;
	return true;
}

static int pl330_config(struct dma_chan *chan,
			struct dma_slave_config *slave_config)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	pl330_unprep_slave_fifo(pch);
	if (slave_config->direction == DMA_MEM_TO_DEV) {
		if (slave_config->dst_addr)
			pch->fifo_addr = slave_config->dst_addr;

@@ -2235,6 +2220,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
	spin_unlock_irqrestore(&pl330->lock, flags);
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	pl330_unprep_slave_fifo(pch);
}

static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,

@@ -2564,6 +2550,9 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		return NULL;
	}

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {

@@ -2593,12 +2582,12 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_addr;
			dst = pch->fifo_dma;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_addr;
			src = pch->fifo_dma;
			dst = dma_addr;
			break;
		default:

@@ -2711,12 +2700,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;
	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	first = NULL;

@@ -2742,13 +2731,13 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
			fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
				sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
			fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
				sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;

@@ -2906,6 +2895,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;
		pch->dir = DMA_NONE;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
@@ -1,7 +1,7 @@
/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and

@@ -210,6 +210,7 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);

@@ -230,9 +231,15 @@ static void hidma_issue_pending(struct dma_chan *dmach)
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,

@@ -315,17 +322,18 @@ static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);
	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;

@@ -431,6 +439,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */

@@ -795,8 +804,11 @@ static int hidma_probe(struct platform_device *pdev)
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
@@ -104,6 +104,7 @@ struct hidma_chan {
	struct dma_chan				chan;
	struct list_head			free;
	struct list_head			prepared;
	struct list_head			queued;
	struct list_head			active;
	struct list_head			completed;
@@ -1,7 +1,7 @@
/*
 * Qualcomm Technologies HIDMA DMA engine Management interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and

@@ -49,6 +49,26 @@
#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
#define HIDMA_MAX_CHANNEL_WEIGHT	15

static unsigned int max_write_request;
module_param(max_write_request, uint, 0644);
MODULE_PARM_DESC(max_write_request,
		"maximum write burst (default: ACPI/DT value)");

static unsigned int max_read_request;
module_param(max_read_request, uint, 0644);
MODULE_PARM_DESC(max_read_request,
		"maximum read burst (default: ACPI/DT value)");

static unsigned int max_wr_xactions;
module_param(max_wr_xactions, uint, 0644);
MODULE_PARM_DESC(max_wr_xactions,
	"maximum number of write transactions (default: ACPI/DT value)");

static unsigned int max_rd_xactions;
module_param(max_rd_xactions, uint, 0644);
MODULE_PARM_DESC(max_rd_xactions,
	"maximum number of read transactions (default: ACPI/DT value)");

int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
	unsigned int i;

@@ -207,12 +227,25 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
		goto out;
	}

	if (max_write_request) {
		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
			max_write_request);
		mgmtdev->max_write_request = max_write_request;
	} else
		max_write_request = mgmtdev->max_write_request;

	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
				      &mgmtdev->max_read_request);
	if (rc) {
		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
		goto out;
	}
	if (max_read_request) {
		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
			max_read_request);
		mgmtdev->max_read_request = max_read_request;
	} else
		max_read_request = mgmtdev->max_read_request;

	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
				      &mgmtdev->max_wr_xactions);

@@ -220,6 +253,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
		dev_err(&pdev->dev, "max-write-transactions missing\n");
		goto out;
	}
	if (max_wr_xactions) {
		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
			max_wr_xactions);
		mgmtdev->max_wr_xactions = max_wr_xactions;
	} else
		max_wr_xactions = mgmtdev->max_wr_xactions;

	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
				      &mgmtdev->max_rd_xactions);

@@ -227,6 +266,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
		dev_err(&pdev->dev, "max-read-transactions missing\n");
		goto out;
	}
	if (max_rd_xactions) {
		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
			max_rd_xactions);
		mgmtdev->max_rd_xactions = max_rd_xactions;
	} else
		max_rd_xactions = mgmtdev->max_rd_xactions;

	mgmtdev->priority = devm_kcalloc(&pdev->dev,
					 mgmtdev->dma_channels,
@@ -144,6 +144,7 @@ struct rcar_dmac_chan_map {
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @mid_rid: hardware MID/RID for the DMA client using this channel

@@ -161,6 +162,7 @@ struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;

@@ -1008,7 +1010,11 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/* Now no new interrupts will occur */
	/*
	 * Now no new interrupts will occur, but one might already be
	 * running. Wait for it to finish before freeing resources.
	 */
	synchronize_irq(rchan->irq);

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */

@@ -1366,6 +1372,13 @@ static void rcar_dmac_issue_pending(struct dma_chan *chan)
	spin_unlock_irqrestore(&rchan->lock, flags);
}

static void rcar_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	synchronize_irq(rchan->irq);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

@@ -1650,7 +1663,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int irq;
	int ret;

	rchan->index = index;

@@ -1667,8 +1679,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (irq < 0) {
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

@@ -1678,11 +1690,13 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
	ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
					rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			rchan->irq, ret);
		return ret;
	}

@@ -1846,6 +1860,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;
	engine->device_synchronize = rcar_dmac_device_synchronize;

	ret = dma_async_device_register(engine);
	if (ret < 0)
@@ -2528,10 +2528,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		dma_addr += period_len;
	}

	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;
	sg_chain(sg, periods + 1, sg);

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);
@@ -1492,37 +1492,9 @@ static int tegra_dma_remove(struct platform_device *pdev)
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;

	/* Enable clock before accessing register */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {

@@ -1543,21 +1515,21 @@ static int tegra_dma_pm_suspend(struct device *dev)
					    TEGRA_APBDMA_CHAN_WCOUNT);
	}

	/* Disable clock */
	pm_runtime_put(dev);
	clk_disable_unprepare(tdma->dma_clk);

	return 0;
}

static int tegra_dma_pm_resume(struct device *dev)
static int tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;
	int i, ret;

	/* Enable clock before accessing register */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);

@@ -1582,16 +1554,14 @@ static int tegra_dma_pm_resume(struct device *dev)
				(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	/* Disable clock */
	pm_runtime_put(dev);
	return 0;
}
#endif

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
@@ -794,9 +794,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(

	chan = to_chan(dchan);

	if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
		return NULL;

	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);

	spin_lock_bh(&chan->lock);
@@ -44,7 +44,14 @@

#define PL080_SYNC			(0x34)

/* Per channel configuration registers */
/* The Faraday Technology FTDMAC020 variant registers */
#define FTDMAC020_CH_BUSY		(0x20)
/* Identical to PL080_CONFIG */
#define FTDMAC020_CSR			(0x24)
/* Identical to PL080_SYNC */
#define FTDMAC020_SYNC			(0x2C)
#define FTDMAC020_REVISION		(0x30)
#define FTDMAC020_FEATURE		(0x34)

/* Per channel configuration registers */
#define PL080_Cx_BASE(x)		((0x100 + (x * 0x20)))

@@ -55,13 +62,20 @@
#define PL080_CH_CONFIG			(0x10)
#define PL080S_CH_CONTROL2		(0x10)
#define PL080S_CH_CONFIG		(0x14)
/* The Faraday FTDMAC020 derivative shuffles the registers around */
#define FTDMAC020_CH_CSR		(0x00)
#define FTDMAC020_CH_CFG		(0x04)
#define FTDMAC020_CH_SRC_ADDR		(0x08)
#define FTDMAC020_CH_DST_ADDR		(0x0C)
#define FTDMAC020_CH_LLP		(0x10)
#define FTDMAC020_CH_SIZE		(0x14)

#define PL080_LLI_ADDR_MASK		(0x3fffffff << 2)
#define PL080_LLI_ADDR_MASK		GENMASK(31, 2)
#define PL080_LLI_ADDR_SHIFT		(2)
#define PL080_LLI_LM_AHB2		BIT(0)

#define PL080_CONTROL_TC_IRQ_EN		BIT(31)
#define PL080_CONTROL_PROT_MASK		(0x7 << 28)
#define PL080_CONTROL_PROT_MASK		GENMASK(30, 28)
#define PL080_CONTROL_PROT_SHIFT	(28)
#define PL080_CONTROL_PROT_CACHE	BIT(30)
#define PL080_CONTROL_PROT_BUFF		BIT(29)

@@ -70,16 +84,16 @@
#define PL080_CONTROL_SRC_INCR		BIT(26)
#define PL080_CONTROL_DST_AHB2		BIT(25)
#define PL080_CONTROL_SRC_AHB2		BIT(24)
#define PL080_CONTROL_DWIDTH_MASK	(0x7 << 21)
#define PL080_CONTROL_DWIDTH_MASK	GENMASK(23, 21)
#define PL080_CONTROL_DWIDTH_SHIFT	(21)
#define PL080_CONTROL_SWIDTH_MASK	(0x7 << 18)
#define PL080_CONTROL_SWIDTH_MASK	GENMASK(20, 18)
#define PL080_CONTROL_SWIDTH_SHIFT	(18)
#define PL080_CONTROL_DB_SIZE_MASK	(0x7 << 15)
#define PL080_CONTROL_DB_SIZE_MASK	GENMASK(17, 15)
#define PL080_CONTROL_DB_SIZE_SHIFT	(15)
#define PL080_CONTROL_SB_SIZE_MASK	(0x7 << 12)
#define PL080_CONTROL_SB_SIZE_MASK	GENMASK(14, 12)
#define PL080_CONTROL_SB_SIZE_SHIFT	(12)
#define PL080_CONTROL_TRANSFER_SIZE_MASK	(0xfff << 0)
#define PL080S_CONTROL_TRANSFER_SIZE_MASK	(0x1ffffff << 0)
#define PL080_CONTROL_TRANSFER_SIZE_MASK	GENMASK(11, 0)
#define PL080S_CONTROL_TRANSFER_SIZE_MASK	GENMASK(24, 0)
#define PL080_CONTROL_TRANSFER_SIZE_SHIFT	(0)

#define PL080_BSIZE_1			(0x0)

@@ -102,11 +116,11 @@
#define PL080_CONFIG_LOCK		BIT(16)
#define PL080_CONFIG_TC_IRQ_MASK	BIT(15)
#define PL080_CONFIG_ERR_IRQ_MASK	BIT(14)
#define PL080_CONFIG_FLOW_CONTROL_MASK	(0x7 << 11)
#define PL080_CONFIG_FLOW_CONTROL_MASK	GENMASK(13, 11)
#define PL080_CONFIG_FLOW_CONTROL_SHIFT	(11)
#define PL080_CONFIG_DST_SEL_MASK	(0xf << 6)
#define PL080_CONFIG_DST_SEL_MASK	GENMASK(9, 6)
#define PL080_CONFIG_DST_SEL_SHIFT	(6)
#define PL080_CONFIG_SRC_SEL_MASK	(0xf << 1)
#define PL080_CONFIG_SRC_SEL_MASK	GENMASK(4, 1)
#define PL080_CONFIG_SRC_SEL_SHIFT	(1)
#define PL080_CONFIG_ENABLE		BIT(0)

@@ -119,6 +133,73 @@
#define PL080_FLOW_PER2MEM_PER		(0x6)
#define PL080_FLOW_SRC2DST_SRC		(0x7)

#define FTDMAC020_CH_CSR_TC_MSK		BIT(31)
/* Later versions have a threshold in bits 24..26, */
#define FTDMAC020_CH_CSR_FIFOTH_MSK	GENMASK(26, 24)
#define FTDMAC020_CH_CSR_FIFOTH_SHIFT	(24)
#define FTDMAC020_CH_CSR_CHPR1_MSK	GENMASK(23, 22)
#define FTDMAC020_CH_CSR_PROT3		BIT(21)
#define FTDMAC020_CH_CSR_PROT2		BIT(20)
#define FTDMAC020_CH_CSR_PROT1		BIT(19)
#define FTDMAC020_CH_CSR_SRC_SIZE_MSK	GENMASK(18, 16)
#define FTDMAC020_CH_CSR_SRC_SIZE_SHIFT	(16)
#define FTDMAC020_CH_CSR_ABT		BIT(15)
#define FTDMAC020_CH_CSR_SRC_WIDTH_MSK	GENMASK(13, 11)
#define FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT	(11)
#define FTDMAC020_CH_CSR_DST_WIDTH_MSK	GENMASK(10, 8)
#define FTDMAC020_CH_CSR_DST_WIDTH_SHIFT	(8)
#define FTDMAC020_CH_CSR_MODE		BIT(7)
/* 00 = increase, 01 = decrease, 10 = fix */
#define FTDMAC020_CH_CSR_SRCAD_CTL_MSK	GENMASK(6, 5)
#define FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT	(5)
#define FTDMAC020_CH_CSR_DSTAD_CTL_MSK	GENMASK(4, 3)
#define FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT	(3)
#define FTDMAC020_CH_CSR_SRC_SEL	BIT(2)
#define FTDMAC020_CH_CSR_DST_SEL	BIT(1)
#define FTDMAC020_CH_CSR_EN		BIT(0)

/* FIFO threshold setting */
#define FTDMAC020_CH_CSR_FIFOTH_1	(0x0)
#define FTDMAC020_CH_CSR_FIFOTH_2	(0x1)
#define FTDMAC020_CH_CSR_FIFOTH_4	(0x2)
#define FTDMAC020_CH_CSR_FIFOTH_8	(0x3)
#define FTDMAC020_CH_CSR_FIFOTH_16	(0x4)
/* The FTDMAC020 supports 64bit wide transfers */
#define FTDMAC020_WIDTH_64BIT		(0x3)
/* Address can be increased, decreased or fixed */
#define FTDMAC020_CH_CSR_SRCAD_CTL_INC	(0x0)
#define FTDMAC020_CH_CSR_SRCAD_CTL_DEC	(0x1)
#define FTDMAC020_CH_CSR_SRCAD_CTL_FIXED	(0x2)

#define FTDMAC020_CH_CFG_LLP_CNT_MASK	GENMASK(19, 16)
#define FTDMAC020_CH_CFG_LLP_CNT_SHIFT	(16)
#define FTDMAC020_CH_CFG_BUSY		BIT(8)
#define FTDMAC020_CH_CFG_INT_ABT_MASK	BIT(2)
#define FTDMAC020_CH_CFG_INT_ERR_MASK	BIT(1)
#define FTDMAC020_CH_CFG_INT_TC_MASK	BIT(0)

/* Inside the LLIs, the applicable CSR fields are mapped differently */
#define FTDMAC020_LLI_TC_MSK		BIT(28)
#define FTDMAC020_LLI_SRC_WIDTH_MSK	GENMASK(27, 25)
#define FTDMAC020_LLI_SRC_WIDTH_SHIFT	(25)
#define FTDMAC020_LLI_DST_WIDTH_MSK	GENMASK(24, 22)
#define FTDMAC020_LLI_DST_WIDTH_SHIFT	(22)
#define FTDMAC020_LLI_SRCAD_CTL_MSK	GENMASK(21, 20)
#define FTDMAC020_LLI_SRCAD_CTL_SHIFT	(20)
#define FTDMAC020_LLI_DSTAD_CTL_MSK	GENMASK(19, 18)
#define FTDMAC020_LLI_DSTAD_CTL_SHIFT	(18)
#define FTDMAC020_LLI_SRC_SEL		BIT(17)
#define FTDMAC020_LLI_DST_SEL		BIT(16)
#define FTDMAC020_LLI_TRANSFER_SIZE_MASK	GENMASK(11, 0)
#define FTDMAC020_LLI_TRANSFER_SIZE_SHIFT	(0)

#define FTDMAC020_CFG_LLP_CNT_MASK	GENMASK(19, 16)
#define FTDMAC020_CFG_LLP_CNT_SHIFT	(16)
#define FTDMAC020_CFG_BUSY		BIT(8)
#define FTDMAC020_CFG_INT_ABT_MSK	BIT(2)
#define FTDMAC020_CFG_INT_ERR_MSK	BIT(1)
#define FTDMAC020_CFG_INT_TC_MSK	BIT(0)

/* DMA linked list chain structure */

struct pl080_lli {
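The conversions above replace open-coded masks such as (0x7 << 21) with
GENMASK(). A small user-space sketch, with GENMASK() re-defined locally in a
32-bit form (the kernel's definition lives in include/linux/bits.h and is
long-based), checks that a few of the converted masks are bit-for-bit
identical to the old values:

	#include <assert.h>

	/* Reduced 32-bit GENMASK(), modelled on include/linux/bits.h */
	#define GENMASK(h, l) \
		(((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

	int main(void)
	{
		/* Old open-coded mask on the right, new GENMASK() form on the left */
		assert(GENMASK(23, 21) == (0x7u << 21));	/* PL080_CONTROL_DWIDTH_MASK */
		assert(GENMASK(20, 18) == (0x7u << 18));	/* PL080_CONTROL_SWIDTH_MASK */
		assert(GENMASK(11, 0) == (0xfffu << 0));	/* PL080_CONTROL_TRANSFER_SIZE_MASK */
		assert(GENMASK(9, 6) == (0xfu << 6));		/* PL080_CONFIG_DST_SEL_MASK */
		return 0;
	}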
@@ -47,8 +47,6 @@ enum {
 * devices with static assignments
 * @muxval: a number usually used to poke into some mux regiser to
 * mux in the signal to this channel
 * @cctl_memcpy: options for the channel control register for memcpy
 * *** not used for slave channels ***
 * @addr: source/target address in physical memory for this DMA channel,
 * can be the address of a FIFO register for burst requests for example.
 * This can be left undefined if the PrimeCell API is used for configuring

@@ -63,12 +61,28 @@ struct pl08x_channel_data {
	int min_signal;
	int max_signal;
	u32 muxval;
	u32 cctl_memcpy;
	dma_addr_t addr;
	bool single;
	u8 periph_buses;
};

enum pl08x_burst_size {
	PL08X_BURST_SZ_1,
	PL08X_BURST_SZ_4,
	PL08X_BURST_SZ_8,
	PL08X_BURST_SZ_16,
	PL08X_BURST_SZ_32,
	PL08X_BURST_SZ_64,
	PL08X_BURST_SZ_128,
	PL08X_BURST_SZ_256,
};

enum pl08x_bus_width {
	PL08X_BUS_WIDTH_8_BITS,
	PL08X_BUS_WIDTH_16_BITS,
	PL08X_BUS_WIDTH_32_BITS,
};

/**
 * struct pl08x_platform_data - the platform configuration for the PL08x
 * PrimeCells.

@@ -76,6 +90,11 @@ struct pl08x_channel_data {
 * platform, all inclusive, including multiplexed channels. The available
 * physical channels will be multiplexed around these signals as they are
 * requested, just enumerate all possible channels.
 * @num_slave_channels: number of elements in the slave channel array
 * @memcpy_burst_size: the appropriate burst size for memcpy operations
 * @memcpy_bus_width: memory bus width
 * @memcpy_prot_buff: whether memcpy DMA is bufferable
 * @memcpy_prot_cache: whether memcpy DMA is cacheable
 * @get_xfer_signal: request a physical signal to be used for a DMA transfer
 * immediately: if there is some multiplexing or similar blocking the use
 * of the channel the transfer can be denied by returning less than zero,

@@ -90,7 +109,10 @@ struct pl08x_channel_data {
struct pl08x_platform_data {
	struct pl08x_channel_data *slave_channels;
	unsigned int num_slave_channels;
	struct pl08x_channel_data memcpy_channel;
	enum pl08x_burst_size memcpy_burst_size;
	enum pl08x_bus_width memcpy_bus_width;
	bool memcpy_prot_buff;
	bool memcpy_prot_cache;
	int (*get_xfer_signal)(const struct pl08x_channel_data *);
	void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
	u8 lli_buses;
@@ -142,6 +142,7 @@ int raid6_select_algo(void);
extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
@@ -125,6 +125,26 @@ int main(int argc, char *argv[])
	printf("EXPORT_SYMBOL(raid6_gfexp);\n");
	printf("#endif\n");

	/* Compute log-of-2 table */
	printf("\nconst u8 __attribute__((aligned(256)))\n"
		"raid6_gflog[256] =\n" "{\n");
	for (i = 0; i < 256; i += 8) {
		printf("\t");
		for (j = 0; j < 8; j++) {
			v = 255;
			for (k = 0; k < 256; k++)
				if (exptbl[k] == (i + j)) {
					v = k;
					break;
				}
			printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
		}
	}
	printf("};\n");
	printf("#ifdef __KERNEL__\n");
	printf("EXPORT_SYMBOL(raid6_gflog);\n");
	printf("#endif\n");

	/* Compute inverse table x^-1 == x^254 */
	printf("\nconst u8 __attribute__((aligned(256)))\n"
		"raid6_gfinv[256] =\n" "{\n");