mirror of https://gitee.com/openkylin/linux.git
Merge branch 'topic/dmatest' into for-linus
commit 41bd0314fa
@@ -181,13 +181,6 @@ Currently, the types available are:
   - Used by the client drivers to register a callback that will be
     called on a regular basis through the DMA controller interrupt

-* DMA_SG
-  - The device supports memory to memory scatter-gather
-    transfers.
-  - Even though a plain memcpy can look like a particular case of a
-    scatter-gather transfer, with a single chunk to transfer, it's a
-    distinct transaction type in the mem2mem transfers case
-
 * DMA_PRIVATE
   - The devices only supports slave transfers, and as such isn't
     available for async transfers.
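The hunk above removes DMA_SG — memory-to-memory scatter-gather — from the provider documentation; the capability had no remaining in-kernel users. A client that relied on the old dmaengine_prep_dma_sg() helper can get the same data movement from the DMA_MEMCPY capability by walking both scatterlists and preparing one memcpy descriptor per contiguous overlap. A minimal sketch, assuming both lists are already DMA-mapped and cover the same total length (the helper name is illustrative, and unlike DMA_SG this submits independent descriptors rather than one chained transaction):

/*
 * Sketch only: emulate the removed DMA_SG operation with plain
 * DMA_MEMCPY descriptors (linux/dmaengine.h, linux/scatterlist.h).
 * Assumes dst_sg/src_sg are DMA-mapped and equal in total length.
 */
static int sg_copy_via_memcpy(struct dma_chan *chan,
                              struct scatterlist *dst_sg,
                              struct scatterlist *src_sg)
{
    size_t dst_avail = sg_dma_len(dst_sg);
    size_t src_avail = sg_dma_len(src_sg);

    while (dst_sg && src_sg) {
        size_t len = min(dst_avail, src_avail);
        dma_addr_t dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
        dma_addr_t src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
        struct dma_async_tx_descriptor *tx;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
            return -ENOMEM;
        dmaengine_submit(tx);

        dst_avail -= len;
        src_avail -= len;
        if (!dst_avail && (dst_sg = sg_next(dst_sg)))
            dst_avail = sg_dma_len(dst_sg);
        if (!src_avail && (src_sg = sg_next(src_sg)))
            src_avail = sg_dma_len(src_sg);
    }
    dma_async_issue_pending(chan);
    return 0;
}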
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
 	return &desc->tx_desc;
 }

-static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
-	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
-	unsigned int dst_nents, struct scatterlist *src_sg,
-	unsigned int src_nents, unsigned long flags)
-{
-	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
-						 dma_chan);
-	struct ccp_dma_desc *desc;
-
-	dev_dbg(chan->ccp->dev,
-		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
-		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
-
-	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
-			       flags);
-	if (!desc)
-		return NULL;
-
-	return &desc->tx_desc;
-}
-
 static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
 	struct dma_chan *dma_chan, unsigned long flags)
 {
@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 	dma_dev->directions = DMA_MEM_TO_MEM;
 	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
-	dma_cap_set(DMA_SG, dma_dev->cap_mask);
 	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

 	/* The DMA channels for this device can be set to public or private,
@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)

 	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
 	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
-	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
 	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
 	dma_dev->device_issue_pending = ccp_issue_pending;
 	dma_dev->device_tx_status = ccp_tx_status;
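Taken together, the two ccp hunks illustrate the registration contract this series tightens elsewhere: every capability bit set in cap_mask must be paired with the matching device_prep_* hook, so the DMA_SG bit and ccp_prep_dma_sg are dropped as a unit. Schematically (a reduced sketch of the pattern, not the full ccp code):

/* Reduced sketch of the provider registration contract shown above:
 * each dma_cap_set() bit is paired with its device_prep_* hook before
 * dma_async_device_register() validates the whole set.
 */
dma_cap_zero(dma_dev->cap_mask);

dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;

dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;

ret = dma_async_device_register(dma_dev);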
@@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }

-/**
- * atc_prep_dma_sg - prepare memory to memory scather-gather operation
- * @chan: the channel to prepare operation on
- * @dst_sg: destination scatterlist
- * @dst_nents: number of destination scatterlist entries
- * @src_sg: source scatterlist
- * @src_nents: number of source scatterlist entries
- * @flags: tx descriptor status flags
- */
-static struct dma_async_tx_descriptor *
-atc_prep_dma_sg(struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags)
-{
-	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	struct at_desc *desc = NULL;
-	struct at_desc *first = NULL;
-	struct at_desc *prev = NULL;
-	unsigned int src_width;
-	unsigned int dst_width;
-	size_t xfer_count;
-	u32 ctrla;
-	u32 ctrlb;
-	size_t dst_len = 0, src_len = 0;
-	dma_addr_t dst = 0, src = 0;
-	size_t len = 0, total_len = 0;
-
-	if (unlikely(dst_nents == 0 || src_nents == 0))
-		return NULL;
-
-	if (unlikely(dst_sg == NULL || src_sg == NULL))
-		return NULL;
-
-	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
-	      | ATC_SRC_ADDR_MODE_INCR
-	      | ATC_DST_ADDR_MODE_INCR
-	      | ATC_FC_MEM2MEM;
-
-	/*
-	 * loop until there is either no more source or no more destination
-	 * scatterlist entry
-	 */
-	while (true) {
-
-		/* prepare the next transfer */
-		if (dst_len == 0) {
-
-			/* no more destination scatterlist entries */
-			if (!dst_sg || !dst_nents)
-				break;
-
-			dst = sg_dma_address(dst_sg);
-			dst_len = sg_dma_len(dst_sg);
-
-			dst_sg = sg_next(dst_sg);
-			dst_nents--;
-		}
-
-		if (src_len == 0) {
-
-			/* no more source scatterlist entries */
-			if (!src_sg || !src_nents)
-				break;
-
-			src = sg_dma_address(src_sg);
-			src_len = sg_dma_len(src_sg);
-
-			src_sg = sg_next(src_sg);
-			src_nents--;
-		}
-
-		len = min_t(size_t, src_len, dst_len);
-		if (len == 0)
-			continue;
-
-		/* take care for the alignment */
-		src_width = dst_width = atc_get_xfer_width(src, dst, len);
-
-		ctrla = ATC_SRC_WIDTH(src_width) |
-			ATC_DST_WIDTH(dst_width);
-
-		/*
-		 * The number of transfers to set up refer to the source width
-		 * that depends on the alignment.
-		 */
-		xfer_count = len >> src_width;
-		if (xfer_count > ATC_BTSIZE_MAX) {
-			xfer_count = ATC_BTSIZE_MAX;
-			len = ATC_BTSIZE_MAX << src_width;
-		}
-
-		/* create the transfer */
-		desc = atc_desc_get(atchan);
-		if (!desc)
-			goto err_desc_get;
-
-		desc->lli.saddr = src;
-		desc->lli.daddr = dst;
-		desc->lli.ctrla = ctrla | xfer_count;
-		desc->lli.ctrlb = ctrlb;
-
-		desc->txd.cookie = 0;
-		desc->len = len;
-
-		atc_desc_chain(&first, &prev, desc);
-
-		/* update the lengths and addresses for the next loop cycle */
-		dst_len -= len;
-		src_len -= len;
-		dst += len;
-		src += len;
-
-		total_len += len;
-	}
-
-	/* First descriptor of the chain embedds additional information */
-	first->txd.cookie = -EBUSY;
-	first->total_len = total_len;
-
-	/* set end-of-link to the last link descriptor of list*/
-	set_desc_eol(desc);
-
-	first->txd.flags = flags; /* client is in control of this ack */
-
-	return &first->txd;
-
-err_desc_get:
-	atc_desc_put(atchan, first);
-	return NULL;
-}
-
 /**
  * atc_dma_cyclic_check_values
  * Check for too big/unaligned periods and unaligned DMA buffer
@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)

 	/* setup platform data for each SoC */
 	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
-	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
-	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

 	/* get DMA parameters from controller type */
 	plat_dat = at_dma_get_driver_data(pdev);
@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}

-	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
-		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
-
 	dma_writel(atdma, EN, AT_DMA_ENABLE);

-	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
+	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
-	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
 	  plat_dat->nr_channels);

 	dma_async_device_register(&atdma->dma_common);
@@ -923,30 +923,85 @@ int dma_async_device_register(struct dma_device *device)
 		return -ENODEV;

 	/* validate device routines */
-	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
-		!device->device_prep_dma_memcpy);
-	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
-		!device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
-		!device->device_prep_dma_xor_val);
-	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
-		!device->device_prep_dma_pq);
-	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
-		!device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-		!device->device_prep_dma_memset);
-	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
-		!device->device_prep_dma_interrupt);
-	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
-		!device->device_prep_dma_sg);
-	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
-		!device->device_prep_dma_cyclic);
-	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
-		!device->device_prep_interleaved_dma);
-
-	BUG_ON(!device->device_tx_status);
-	BUG_ON(!device->device_issue_pending);
-	BUG_ON(!device->dev);
+	if (!device->dev) {
+		pr_err("DMAdevice must have dev\n");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMCPY");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMSET");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERRUPT");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_CYCLIC");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERLEAVE");
+		return -EIO;
+	}
+
+
+	if (!device->device_tx_status) {
+		dev_err(device->dev, "Device tx_status is not defined\n");
+		return -EIO;
+	}
+
+
+	if (!device->device_issue_pending) {
+		dev_err(device->dev, "Device issue_pending is not defined\n");
+		return -EIO;
+	}

 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
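With the BUG_ON() checks replaced, a provider that advertises a capability without implementing its prep hook no longer crashes the kernel at registration: dma_async_device_register() logs the mismatch with dev_err() and returns -EIO, so the driver's probe simply fails. A hypothetical probe path observing the new behavior (the foo_* names are illustrative):

/* Hypothetical probe path: registration failure is now recoverable. */
static int foo_dma_probe(struct platform_device *pdev)
{
    struct dma_device *ddev = &foo_get_dev(pdev)->ddev; /* illustrative */
    int ret;

    dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
    /* bug: ddev->device_prep_dma_memcpy left NULL */

    ret = dma_async_device_register(ddev);
    if (ret)        /* dev_err() already printed; was a BUG() before */
        return ret; /* -EIO */

    return 0;
}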
@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(iterations,
 		"Iterations before stopping test (default: infinite)");

-static unsigned int sg_buffers = 1;
-module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(sg_buffers,
-		"Number of scatter gather buffers (default: 1)");
-
 static unsigned int dmatest;
 module_param(dmatest, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dmatest,
-		"dmatest 0-memcpy 1-slave_sg (default: 0)");
+		"dmatest 0-memcpy 1-memset (default: 0)");

 static unsigned int xor_sources = 3;
 module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -158,6 +153,7 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COPY		0x40
 #define PATTERN_OVERWRITE	0x20
 #define PATTERN_COUNT_MASK	0x1f
+#define PATTERN_MEMSET_IDX	0x01

 struct dmatest_thread {
 	struct list_head	node;
@@ -239,46 +235,62 @@ static unsigned long dmatest_random(void)
 	return buf;
 }

+static inline u8 gen_inv_idx(u8 index, bool is_memset)
+{
+	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;
+
+	return ~val & PATTERN_COUNT_MASK;
+}
+
+static inline u8 gen_src_value(u8 index, bool is_memset)
+{
+	return PATTERN_SRC | gen_inv_idx(index, is_memset);
+}
+
+static inline u8 gen_dst_value(u8 index, bool is_memset)
+{
+	return PATTERN_DST | gen_inv_idx(index, is_memset);
+}
+
 static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
-		unsigned int buf_size)
+		unsigned int buf_size, bool is_memset)
 {
 	unsigned int i;
 	u8 *buf;

 	for (; (buf = *bufs); bufs++) {
 		for (i = 0; i < start; i++)
-			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+			buf[i] = gen_src_value(i, is_memset);
 		for ( ; i < start + len; i++)
-			buf[i] = PATTERN_SRC | PATTERN_COPY
-				| (~i & PATTERN_COUNT_MASK);
+			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
 		for ( ; i < buf_size; i++)
-			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+			buf[i] = gen_src_value(i, is_memset);
 		buf++;
 	}
 }

 static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
-		unsigned int buf_size)
+		unsigned int buf_size, bool is_memset)
 {
 	unsigned int i;
 	u8 *buf;

 	for (; (buf = *bufs); bufs++) {
 		for (i = 0; i < start; i++)
-			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+			buf[i] = gen_dst_value(i, is_memset);
 		for ( ; i < start + len; i++)
-			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
-				| (~i & PATTERN_COUNT_MASK);
+			buf[i] = gen_dst_value(i, is_memset) |
+				PATTERN_OVERWRITE;
 		for ( ; i < buf_size; i++)
-			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+			buf[i] = gen_dst_value(i, is_memset);
 	}
 }

 static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
-		unsigned int counter, bool is_srcbuf)
+		unsigned int counter, bool is_srcbuf, bool is_memset)
 {
 	u8 diff = actual ^ pattern;
-	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+	u8 expected = pattern | gen_inv_idx(counter, is_memset);
 	const char *thread_name = current->comm;

 	if (is_srcbuf)
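The helpers above only change the 5-bit counter component of each test byte: a source byte is PATTERN_SRC plus an inverted index, and in memset mode the index is pinned to PATTERN_MEMSET_IDX so every byte carries the same value — which is exactly what a correct memset must reproduce in the destination. A worked example as a userspace sketch (PATTERN_SRC is 0x80 in mainline dmatest.c; that define sits outside this hunk):

/* Worked example (userspace sketch) of the dmatest byte patterns. */
#include <stdio.h>

#define PATTERN_SRC		0x80
#define PATTERN_COPY		0x40
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01

static unsigned char gen_inv_idx(unsigned char index, int is_memset)
{
	unsigned char val = is_memset ? PATTERN_MEMSET_IDX : index;
	return ~val & PATTERN_COUNT_MASK;
}

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		printf("copy src[%u]=0x%02x  memset src[%u]=0x%02x\n",
		       i, PATTERN_SRC | PATTERN_COPY | gen_inv_idx(i, 0),
		       i, PATTERN_SRC | PATTERN_COPY | gen_inv_idx(i, 1));
	/* copy bytes vary with i (0xdf, 0xde, 0xdd, ...), while memset
	 * bytes are the constant 0xde, so a correct DMA memset leaves
	 * the whole destination window equal to one value. */
	return 0;
}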
@@ -298,7 +310,7 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,

 static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 		unsigned int end, unsigned int counter, u8 pattern,
-		bool is_srcbuf)
+		bool is_srcbuf, bool is_memset)
 {
 	unsigned int i;
 	unsigned int error_count = 0;
@@ -311,11 +323,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 		counter = counter_orig;
 		for (i = start; i < end; i++) {
 			actual = buf[i];
-			expected = pattern | (~counter & PATTERN_COUNT_MASK);
+			expected = pattern | gen_inv_idx(counter, is_memset);
 			if (actual != expected) {
 				if (error_count < MAX_ERROR_COUNT)
 					dmatest_mismatch(actual, pattern, i,
-							 counter, is_srcbuf);
+							 counter, is_srcbuf,
+							 is_memset);
 				error_count++;
 			}
 			counter++;
@@ -435,6 +448,7 @@ static int dmatest_func(void *data)
 	s64 runtime = 0;
 	unsigned long long total_len = 0;
 	u8 align = 0;
+	bool is_memset = false;

 	set_freezable();

@@ -448,9 +462,10 @@ static int dmatest_func(void *data)
 	if (thread->type == DMA_MEMCPY) {
 		align = dev->copy_align;
 		src_cnt = dst_cnt = 1;
-	} else if (thread->type == DMA_SG) {
-		align = dev->copy_align;
-		src_cnt = dst_cnt = sg_buffers;
+	} else if (thread->type == DMA_MEMSET) {
+		align = dev->fill_align;
+		src_cnt = dst_cnt = 1;
+		is_memset = true;
 	} else if (thread->type == DMA_XOR) {
 		/* force odd to ensure dst = src */
 		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -530,8 +545,6 @@ static int dmatest_func(void *data)
 		dma_addr_t srcs[src_cnt];
 		dma_addr_t *dsts;
 		unsigned int src_off, dst_off, len;
-		struct scatterlist tx_sg[src_cnt];
-		struct scatterlist rx_sg[src_cnt];

 		total_tests++;

@@ -571,9 +584,9 @@ static int dmatest_func(void *data)
 			dst_off = (dst_off >> align) << align;

 			dmatest_init_srcs(thread->srcs, src_off, len,
-					  params->buf_size);
+					  params->buf_size, is_memset);
 			dmatest_init_dsts(thread->dsts, dst_off, len,
-					  params->buf_size);
+					  params->buf_size, is_memset);

 			diff = ktime_sub(ktime_get(), start);
 			filltime = ktime_add(filltime, diff);
@@ -627,22 +640,15 @@ static int dmatest_func(void *data)
 			um->bidi_cnt++;
 		}

-		sg_init_table(tx_sg, src_cnt);
-		sg_init_table(rx_sg, src_cnt);
-		for (i = 0; i < src_cnt; i++) {
-			sg_dma_address(&rx_sg[i]) = srcs[i];
-			sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
-			sg_dma_len(&tx_sg[i]) = len;
-			sg_dma_len(&rx_sg[i]) = len;
-		}
-
 		if (thread->type == DMA_MEMCPY)
 			tx = dev->device_prep_dma_memcpy(chan,
 							 dsts[0] + dst_off,
 							 srcs[0], len, flags);
-		else if (thread->type == DMA_SG)
-			tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
-						     rx_sg, src_cnt, flags);
+		else if (thread->type == DMA_MEMSET)
+			tx = dev->device_prep_dma_memset(chan,
+						dsts[0] + dst_off,
+						*(thread->srcs[0] + src_off),
+						len, flags);
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
 						      dsts[0] + dst_off,
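Unlike the removed scatter-gather test, a memset descriptor takes one destination address and one fill value; dmatest passes the first byte of the initialized source window, so the destination can be verified against the same constant pattern. A client-side sketch of the same prep call, assuming a channel that advertises DMA_MEMSET and an already-mapped dst_dma/len:

/* Sketch: raw memset prep, mirroring the dmatest call above.
 * chan, dst_dma and len are assumed to be set up by the caller. */
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;

if (dma_has_cap(DMA_MEMSET, dev->cap_mask)) {
	tx = dev->device_prep_dma_memset(chan, dst_dma, 0xde, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (tx) {
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}
}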
@@ -722,23 +728,25 @@ static int dmatest_func(void *data)
 			start = ktime_get();
 			pr_debug("%s: verifying source buffer...\n", current->comm);
 			error_count = dmatest_verify(thread->srcs, 0, src_off,
-					0, PATTERN_SRC, true);
+					0, PATTERN_SRC, true, is_memset);
 			error_count += dmatest_verify(thread->srcs, src_off,
 					src_off + len, src_off,
-					PATTERN_SRC | PATTERN_COPY, true);
+					PATTERN_SRC | PATTERN_COPY, true, is_memset);
 			error_count += dmatest_verify(thread->srcs, src_off + len,
 					params->buf_size, src_off + len,
-					PATTERN_SRC, true);
+					PATTERN_SRC, true, is_memset);

 			pr_debug("%s: verifying dest buffer...\n", current->comm);
 			error_count += dmatest_verify(thread->dsts, 0, dst_off,
-					0, PATTERN_DST, false);
+					0, PATTERN_DST, false, is_memset);
+
 			error_count += dmatest_verify(thread->dsts, dst_off,
 					dst_off + len, src_off,
-					PATTERN_SRC | PATTERN_COPY, false);
+					PATTERN_SRC | PATTERN_COPY, false, is_memset);
+
 			error_count += dmatest_verify(thread->dsts, dst_off + len,
 					params->buf_size, dst_off + len,
-					PATTERN_DST, false);
+					PATTERN_DST, false, is_memset);

 			diff = ktime_sub(ktime_get(), start);
 			comparetime = ktime_add(comparetime, diff);
@@ -821,8 +829,8 @@ static int dmatest_add_threads(struct dmatest_info *info,

 	if (type == DMA_MEMCPY)
 		op = "copy";
-	else if (type == DMA_SG)
-		op = "sg";
+	else if (type == DMA_MEMSET)
+		op = "set";
 	else if (type == DMA_XOR)
 		op = "xor";
 	else if (type == DMA_PQ)
@@ -883,9 +891,9 @@ static int dmatest_add_channel(struct dmatest_info *info,
 		}
 	}

-	if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
 		if (dmatest == 1) {
-			cnt = dmatest_add_threads(info, dtc, DMA_SG);
+			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
 			thread_count += cnt > 0 ? cnt : 0;
 		}
 	}
@@ -961,8 +969,8 @@ static void run_threaded_test(struct dmatest_info *info)
 	params->noverify = noverify;

 	request_channels(info, DMA_MEMCPY);
+	request_channels(info, DMA_MEMSET);
 	request_channels(info, DMA_XOR);
-	request_channels(info, DMA_SG);
 	request_channels(info, DMA_PQ);
 }

@@ -825,122 +825,6 @@ fsl_dma_prep_memcpy(struct dma_chan *dchan,
 	return NULL;
 }

-static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
-	struct scatterlist *dst_sg, unsigned int dst_nents,
-	struct scatterlist *src_sg, unsigned int src_nents,
-	unsigned long flags)
-{
-	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
-	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	size_t dst_avail, src_avail;
-	dma_addr_t dst, src;
-	size_t len;
-
-	/* basic sanity checks */
-	if (dst_nents == 0 || src_nents == 0)
-		return NULL;
-
-	if (dst_sg == NULL || src_sg == NULL)
-		return NULL;
-
-	/*
-	 * TODO: should we check that both scatterlists have the same
-	 * TODO: number of bytes in total? Is that really an error?
-	 */
-
-	/* get prepared for the loop */
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-
-	/* run until we are out of scatterlist entries */
-	while (true) {
-
-		/* create the largest transaction possible */
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
-		if (len == 0)
-			goto fetch;
-
-		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
-		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
-		/* allocate and populate the descriptor */
-		new = fsl_dma_alloc_descriptor(chan);
-		if (!new) {
-			chan_err(chan, "%s\n", msg_ld_oom);
-			goto fail;
-		}
-
-		set_desc_cnt(chan, &new->hw, len);
-		set_desc_src(chan, &new->hw, src);
-		set_desc_dst(chan, &new->hw, dst);
-
-		if (!first)
-			first = new;
-		else
-			set_desc_next(chan, &prev->hw, new->async_tx.phys);
-
-		new->async_tx.cookie = 0;
-		async_tx_ack(&new->async_tx);
-		prev = new;
-
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
-
-		/* update metadata */
-		dst_avail -= len;
-		src_avail -= len;
-
-fetch:
-		/* fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-
-			/* no more entries: we're done */
-			if (dst_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			dst_sg = sg_next(dst_sg);
-			if (dst_sg == NULL)
-				break;
-
-			dst_nents--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-
-		/* fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-
-			/* no more entries: we're done */
-			if (src_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			src_sg = sg_next(src_sg);
-			if (src_sg == NULL)
-				break;
-
-			src_nents--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	new->async_tx.flags = flags; /* client is in control of this ack */
-	new->async_tx.cookie = -EBUSY;
-
-	/* Set End-of-link to the last link descriptor of new list */
-	set_ld_eol(chan, new);
-
-	return &first->async_tx;
-
-fail:
-	if (!first)
-		return NULL;
-
-	fsldma_free_desc_list_reverse(chan, &first->tx_list);
-	return NULL;
-}
-
 static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan;
@@ -1357,12 +1241,10 @@ static int fsldma_of_probe(struct platform_device *op)
 	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
-	dma_cap_set(DMA_SG, fdev->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
-	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
 	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_config = fsl_dma_device_config;
@@ -68,36 +68,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
 	hw_desc->byte_count = byte_count;
 }

-/* Populate the descriptor */
-static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
-				     dma_addr_t dma_src, dma_addr_t dma_dst,
-				     u32 len, struct mv_xor_desc_slot *prev)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-
-	hw_desc->status = XOR_DESC_DMA_OWNED;
-	hw_desc->phy_next_desc = 0;
-	/* Configure for XOR with only one src address -> MEMCPY */
-	hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
-	hw_desc->phy_dest_addr = dma_dst;
-	hw_desc->phy_src_addr[0] = dma_src;
-	hw_desc->byte_count = len;
-
-	if (prev) {
-		struct mv_xor_desc *hw_prev = prev->hw_desc;
-
-		hw_prev->phy_next_desc = desc->async_tx.phys;
-	}
-}
-
-static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-
-	/* Enable end-of-descriptor interrupt */
-	hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
-}
-
 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -662,132 +632,6 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 }

-/**
- * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
- * @chan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
-		   unsigned int dst_sg_len, struct scatterlist *src_sg,
-		   unsigned int src_sg_len, unsigned long flags)
-{
-	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *new;
-	struct mv_xor_desc_slot *first = NULL;
-	struct mv_xor_desc_slot *prev = NULL;
-	size_t len, dst_avail, src_avail;
-	dma_addr_t dma_dst, dma_src;
-	int desc_cnt = 0;
-	int ret;
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
-		__func__, dst_sg_len, src_sg_len, flags);
-
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-
-	/* Run until we are out of scatterlist entries */
-	while (true) {
-		/* Allocate and populate the descriptor */
-		desc_cnt++;
-		new = mv_chan_alloc_slot(mv_chan);
-		if (!new) {
-			dev_err(mv_chan_to_devp(mv_chan),
-				"Out of descriptors (desc_cnt=%d)!\n",
-				desc_cnt);
-			goto err;
-		}
-
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
-		if (len == 0)
-			goto fetch;
-
-		if (len < MV_XOR_MIN_BYTE_COUNT) {
-			dev_err(mv_chan_to_devp(mv_chan),
-				"Transfer size of %zu too small!\n", len);
-			goto err;
-		}
-
-		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
-			dst_avail;
-		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
-			src_avail;
-
-		/* Check if a new window needs to get added for 'dst' */
-		ret = mv_xor_add_io_win(mv_chan, dma_dst);
-		if (ret)
-			goto err;
-
-		/* Check if a new window needs to get added for 'src' */
-		ret = mv_xor_add_io_win(mv_chan, dma_src);
-		if (ret)
-			goto err;
-
-		/* Populate the descriptor */
-		mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
-		prev = new;
-		dst_avail -= len;
-		src_avail -= len;
-
-		if (!first)
-			first = new;
-		else
-			list_move_tail(&new->node, &first->sg_tx_list);
-
-fetch:
-		/* Fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-			if (dst_sg_len == 0)
-				break;
-
-			/* Fetch the next entry: if there are no more: done */
-			dst_sg = sg_next(dst_sg);
-			if (dst_sg == NULL)
-				break;
-
-			dst_sg_len--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-
-		/* Fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-			if (src_sg_len == 0)
-				break;
-
-			/* Fetch the next entry: if there are no more: done */
-			src_sg = sg_next(src_sg);
-			if (src_sg == NULL)
-				break;
-
-			src_sg_len--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	/* Set the EOD flag in the last descriptor */
-	mv_xor_desc_config_eod(new);
-	first->async_tx.flags = flags;
-
-	return &first->async_tx;
-
-err:
-	/* Cleanup: Move all descriptors back into the free list */
-	spin_lock_bh(&mv_chan->lock);
-	mv_desc_clean_slot(first, mv_chan);
-	spin_unlock_bh(&mv_chan->lock);
-
-	return NULL;
-}
-
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1254,8 +1098,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-	if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1305,11 +1147,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		goto err_free_irq;
 	}

-	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
-		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

 	dma_async_device_register(dma_dev);
@@ -1552,7 +1393,6 @@ static int mv_xor_probe(struct platform_device *pdev)

 		dma_cap_zero(cap_mask);
 		dma_cap_set(DMA_MEMCPY, cap_mask);
-		dma_cap_set(DMA_SG, cap_mask);
 		dma_cap_set(DMA_XOR, cap_mask);
 		dma_cap_set(DMA_INTERRUPT, cap_mask);

@@ -1005,21 +1005,6 @@ static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
 		DMA_MEM_TO_MEM, flags);
 }

-static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
-	struct dma_chan *dchan,
-	struct scatterlist *dst_sg, unsigned int dst_nents,
-	struct scatterlist *src_sg, unsigned int src_nents,
-	unsigned long flags)
-{
-	struct nbpf_channel *chan = nbpf_to_chan(dchan);
-
-	if (dst_nents != src_nents)
-		return NULL;
-
-	return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
-			    DMA_MEM_TO_MEM, flags);
-}
-
 static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
 	enum dma_transfer_direction direction, unsigned long flags, void *context)
@@ -1417,13 +1402,11 @@ static int nbpf_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
-	dma_cap_set(DMA_SG, dma_dev->cap_mask);

 	/* Common and MEMCPY operations */
 	dma_dev->device_alloc_chan_resources
 		= nbpf_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
-	dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
 	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
 	dma_dev->device_tx_status = nbpf_tx_status;
 	dma_dev->device_issue_pending = nbpf_issue_pending;
@@ -2484,19 +2484,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 		       DMA_MEM_TO_MEM, dma_flags);
 }

-static struct dma_async_tx_descriptor *
-d40_prep_memcpy_sg(struct dma_chan *chan,
-		   struct scatterlist *dst_sg, unsigned int dst_nents,
-		   struct scatterlist *src_sg, unsigned int src_nents,
-		   unsigned long dma_flags)
-{
-	if (dst_nents != src_nents)
-		return NULL;
-
-	return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
-			   DMA_MEM_TO_MEM, dma_flags);
-}
-
 static struct dma_async_tx_descriptor *
 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		  unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2821,9 +2808,6 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
 		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	}

-	if (dma_has_cap(DMA_SG, dev->cap_mask))
-		dev->device_prep_dma_sg = d40_prep_memcpy_sg;
-
 	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
 		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

@@ -2865,7 +2849,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,

 	dma_cap_zero(base->dma_memcpy.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

 	d40_ops_init(base, &base->dma_memcpy);

@@ -2883,7 +2866,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	dma_cap_zero(base->dma_both.cap_mask);
 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
 	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

 	d40_ops_init(base, &base->dma_both);
@@ -391,11 +391,6 @@ static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
 	*paddr += nbytes;
 }

-static void xgene_dma_invalidate_buffer(__le64 *ext8)
-{
-	*ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
-}
-
 static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
 {
 	switch (idx) {
|
||||||
XGENE_DMA_DESC_HOENQ_NUM_POS);
|
XGENE_DMA_DESC_HOENQ_NUM_POS);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
|
|
||||||
struct xgene_dma_desc_sw *desc_sw,
|
|
||||||
dma_addr_t dst, dma_addr_t src,
|
|
||||||
size_t len)
|
|
||||||
{
|
|
||||||
struct xgene_dma_desc_hw *desc1, *desc2;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
/* Get 1st descriptor */
|
|
||||||
desc1 = &desc_sw->desc1;
|
|
||||||
xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
|
|
||||||
|
|
||||||
/* Set destination address */
|
|
||||||
desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
|
|
||||||
desc1->m3 |= cpu_to_le64(dst);
|
|
||||||
|
|
||||||
/* Set 1st source address */
|
|
||||||
xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
|
|
||||||
|
|
||||||
if (!len)
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We need to split this source buffer,
|
|
||||||
* and need to use 2nd descriptor
|
|
||||||
*/
|
|
||||||
desc2 = &desc_sw->desc2;
|
|
||||||
desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
|
|
||||||
|
|
||||||
/* Set 2nd to 5th source address */
|
|
||||||
for (i = 0; i < 4 && len; i++)
|
|
||||||
xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
|
|
||||||
&len, &src);
|
|
||||||
|
|
||||||
/* Invalidate unused source address field */
|
|
||||||
for (; i < 4; i++)
|
|
||||||
xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
|
|
||||||
|
|
||||||
/* Updated flag that we have prepared 64B descriptor */
|
|
||||||
desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
|
static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
|
||||||
struct xgene_dma_desc_sw *desc_sw,
|
struct xgene_dma_desc_sw *desc_sw,
|
||||||
dma_addr_t *dst, dma_addr_t *src,
|
dma_addr_t *dst, dma_addr_t *src,
|
||||||
|
@@ -891,114 +844,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
 	chan->desc_pool = NULL;
 }

-static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
-	struct dma_chan *dchan, struct scatterlist *dst_sg,
-	u32 dst_nents, struct scatterlist *src_sg,
-	u32 src_nents, unsigned long flags)
-{
-	struct xgene_dma_desc_sw *first = NULL, *new = NULL;
-	struct xgene_dma_chan *chan;
-	size_t dst_avail, src_avail;
-	dma_addr_t dst, src;
-	size_t len;
-
-	if (unlikely(!dchan))
-		return NULL;
-
-	if (unlikely(!dst_nents || !src_nents))
-		return NULL;
-
-	if (unlikely(!dst_sg || !src_sg))
-		return NULL;
-
-	chan = to_dma_chan(dchan);
-
-	/* Get prepared for the loop */
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-	dst_nents--;
-	src_nents--;
-
-	/* Run until we are out of scatterlist entries */
-	while (true) {
-		/* Create the largest transaction possible */
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
-		if (len == 0)
-			goto fetch;
-
-		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
-		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
-		/* Allocate the link descriptor from DMA pool */
-		new = xgene_dma_alloc_descriptor(chan);
-		if (!new)
-			goto fail;
-
-		/* Prepare DMA descriptor */
-		xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
-
-		if (!first)
-			first = new;
-
-		new->tx.cookie = 0;
-		async_tx_ack(&new->tx);
-
-		/* update metadata */
-		dst_avail -= len;
-		src_avail -= len;
-
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
-
-fetch:
-		/* fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-			/* no more entries: we're done */
-			if (dst_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			dst_sg = sg_next(dst_sg);
-			if (!dst_sg)
-				break;
-
-			dst_nents--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-
-		/* fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-			/* no more entries: we're done */
-			if (src_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			src_sg = sg_next(src_sg);
-			if (!src_sg)
-				break;
-
-			src_nents--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	if (!new)
-		return NULL;
-
-	new->tx.flags = flags; /* client is in control of this ack */
-	new->tx.cookie = -EBUSY;
-	list_splice(&first->tx_list, &new->tx_list);
-
-	return &new->tx;
-fail:
-	if (!first)
-		return NULL;
-
-	xgene_dma_free_desc_list(chan, &first->tx_list);
-	return NULL;
-}
-
 static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
 	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
 	u32 src_cnt, size_t len, unsigned long flags)
@@ -1653,7 +1498,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 	dma_cap_zero(dma_dev->cap_mask);

 	/* Set DMA device capability */
-	dma_cap_set(DMA_SG, dma_dev->cap_mask);

 	/* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
 	 * and channel 1 supports XOR, PQ both. First thing here is we have
@@ -1679,7 +1523,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 	dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
 	dma_dev->device_issue_pending = xgene_dma_issue_pending;
 	dma_dev->device_tx_status = xgene_dma_tx_status;
-	dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;

 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
@@ -1731,8 +1574,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)

 	/* DMA capability info */
 	dev_info(pdma->dev,
-		 "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
-		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
+		 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
 		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");

@@ -829,98 +829,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
 	return &first->async_tx;
 }

-/**
- * zynqmp_dma_prep_slave_sg - prepare descriptors for a memory sg transaction
- * @dchan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
-			struct dma_chan *dchan, struct scatterlist *dst_sg,
-			unsigned int dst_sg_len, struct scatterlist *src_sg,
-			unsigned int src_sg_len, unsigned long flags)
-{
-	struct zynqmp_dma_desc_sw *new, *first = NULL;
-	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	void *desc = NULL, *prev = NULL;
-	size_t len, dst_avail, src_avail;
-	dma_addr_t dma_dst, dma_src;
-	u32 desc_cnt = 0, i;
-	struct scatterlist *sg;
-
-	for_each_sg(src_sg, sg, src_sg_len, i)
-		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
-					 ZYNQMP_DMA_MAX_TRANS_LEN);
-
-	spin_lock_bh(&chan->lock);
-	if (desc_cnt > chan->desc_free_cnt) {
-		spin_unlock_bh(&chan->lock);
-		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
-		return NULL;
-	}
-	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
-	spin_unlock_bh(&chan->lock);
-
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-
-	/* Run until we are out of scatterlist entries */
-	while (true) {
-		/* Allocate and populate the descriptor */
-		new = zynqmp_dma_get_descriptor(chan);
-		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
-		if (len == 0)
-			goto fetch;
-		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
-			dst_avail;
-		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
-			src_avail;
-
-		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
-					     len, prev);
-		prev = desc;
-		dst_avail -= len;
-		src_avail -= len;
-
-		if (!first)
-			first = new;
-		else
-			list_add_tail(&new->node, &first->tx_list);
-fetch:
-		/* Fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-			if (dst_sg_len == 0)
-				break;
-			dst_sg = sg_next(dst_sg);
-			if (dst_sg == NULL)
-				break;
-			dst_sg_len--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-		/* Fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-			if (src_sg_len == 0)
-				break;
-			src_sg = sg_next(src_sg);
-			if (src_sg == NULL)
-				break;
-			src_sg_len--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	zynqmp_dma_desc_config_eod(chan, desc);
-	first->async_tx.flags = flags;
-	return &first->async_tx;
-}
-
 /**
  * zynqmp_dma_chan_remove - Channel remove function
  * @chan: ZynqMP DMA channel pointer
@@ -1064,11 +972,9 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&zdev->common.channels);

 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
-	dma_cap_set(DMA_SG, zdev->common.cap_mask);
 	dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);

 	p = &zdev->common;
-	p->device_prep_dma_sg = zynqmp_dma_prep_sg;
 	p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
 	p->device_terminate_all = zynqmp_dma_device_terminate_all;
 	p->device_issue_pending = zynqmp_dma_issue_pending;
@@ -68,7 +68,6 @@ enum dma_transaction_type {
 	DMA_MEMSET,
 	DMA_MEMSET_SG,
 	DMA_INTERRUPT,
-	DMA_SG,
 	DMA_PRIVATE,
 	DMA_ASYNC_TX,
 	DMA_SLAVE,
@@ -775,11 +774,6 @@ struct dma_device {
 		unsigned int nents, int value, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
-		struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags);

 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
 		struct dma_chan *chan, struct scatterlist *sgl,
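Note what survives: device_prep_slave_sg, visible in the trailing context, still provides scatter-gather for slave transfers, where one side of the transfer is a device FIFO; only the mem-to-mem scatter-gather operation is removed. A sketch of an unaffected slave SG preparation (chan, sgl and sg_len assumed set up by the caller):

/* Sketch: slave scatter-gather is unaffected by this removal. */
struct dma_async_tx_descriptor *tx;

tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
			     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx)
	dmaengine_submit(tx);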
@@ -909,19 +903,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
 						    len, flags);
 }

-static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
-		struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags)
-{
-	if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
-		return NULL;
-
-	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
-						src_sg, src_nents, flags);
-}
-
 /**
  * dmaengine_terminate_all() - Terminate all active DMA transfers
  * @chan: The channel for which to terminate the transfers