diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index e164f3295f5d..350968baaf88 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -21,6 +21,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 
 #include "dw-axi-dmac.h"
@@ -195,43 +196,58 @@ static inline const char *axi_chan_name(struct axi_dma_chan *chan)
 	return dma_chan_name(&chan->vc.chan);
 }
 
-static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
+static struct axi_dma_desc *axi_desc_alloc(u32 num)
+{
+	struct axi_dma_desc *desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
+	if (!desc->hw_desc) {
+		kfree(desc);
+		return NULL;
+	}
+
+	return desc;
+}
+
+static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
+					dma_addr_t *addr)
 {
 	struct dw_axi_dma *dw = chan->chip->dw;
-	struct axi_dma_desc *desc;
+	struct axi_dma_lli *lli;
 	dma_addr_t phys;
 
-	desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
-	if (unlikely(!desc)) {
+	lli = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
+	if (unlikely(!lli)) {
 		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
 			axi_chan_name(chan));
 		return NULL;
 	}
 
 	atomic_inc(&chan->descs_allocated);
-	INIT_LIST_HEAD(&desc->xfer_list);
-	desc->vd.tx.phys = phys;
-	desc->chan = chan;
+	*addr = phys;
 
-	return desc;
+	return lli;
 }
 
 static void axi_desc_put(struct axi_dma_desc *desc)
 {
 	struct axi_dma_chan *chan = desc->chan;
 	struct dw_axi_dma *dw = chan->chip->dw;
-	struct axi_dma_desc *child, *_next;
-	unsigned int descs_put = 0;
+	int count = atomic_read(&chan->descs_allocated);
+	struct axi_dma_hw_desc *hw_desc;
+	int descs_put;
 
-	list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
-		list_del(&child->xfer_list);
-		dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
-		descs_put++;
+	for (descs_put = 0; descs_put < count; descs_put++) {
+		hw_desc = &desc->hw_desc[descs_put];
+		dma_pool_free(dw->desc_pool, hw_desc->lli, hw_desc->llp);
 	}
 
-	dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
-	descs_put++;
-
+	kfree(desc->hw_desc);
+	kfree(desc);
 	atomic_sub(descs_put, &chan->descs_allocated);
 	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
 		axi_chan_name(chan), descs_put,
@@ -258,9 +274,9 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
 	return ret;
 }
 
-static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
 {
-	desc->lli.llp = cpu_to_le64(adr);
+	desc->lli->llp = cpu_to_le64(adr);
 }
 
 static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
@@ -295,7 +311,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
 	axi_chan_iowrite32(chan, CH_CFG_H, reg);
 
-	write_chan_llp(chan, first->vd.tx.phys | lms);
+	write_chan_llp(chan, first->hw_desc[0].llp | lms);
 
 	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
 	axi_chan_irq_sig_set(chan, irq_mask);
@@ -378,67 +394,78 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
  * transfer and completes the DMA transfer operation at the end of current
  * block transfer.
  */
-static void set_desc_last(struct axi_dma_desc *desc)
+static void set_desc_last(struct axi_dma_hw_desc *desc)
 {
 	u32 val;
 
-	val = le32_to_cpu(desc->lli.ctl_hi);
+	val = le32_to_cpu(desc->lli->ctl_hi);
 	val |= CH_CTL_H_LLI_LAST;
-	desc->lli.ctl_hi = cpu_to_le32(val);
+	desc->lli->ctl_hi = cpu_to_le32(val);
 }
 
-static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
 {
-	desc->lli.sar = cpu_to_le64(adr);
+	desc->lli->sar = cpu_to_le64(adr);
 }
 
-static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
 {
-	desc->lli.dar = cpu_to_le64(adr);
+	desc->lli->dar = cpu_to_le64(adr);
 }
 
-static void set_desc_src_master(struct axi_dma_desc *desc)
+static void set_desc_src_master(struct axi_dma_hw_desc *desc)
 {
 	u32 val;
 
 	/* Select AXI0 for source master */
-	val = le32_to_cpu(desc->lli.ctl_lo);
+	val = le32_to_cpu(desc->lli->ctl_lo);
 	val &= ~CH_CTL_L_SRC_MAST;
-	desc->lli.ctl_lo = cpu_to_le32(val);
+	desc->lli->ctl_lo = cpu_to_le32(val);
 }
 
-static void set_desc_dest_master(struct axi_dma_desc *desc)
+static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
+				 struct axi_dma_desc *desc)
 {
 	u32 val;
 
 	/* Select AXI1 for source master if available */
-	val = le32_to_cpu(desc->lli.ctl_lo);
+	val = le32_to_cpu(hw_desc->lli->ctl_lo);
 	if (desc->chan->chip->dw->hdata->nr_masters > 1)
 		val |= CH_CTL_L_DST_MAST;
 	else
 		val &= ~CH_CTL_L_DST_MAST;
 
-	desc->lli.ctl_lo = cpu_to_le32(val);
+	hw_desc->lli->ctl_lo = cpu_to_le32(val);
 }
 
 static struct dma_async_tx_descriptor *
 dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 			 dma_addr_t src_adr, size_t len, unsigned long flags)
 {
-	struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 	size_t block_ts, max_block_ts, xfer_len;
-	u32 xfer_width, reg;
+	struct axi_dma_hw_desc *hw_desc = NULL;
+	struct axi_dma_desc *desc = NULL;
+	u32 xfer_width, reg, num;
+	u64 llp = 0;
 	u8 lms = 0; /* Select AXI0 master for LLI fetching */
 
 	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
 		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
 
 	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
+	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
+	desc = axi_desc_alloc(num);
+	if (unlikely(!desc))
+		goto err_desc_get;
 
+	desc->chan = chan;
+	num = 0;
 	while (len) {
 		xfer_len = len;
 
+		hw_desc = &desc->hw_desc[num];
 		/*
 		 * Take care for the alignment.
 		 * Actually source and destination widths can be different, but
@@ -457,13 +484,13 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 			xfer_len = max_block_ts << xfer_width;
 		}
 
-		desc = axi_desc_get(chan);
-		if (unlikely(!desc))
+		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
+		if (unlikely(!hw_desc->lli))
 			goto err_desc_get;
 
-		write_desc_sar(desc, src_adr);
-		write_desc_dar(desc, dst_adr);
-		desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
+		write_desc_sar(hw_desc, src_adr);
+		write_desc_dar(hw_desc, dst_adr);
+		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
 
 		reg = CH_CTL_H_LLI_VALID;
 		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
@@ -474,7 +501,7 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 			       CH_CTL_H_AWLEN_EN |
 			       burst_len << CH_CTL_H_AWLEN_POS);
 		}
-		desc->lli.ctl_hi = cpu_to_le32(reg);
+		hw_desc->lli->ctl_hi = cpu_to_le32(reg);
 
 		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
 		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
@@ -482,62 +509,61 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
 		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
 		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
-		desc->lli.ctl_lo = cpu_to_le32(reg);
+		hw_desc->lli->ctl_lo = cpu_to_le32(reg);
 
-		set_desc_src_master(desc);
-		set_desc_dest_master(desc);
+		set_desc_src_master(hw_desc);
+		set_desc_dest_master(hw_desc, desc);
 
-		/* Manage transfer list (xfer_list) */
-		if (!first) {
-			first = desc;
-		} else {
-			list_add_tail(&desc->xfer_list, &first->xfer_list);
-			write_desc_llp(prev, desc->vd.tx.phys | lms);
-		}
-		prev = desc;
 
 		/* update the length and addresses for the next loop cycle */
 		len -= xfer_len;
 		dst_adr += xfer_len;
 		src_adr += xfer_len;
+		num++;
 	}
 
 	/* Total len of src/dest sg == 0, so no descriptor were allocated */
-	if (unlikely(!first))
+	if (unlikely(!desc))
 		return NULL;
 
 	/* Set end-of-link to the last link descriptor of list */
-	set_desc_last(desc);
+	set_desc_last(&desc->hw_desc[num - 1]);
+	/* Managed transfer list */
+	do {
+		hw_desc = &desc->hw_desc[--num];
+		write_desc_llp(hw_desc, llp | lms);
+		llp = hw_desc->llp;
+	} while (num);
 
-	return vchan_tx_prep(&chan->vc, &first->vd, flags);
+	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
 
 err_desc_get:
-	if (first)
-		axi_desc_put(first);
+	if (desc)
+		axi_desc_put(desc);
 	return NULL;
 }
 
 static void axi_chan_dump_lli(struct axi_dma_chan *chan,
-			      struct axi_dma_desc *desc)
+			      struct axi_dma_hw_desc *desc)
 {
 	dev_err(dchan2dev(&chan->vc.chan),
 		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
-		le64_to_cpu(desc->lli.sar),
-		le64_to_cpu(desc->lli.dar),
-		le64_to_cpu(desc->lli.llp),
-		le32_to_cpu(desc->lli.block_ts_lo),
-		le32_to_cpu(desc->lli.ctl_hi),
-		le32_to_cpu(desc->lli.ctl_lo));
+		le64_to_cpu(desc->lli->sar),
+		le64_to_cpu(desc->lli->dar),
+		le64_to_cpu(desc->lli->llp),
+		le32_to_cpu(desc->lli->block_ts_lo),
+		le32_to_cpu(desc->lli->ctl_hi),
+		le32_to_cpu(desc->lli->ctl_lo));
 }
 
 static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
 				   struct axi_dma_desc *desc_head)
 {
-	struct axi_dma_desc *desc;
+	int count = atomic_read(&chan->descs_allocated);
+	int i;
 
-	axi_chan_dump_lli(chan, desc_head);
-	list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
-		axi_chan_dump_lli(chan, desc);
+	for (i = 0; i < count; i++)
+		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
 }
 
 static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
@@ -872,7 +898,7 @@ static int dw_probe(struct platform_device *pdev)
 
 	/* Lli address must be aligned to a 64-byte boundary */
 	dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
-					 sizeof(struct axi_dma_desc), 64, 0);
+					 sizeof(struct axi_dma_lli), 64, 0);
 	if (!dw->desc_pool) {
 		dev_err(chip->dev, "No memory for descriptors dma pool\n");
 		return -ENOMEM;
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 18b6014cf9b4..41e775e6e593 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -41,6 +41,7 @@ struct axi_dma_chan {
 
 	struct virt_dma_chan		vc;
 
+	struct axi_dma_desc		*desc;
 	/* these other elements are all protected by vc.lock */
 	bool				is_paused;
 };
@@ -80,12 +81,16 @@ struct __packed axi_dma_lli {
 	__le32		reserved_hi;
 };
 
+struct axi_dma_hw_desc {
+	struct axi_dma_lli	*lli;
+	dma_addr_t		llp;
+};
+
 struct axi_dma_desc {
-	struct axi_dma_lli		lli;
+	struct axi_dma_hw_desc	*hw_desc;
 	struct virt_dma_desc		vd;
 	struct axi_dma_chan		*chan;
-	struct list_head		xfer_list;
 };
 
 static inline struct device *dchan2dev(struct dma_chan *dchan)
 {
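
A note for readers on the new list management, since it inverts the old forward-threaded scheme: dma_chan_prep_dma_memcpy() now sizes the hw_desc[] array up front with DIV_ROUND_UP(len, max_block_ts << xfer_width), fills one LLI per hardware block, and only then links the blocks in a single backward pass, so each lli->llp receives the bus address of the following block and the last block keeps llp == 0 (its CH_CTL_H_LLI_LAST bit marks the end of the chain). The userspace sketch below illustrates just that sizing and back-chaining; the simplified structs and fake bus addresses are stand-ins, whereas the real driver uses dma_pool memory, cpu_to_le64() and the full axi_dma_lli layout.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for axi_dma_lli and axi_dma_hw_desc. */
struct lli { unsigned long long llp; };
struct hw_desc {
	struct lli *lli;		/* CPU pointer to the block's LLI */
	unsigned long long llp;		/* bus address of that LLI */
};

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t len = 10000, max_block_ts = 1024;
	unsigned int xfer_width = 2;	/* 32-bit transfers */
	/* One hw_desc per hardware block, as in the patched prep routine. */
	unsigned int num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	struct hw_desc *desc = calloc(num, sizeof(*desc));
	unsigned long long llp = 0;	/* the final block terminates the chain */
	unsigned int i, n;

	for (i = 0; i < num; i++) {
		desc[i].lli = calloc(1, sizeof(struct lli));
		desc[i].llp = 0x1000ull * (i + 1);	/* fake bus addresses */
	}

	/* Backward pass, mirroring the patch's do/while loop: block i's llp
	 * gets the bus address of block i + 1; the last one keeps llp == 0. */
	n = num;
	do {
		struct hw_desc *h = &desc[--n];
		h->lli->llp = llp;
		llp = h->llp;
	} while (n);

	for (i = 0; i < num; i++)
		printf("block %u: llp -> 0x%llx\n", i, desc[i].lli->llp);
	for (i = 0; i < num; i++)
		free(desc[i].lli);
	free(desc);
	return 0;
}

With len = 10000 this yields three blocks whose llp fields read 0x2000, 0x3000 and 0x0, i.e. the same forward-linked list the old prev/write_desc_llp() bookkeeping produced, but without a list_head per block.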
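
Teardown is the mirror image: axi_desc_put() frees count pool-backed LLIs by index (count read from the channel's descs_allocated counter), then the hw_desc array, then the wrapper. A hedged sketch of that ownership order, with plain malloc/free standing in for dma_pool_free() and kfree():

#include <stdlib.h>

struct lli { unsigned long long llp; };
struct hw_desc { struct lli *lli; unsigned long long llp; };
struct desc { struct hw_desc *hw_desc; unsigned int nr; };

static void desc_put(struct desc *d)
{
	unsigned int i;

	/* Release in reverse order of axi_desc_alloc()/axi_desc_get(). */
	for (i = 0; i < d->nr; i++)
		free(d->hw_desc[i].lli);	/* dma_pool_free() in the driver */
	free(d->hw_desc);			/* kfree(desc->hw_desc) */
	free(d);				/* kfree(desc) */
}

int main(void)
{
	struct desc *d = calloc(1, sizeof(*d));
	unsigned int i;

	d->nr = 3;
	d->hw_desc = calloc(d->nr, sizeof(*d->hw_desc));
	for (i = 0; i < d->nr; i++)
		d->hw_desc[i].lli = calloc(1, sizeof(struct lli));
	desc_put(d);
	return 0;
}

Note that the driver derives the loop bound from chan->descs_allocated rather than storing a per-descriptor count, which relies on the channel having a single descriptor outstanding when axi_desc_put() runs.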