From eb8df543e444492328f506adffc7dfe94111f1bd Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni
Date: Fri, 5 May 2017 11:57:44 +0200
Subject: [PATCH 01/12] dmaengine: mv_xor_v2: handle mv_xor_v2_prep_sw_desc()
 error properly

mv_xor_v2_prep_sw_desc() is called from a few different places in the
driver, but none of the callers take into account the fact that it
might return NULL. This commit fixes that, ensuring that we don't
panic if there are no more descriptors available.

Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Petazzoni
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor_v2.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index a28a01fcba67..e9280207ac19 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -389,6 +389,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 		__func__, len, &src, &dest, flags);

 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;

 	sw_desc->async_tx.flags = flags;
@@ -443,6 +445,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		__func__, src_cnt, len, &dest, flags);

 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;

 	sw_desc->async_tx.flags = flags;
@@ -491,6 +495,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 		container_of(chan, struct mv_xor_v2_device, dmachan);

 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;

 	/* set the HW descriptor */
 	hw_descriptor = &sw_desc->hw_desc;

From 2aab4e18152cd30cb5d2f4c27629fc8a04aed979 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni
Date: Fri, 5 May 2017 11:57:45 +0200
Subject: [PATCH 02/12] dmaengine: mv_xor_v2: properly handle wrapping in the
 array of HW descriptors

mv_xor_v2_tasklet() loops over completed HW descriptors. Before the
loop, it initializes 'next_pending_hw_desc' to the first HW descriptor
to handle, and the loop then simply increments this pointer, without
taking care of wrapping when the last HW descriptor is reached. The
'pending_ptr' index was wrapped back to 0 at the end of the array, but
it wasn't used in each iteration of the loop to calculate
next_pending_hw_desc.

This commit fixes that, and makes next_pending_hw_desc a variable
local to the loop itself.
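
To make the corrected ring walk easy to see outside the driver, here is
a minimal, self-contained C model (the names, sizes, and payloads are
hypothetical stand-ins for illustration; this is not the driver's code):

	#include <stdio.h>

	#define DESC_NUM 8	/* stand-in for MV_XOR_V2_DESC_NUM */

	struct desc { int payload; };

	static struct desc hw_desq[DESC_NUM];

	static void handle_pending(int pending_ptr, int num_of_pending)
	{
		for (int i = 0; i < num_of_pending; i++) {
			/* recompute the pointer from the index on every pass */
			struct desc *d = &hw_desq[pending_ptr];

			printf("handling slot %d (payload %d)\n",
			       pending_ptr, d->payload);

			/* wrap with >=, so DESC_NUM - 1 is the last valid slot */
			if (++pending_ptr >= DESC_NUM)
				pending_ptr = 0;
		}
	}

	int main(void)
	{
		for (int i = 0; i < DESC_NUM; i++)
			hw_desq[i].payload = i;

		/* start near the end so the walk has to wrap */
		handle_pending(DESC_NUM - 2, 4);
		return 0;
	}

Note the two differences from the broken code: the descriptor pointer is
derived from the index on every iteration, and the wrap test uses >=
rather than > (the old test let the index reach DESC_NUM, one past the
last valid slot).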
Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Petazzoni
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor_v2.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index e9280207ac19..bdfb36ecff81 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -560,7 +560,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
 	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
 	int pending_ptr, num_of_pending, i;
-	struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
 	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

 	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -568,17 +567,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
 	/* get the pending descriptors parameters */
 	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

-	/* next HW descriptor */
-	next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
 	/* loop over free descriptors */
 	for (i = 0; i < num_of_pending; i++) {
-
-		if (pending_ptr > MV_XOR_V2_DESC_NUM)
-			pending_ptr = 0;
-
-		if (next_pending_sw_desc != NULL)
-			next_pending_hw_desc++;
+		struct mv_xor_v2_descriptor *next_pending_hw_desc =
+			xor_dev->hw_desq_virt + pending_ptr;

 		/* get the SW descriptor related to the HW descriptor */
 		next_pending_sw_desc =
@@ -614,6 +606,8 @@ static void mv_xor_v2_tasklet(unsigned long data)

 		/* increment the next descriptor */
 		pending_ptr++;
+		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+			pending_ptr = 0;
 	}

 	if (num_of_pending != 0) {

From bc473da1ed726c975ad47f8d7d27631de11356d8 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni
Date: Fri, 5 May 2017 11:57:46 +0200
Subject: [PATCH 03/12] dmaengine: mv_xor_v2: do not use descriptors not
 acked by async_tx

Descriptors that have not been acknowledged by the async_tx layer
should not be re-used, so this commit adjusts the implementation of
mv_xor_v2_prep_sw_desc() to skip descriptors for which
async_tx_test_ack() is false.
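
A standalone sketch of the selection logic this patch introduces: take
the first *acked* entry of the free list rather than blindly taking the
head. The array-based list and the names here are hypothetical
stand-ins for the driver's list_for_each_entry() walk:

	#include <stdbool.h>
	#include <stdio.h>

	struct sw_desc {
		int idx;
		bool acked;	/* models async_tx_test_ack() */
	};

	/* walk the free list; skip entries the client has not acked yet */
	static struct sw_desc *prep_sw_desc(struct sw_desc *free_list, int n)
	{
		for (int i = 0; i < n; i++) {
			if (free_list[i].acked)
				return &free_list[i];
		}
		return NULL;	/* nothing acked: callers must handle NULL */
	}

	int main(void)
	{
		struct sw_desc free_list[] = {
			{ .idx = 0, .acked = false },	/* still owned by the client */
			{ .idx = 1, .acked = true },
		};
		struct sw_desc *d = prep_sw_desc(free_list, 2);

		if (d)
			printf("picked descriptor %d\n", d->idx);
		else
			printf("no usable descriptor\n");
		return 0;
	}

The NULL return is exactly why patch 01 of this series had to teach the
prep callbacks to check the result of mv_xor_v2_prep_sw_desc().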
Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Petazzoni
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor_v2.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index bdfb36ecff81..cb60e7c4aa16 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -344,6 +344,7 @@ static struct mv_xor_v2_sw_desc *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
 	struct mv_xor_v2_sw_desc *sw_desc;
+	bool found = false;

 	/* Lock the channel */
 	spin_lock_bh(&xor_dev->lock);
@@ -355,19 +356,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 		return NULL;
 	}

-	/* get a free SW descriptor from the SW DESQ */
-	sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-				   struct mv_xor_v2_sw_desc, free_list);
+	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+		if (async_tx_test_ack(&sw_desc->async_tx)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		spin_unlock_bh(&xor_dev->lock);
+		return NULL;
+	}
+
 	list_del(&sw_desc->free_list);

 	/* Release the channel */
 	spin_unlock_bh(&xor_dev->lock);

-	/* set the async tx descriptor */
-	dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-	sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-	async_tx_ack(&sw_desc->async_tx);
-
 	return sw_desc;
 }

@@ -785,8 +790,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)

 	/* add all SW descriptors to the free list */
 	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-		xor_dev->sw_desq[i].idx = i;
-		list_add(&xor_dev->sw_desq[i].free_list,
+		struct mv_xor_v2_sw_desc *sw_desc =
+			xor_dev->sw_desq + i;
+		sw_desc->idx = i;
+		dma_async_tx_descriptor_init(&sw_desc->async_tx,
+					     &xor_dev->dmachan);
+		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+		async_tx_ack(&sw_desc->async_tx);
+
+		list_add(&sw_desc->free_list,
 			 &xor_dev->free_sw_desc);
 	}

From ab2c5f0a77fe49bdb6e307b397496373cb47d2c2 Mon Sep 17 00:00:00 2001
From: Hanna Hawa
Date: Fri, 5 May 2017 11:57:47 +0200
Subject: [PATCH 04/12] dmaengine: mv_xor_v2: enable XOR engine after its
 configuration

The engine was enabled prior to its configuration, which isn't
correct. This patch moves the activation of the XOR engine to after
its configuration.

Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Cc: stable@vger.kernel.org
Signed-off-by: Hanna Hawa
Signed-off-by: Thomas Petazzoni
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor_v2.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index cb60e7c4aa16..211b8c0e3cfb 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -653,9 +653,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
 	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

-	/* enable the DMA engine */
-	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
 	/*
 	 * This is a temporary solution, until we activate the
 	 * SMMU.
 	 * Set the attributes for reading & writing data buffers
@@ -699,6 +696,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
 	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
 	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

+	/* enable the DMA engine */
+	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
 	return 0;
 }

From 44d5887a8bf1e86915c8ff647337cb138149da82 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni
Date: Fri, 5 May 2017 11:57:48 +0200
Subject: [PATCH 05/12] dmaengine: mv_xor_v2: fix tx_submit() implementation

mv_xor_v2_tx_submit() gets the next available HW descriptor by calling
mv_xor_v2_get_desq_write_ptr(), which reads a HW register telling the
next available HW descriptor. This was working fine when HW
descriptors were issued for processing directly in tx_submit().

However, as part of the review process of the driver, a change was
requested to move the actual kick-off of HW descriptor processing to
->issue_pending(). Due to this, reading the HW register to know the
next available HW descriptor no longer works.

So instead of using this HW register, we implement a software index
pointing to the next available HW descriptor.

Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Petazzoni
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor_v2.c | 22 +++++-----------------
 1 file changed, 5 insertions(+), 17 deletions(-)

diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 211b8c0e3cfb..4684eceea759 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
 	struct mv_xor_v2_sw_desc *sw_desq;
 	int desc_size;
 	unsigned int npendings;
+	unsigned int hw_queue_idx;
 };

 /**
@@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
 	}
 }

-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-	/* read the index for the next available descriptor in the DESQ */
-	u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-	return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-		& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
 /*
  * notify the engine of new descriptors, and update the available index.
  */
@@ -306,7 +295,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	int desq_ptr;
 	void *dest_hw_desc;
 	dma_cookie_t cookie;
 	struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +310,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_bh(&xor_dev->lock);
 	cookie = dma_cookie_assign(tx);

-	/* get the next available slot in the DESQ */
-	desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
 	/* copy the HW descriptor from the SW descriptor to the DESQ */
-	dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

 	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

 	xor_dev->npendings++;
+	xor_dev->hw_queue_idx++;
+	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+		xor_dev->hw_queue_idx = 0;

 	spin_unlock_bh(&xor_dev->lock);

From 9dd4f319bac25334a869d9276b19eac9e478fd33 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni
Date: Fri, 5 May 2017 11:57:49 +0200
Subject: [PATCH 06/12] dmaengine: mv_xor_v2: remove interrupt coalescing

The current implementation of interrupt coalescing doesn't work,
because it doesn't configure the coalescing timer, which is needed to
make sure we get an interrupt at some point. As a fix for stable, we
simply remove the interrupt coalescing functionality. It will be
re-introduced properly in a future commit.

Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Petazzoni
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor_v2.c | 25 -------------------------
 1 file changed, 25 deletions(-)

diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 4684eceea759..b133fe29d788 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -246,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
 	return MV_XOR_V2_EXT_DESC_SIZE;
 }

-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-	u32 reg;
-
-	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-	reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-	reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
 	struct mv_xor_v2_device *xor_dev = data;
@@ -277,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 	if (!ndescs)
 		return IRQ_NONE;

-	/*
-	 * Update IMSG threshold, to disable new IMSG interrupts until
-	 * end of the tasklet
-	 */
-	mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
 	/* schedule a tasklet to handle descriptors callbacks */
 	tasklet_schedule(&xor_dev->irq_tasklet);

@@ -607,9 +585,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 		/* free the descriptores */
 		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
 	}
-
-	/* Update IMSG threshold, to enable new IMSG interrupts */
-	mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }

From b2d3c270f9f2fb82518ac500a9849c3aaf503852 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni
Date: Fri, 5 May 2017 11:57:50 +0200
Subject: [PATCH 07/12] dmaengine: mv_xor_v2: set DMA mask to 40 bits

The XORv2 engine on Armada 7K/8K can only access the first 40 bits of
the physical address space, so the DMA mask must be set accordingly.
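
For reference, DMA_BIT_MASK(40) is the low-40-bit mask, so such a device
can only reach the first 1 TiB of physical addresses. The standalone C
snippet below shows the effect (the macro is reproduced from the kernel
definition; the sample addresses are made up for illustration):

	#include <inttypes.h>
	#include <stdio.h>

	/* as defined in include/linux/dma-mapping.h */
	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	int main(void)
	{
		uint64_t mask = DMA_BIT_MASK(40);
		uint64_t in_range = 0x00ffffffffULL;	/* below 2^40 */
		uint64_t oob = 1ULL << 40;	/* first unreachable address */

		printf("mask = 0x%016" PRIx64 "\n", mask);
		printf("0x%010" PRIx64 " reachable: %d\n",
		       in_range, (in_range & ~mask) == 0);
		printf("0x%010" PRIx64 " reachable: %d\n",
		       oob, (oob & ~mask) == 0);
		return 0;
	}

With the mask set, the DMA mapping core will bounce or reject buffers
above that limit instead of handing the engine addresses it would
silently truncate.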
Fixes: 19a340b1a820 ("dmaengine: mv_xor_v2: new driver")
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Petazzoni
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor_v2.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index b133fe29d788..f3e211f8f6c5 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -693,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)

 	platform_set_drvdata(pdev, xor_dev);

+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;
+
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;

From 9a445bbb1607d9f14556a532453dd86d1b7e381e Mon Sep 17 00:00:00 2001
From: Hiroyuki Yokoyama
Date: Mon, 15 May 2017 17:49:52 +0900
Subject: [PATCH 08/12] dmaengine: usb-dmac: Fix DMAOR AE bit definition

This patch fixes the register definition of the AE (Address Error
flag) bit.

Fixes: 0c1c8ff32fa2 ("dmaengine: usb-dmac: Add Renesas USB DMA Controller (USB-DMAC) driver")
Cc: stable@vger.kernel.org # v4.1+
Signed-off-by: Hiroyuki Yokoyama
[Shimoda: add Fixes and Cc tags in the commit log]
Signed-off-by: Yoshihiro Shimoda
Reviewed-by: Geert Uytterhoeven
Signed-off-by: Vinod Koul
---
 drivers/dma/sh/usb-dmac.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 72c649713ace..31a145154e9f 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -117,7 +117,7 @@ struct usb_dmac {
 #define USB_DMASWR		0x0008
 #define USB_DMASWR_SWR		(1 << 0)
 #define USB_DMAOR		0x0060
-#define USB_DMAOR_AE		(1 << 2)
+#define USB_DMAOR_AE		(1 << 1)
 #define USB_DMAOR_DME		(1 << 0)

 #define USB_DMASAR		0x0000

From 0037ae47812b1f431cc602100d1d51f37d77b61e Mon Sep 17 00:00:00 2001
From: Alexander Sverdlin
Date: Mon, 22 May 2017 16:05:22 +0200
Subject: [PATCH 09/12] dmaengine: ep93xx: Always start from BASE0

The current buffer is reset to zero in device_free_chan_resources()
but not in device_terminate_all(). It could happen that the HW is
restarted and expects BASE0 to be used, while the driver is out of
sync and will start from BASE1. One solution is to reset the buffer
explicitly in m2p_hw_setup().
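
A standalone C model of the desynchronization this fixes (the channel
structure and names are hypothetical stand-ins, not the driver's code):
the M2P hardware ping-pongs between two base registers, so if the HW
restarts at BASE0 while the driver's soft toggle is left at 1, every
following fill programs the wrong slot.

	#include <stdio.h>

	struct chan {
		int buffer;	/* 0 => next fill goes to BASE0, 1 => BASE1 */
	};

	static void hw_setup(struct chan *c)
	{
		/* HW always restarts from BASE0, so resync the soft toggle */
		c->buffer = 0;
	}

	static const char *next_base(struct chan *c)
	{
		const char *base = c->buffer ? "BASE1" : "BASE0";

		c->buffer ^= 1;	/* ping-pong for the following fill */
		return base;
	}

	int main(void)
	{
		/* stale toggle left behind by terminate_all */
		struct chan c = { .buffer = 1 };

		hw_setup(&c);	/* the fix: back in sync with the HW */
		printf("first fill  -> %s\n", next_base(&c));	/* BASE0 */
		printf("second fill -> %s\n", next_base(&c));	/* BASE1 */
		return 0;
	}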
Signed-off-by: Alexander Sverdlin
Cc: stable@vger.kernel.org
Signed-off-by: Vinod Koul
---
 drivers/dma/ep93xx_dma.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d37e8dda8079..deb009c3121f 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -323,6 +323,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 		  | M2P_CONTROL_ENABLE;
 	m2p_set_control(edmac, control);

+	edmac->buffer = 0;
+
 	return 0;
 }

From 98f9de366fccee7572c646af226b2d4b4841e3b5 Mon Sep 17 00:00:00 2001
From: Alexander Sverdlin
Date: Mon, 22 May 2017 16:05:23 +0200
Subject: [PATCH 10/12] dmaengine: ep93xx: Don't drain the transfers in
 terminate_all()

Draining the transfers in the terminate_all callback happens with IRQs
disabled, therefore it induces huge latency:

 # irqsoff latency trace v1.1.5 on 4.11.0
 # --------------------------------------------------------------------
 # latency: 39770 us, #57/57, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0)
 #    -----------------
 #    | task: process-129 (uid:0 nice:0 policy:2 rt_prio:50)
 #    -----------------
 #  => started at: _snd_pcm_stream_lock_irqsave
 #  => ended at:   snd_pcm_stream_unlock_irqrestore
 #
 #                  _------=> CPU#
 #                 / _-----=> irqs-off
 #                | / _----=> need-resched
 #                || / _---=> hardirq/softirq
 #                ||| / _--=> preempt-depth
 #                |||| /     delay
 #  cmd     pid   ||||| time  |   caller
 #     \   /      |||||  \    |   /
 process-129     0d.s.    3us : _snd_pcm_stream_lock_irqsave
 process-129     0d.s1    9us : snd_pcm_stream_lock <-_snd_pcm_stream_lock_irqsave
 process-129     0d.s1   15us : preempt_count_add <-snd_pcm_stream_lock
 process-129     0d.s2   22us : preempt_count_add <-snd_pcm_stream_lock
 process-129     0d.s3   32us : snd_pcm_update_hw_ptr0 <-snd_pcm_period_elapsed
 process-129     0d.s3   41us : soc_pcm_pointer <-snd_pcm_update_hw_ptr0
 process-129     0d.s3   50us : dmaengine_pcm_pointer <-soc_pcm_pointer
 process-129     0d.s3   58us+: snd_dmaengine_pcm_pointer_no_residue <-dmaengine_pcm_pointer
 process-129     0d.s3   96us : update_audio_tstamp <-snd_pcm_update_hw_ptr0
 process-129     0d.s3  103us : snd_pcm_update_state <-snd_pcm_update_hw_ptr0
 process-129     0d.s3  112us : xrun <-snd_pcm_update_state
 process-129     0d.s3  119us : snd_pcm_stop <-xrun
 process-129     0d.s3  126us : snd_pcm_action <-snd_pcm_stop
 process-129     0d.s3  134us : snd_pcm_action_single <-snd_pcm_action
 process-129     0d.s3  141us : snd_pcm_pre_stop <-snd_pcm_action_single
 process-129     0d.s3  150us : snd_pcm_do_stop <-snd_pcm_action_single
 process-129     0d.s3  157us : soc_pcm_trigger <-snd_pcm_do_stop
 process-129     0d.s3  166us : snd_dmaengine_pcm_trigger <-soc_pcm_trigger
 process-129     0d.s3  175us : ep93xx_dma_terminate_all <-snd_dmaengine_pcm_trigger
 process-129     0d.s3  182us : preempt_count_add <-ep93xx_dma_terminate_all
 process-129     0d.s4  189us*: m2p_hw_shutdown <-ep93xx_dma_terminate_all
 process-129     0d.s4 39472us : m2p_hw_setup <-ep93xx_dma_terminate_all

 ... rest skipped ...

 process-129     0d.s. 40080us : <stack trace>
  => ep93xx_dma_tasklet
  => tasklet_action
  => __do_softirq
  => irq_exit
  => __handle_domain_irq
  => vic_handle_irq
  => __irq_usr
  => 0xb66c6668

Just abort the transfers and warn if the HW state is not what we
expect. Move draining into the device_synchronize callback.
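
The split follows the dmaengine contract: device_terminate_all() may be
called from atomic context and must not wait, while device_synchronize()
runs in process context and may sleep. A minimal standalone C sketch of
that division of labour (the state variable and names are hypothetical
stand-ins, not the driver's code):

	#include <stdio.h>

	/* 0 = idle; anything else = still draining */
	static int hw_state = 3;

	/* terminate_all: may run with IRQs off, so it must not wait */
	static void terminate_all(void)
	{
		hw_state = 2;	/* abort issued; HW drains on its own */
	}

	/* synchronize: process context, free to poll/yield until idle */
	static void synchronize(void)
	{
		while (hw_state != 0) {
			/* the real driver calls schedule() here */
			hw_state--;
			printf("waiting, state=%d\n", hw_state);
		}
		printf("idle: safe to free descriptors and run callbacks\n");
	}

	int main(void)
	{
		terminate_all();
		synchronize();
		return 0;
	}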
Signed-off-by: Alexander Sverdlin
Cc: stable@vger.kernel.org
Signed-off-by: Vinod Koul
---
 drivers/dma/ep93xx_dma.c | 37 +++++++++++++++++++++++++++++++++----
 1 file changed, 33 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index deb009c3121f..ec240592f5c8 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
 	struct dma_device	dma_dev;
 	bool			m2m;
 	int			(*hw_setup)(struct ep93xx_dma_chan *);
+	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
 	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 	void			(*hw_submit)(struct ep93xx_dma_chan *);
 	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -333,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }

-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+	unsigned long flags;
 	u32 control;

+	spin_lock_irqsave(&edmac->lock, flags);
 	control = readl(edmac->regs + M2P_CONTROL);
 	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 	m2p_set_control(edmac, control);
+	spin_unlock_irqrestore(&edmac->lock, flags);

 	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-		cpu_relax();
+		schedule();
+}

+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
 	m2p_set_control(edmac, 0);

-	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-		cpu_relax();
+	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }

 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1162,6 +1169,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 	return NULL;
 }

+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe
+ * to free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+	if (edmac->edma->hw_synchronize)
+		edmac->edma->hw_synchronize(edmac);
+}
+
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
@@ -1325,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
 	dma_dev->device_config = ep93xx_dma_slave_config;
+	dma_dev->device_synchronize = ep93xx_dma_synchronize;
 	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1342,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	} else {
 		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

+		edma->hw_synchronize = m2p_hw_synchronize;
 		edma->hw_setup = m2p_hw_setup;
 		edma->hw_shutdown = m2p_hw_shutdown;
 		edma->hw_submit = m2p_hw_submit;

From 56b177055adb246cdeca174331dbf92fc49bfccd Mon Sep 17 00:00:00 2001
From: Kuninori Morimoto
Date: Tue, 23 May 2017 07:08:43 +0000
Subject: [PATCH 11/12] rcar-dmac: fixup descriptor pointer for descriptor
 mode

In descriptor mode, the running descriptor pointer is not maintained
by the interrupt handler; instead, the driver finds the running
descriptor from the descriptor pointer field in the CHCRB register.
But CHCRB::DPTR indicates the *next* descriptor pointer, not the
current one, so the residue calculation was wrong. This patch fixes
it.

Signed-off-by: Kuninori Morimoto
Signed-off-by: Vinod Koul
---
 drivers/dma/sh/rcar-dmac.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index db41795fe42a..bd261c9e9664 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	if (desc->hwdescs.use) {
 		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
 			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+		if (dptr == 0)
+			dptr = desc->nchunks;
+		dptr--;
 		WARN_ON(dptr >= desc->nchunks);
 	} else {
 		running = desc->running;

From ebcdaee4cebb3a8d0d702ab5e9392373672ec1de Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Thu, 1 Jun 2017 19:22:01 +0100
Subject: [PATCH 12/12] dmaengine: pl330: fix warning in pl330_remove

When removing a device with fewer than 9 IRQs (AMBA_NR_IRQS), we get a
big WARN_ON from devres.c, because pl330_remove calls devm_free_irq
for unallocated IRQs. As is done in pl330_probe, check that the IRQ
number is present before calling devm_free_irq.

Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Vinod Koul
---
 drivers/dma/pl330.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 8b0da7fa520d..e90a7a0d760a 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev)

 	for (i = 0; i < AMBA_NR_IRQS; i++) {
 		irq = adev->irq[i];
-		devm_free_irq(&adev->dev, irq, pl330);
+		if (irq)
+			devm_free_irq(&adev->dev, irq, pl330);
 	}

 	dma_async_device_unregister(&pl330->ddma);
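
A standalone C model of the free-only-what-was-requested rule behind
this last fix (the IRQ table and the free function are hypothetical
stand-ins): the probe loop skips slots where adev->irq[i] is 0, so the
remove path must skip the same slots, otherwise devres warns about
freeing a resource that was never requested.

	#include <stdio.h>

	#define AMBA_NR_IRQS 9

	static void devm_free_irq_model(int irq)
	{
		printf("freeing irq %d\n", irq);
	}

	int main(void)
	{
		/* a device that wired up only two of the nine possible IRQs */
		int irq[AMBA_NR_IRQS] = { 41, 42, 0, 0, 0, 0, 0, 0, 0 };

		for (int i = 0; i < AMBA_NR_IRQS; i++) {
			/* irq 0 means "not allocated": freeing it would WARN */
			if (irq[i])
				devm_free_irq_model(irq[i]);
		}
		return 0;
	}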