dmaengine: remove dependency on async_tx
async_tx.ko is a consumer of dma channels. A circular dependency arises if modules in drivers/dma rely on common code in async_tx.ko, and it prevents either module from being unloaded.

Move dma_wait_for_async_tx and async_tx_run_dependencies (renamed dma_run_dependencies) to dmaengine.o, where they should have been from the beginning.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 28405d8d9c
commit 07f2211e4f
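The rename is visible in the iop-adma and mv_xor hunks below: the drivers now call dma_run_dependencies(), a dmaengine.o symbol, instead of async_tx_run_dependencies() from async_tx.ko. As a minimal sketch of a driver descriptor-cleanup path after this patch (my_desc_slot and my_run_tx_complete_actions are hypothetical illustration names, not part of the patch):

#include <linux/dmaengine.h>

/* Hypothetical driver descriptor embedding the generic async_tx
 * descriptor, in the style of iop_adma_desc_slot / mv_xor_desc_slot. */
struct my_desc_slot {
        struct dma_async_tx_descriptor async_tx;
        dma_cookie_t cookie;
};

static dma_cookie_t my_run_tx_complete_actions(struct my_desc_slot *desc)
{
        dma_cookie_t cookie = desc->cookie;

        /* start operations that were waiting on this descriptor; this
         * now resolves against dmaengine.o rather than async_tx.ko,
         * so the module dependency cycle is gone */
        dma_run_dependencies(&desc->async_tx);

        return cookie;
}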
crypto/async_tx/async_tx.c
@@ -72,81 +72,6 @@ void async_tx_issue_pending_all(void)
 }
 EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
 
-/* dma_wait_for_async_tx - spin wait for a transaction to complete
- * @tx: transaction to wait on
- */
-enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-        enum dma_status status;
-        struct dma_async_tx_descriptor *iter;
-        struct dma_async_tx_descriptor *parent;
-
-        if (!tx)
-                return DMA_SUCCESS;
-
-        /* poll through the dependency chain, return when tx is complete */
-        do {
-                iter = tx;
-
-                /* find the root of the unsubmitted dependency chain */
-                do {
-                        parent = iter->parent;
-                        if (!parent)
-                                break;
-                        else
-                                iter = parent;
-                } while (parent);
-
-                /* there is a small window for ->parent == NULL and
-                 * ->cookie == -EBUSY
-                 */
-                while (iter->cookie == -EBUSY)
-                        cpu_relax();
-
-                status = dma_sync_wait(iter->chan, iter->cookie);
-        } while (status == DMA_IN_PROGRESS || (iter != tx));
-
-        return status;
-}
-EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-
-/* async_tx_run_dependencies - helper routine for dma drivers to process
- *      (start) dependent operations on their target channel
- * @tx: transaction with dependencies
- */
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
-{
-        struct dma_async_tx_descriptor *dep = tx->next;
-        struct dma_async_tx_descriptor *dep_next;
-        struct dma_chan *chan;
-
-        if (!dep)
-                return;
-
-        chan = dep->chan;
-
-        /* keep submitting up until a channel switch is detected
-         * in that case we will be called again as a result of
-         * processing the interrupt from async_tx_channel_switch
-         */
-        for (; dep; dep = dep_next) {
-                spin_lock_bh(&dep->lock);
-                dep->parent = NULL;
-                dep_next = dep->next;
-                if (dep_next && dep_next->chan == chan)
-                        dep->next = NULL; /* ->next will be submitted */
-                else
-                        dep_next = NULL; /* submit current dep and terminate */
-                spin_unlock_bh(&dep->lock);
-
-                dep->tx_submit(dep);
-        }
-
-        chan->device->device_issue_pending(chan);
-}
-EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
-
 static void
 free_dma_chan_ref(struct rcu_head *rcu)
 {
drivers/dma/Kconfig
@@ -33,7 +33,6 @@ config INTEL_IOATDMA
 config INTEL_IOP_ADMA
         tristate "Intel IOP ADMA support"
         depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
-        select ASYNC_CORE
         select DMA_ENGINE
         help
           Enable support for the Intel(R) IOP Series RAID engines.
@@ -59,7 +58,6 @@ config FSL_DMA
 config MV_XOR
         bool "Marvell XOR engine support"
         depends on PLAT_ORION
-        select ASYNC_CORE
         select DMA_ENGINE
         ---help---
           Enable support for the Marvell XOR engine.
drivers/dma/dmaengine.c
@@ -626,6 +626,90 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
+/* dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
+ *
+ * This routine assumes that tx was obtained from a call to async_memcpy,
+ * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
+ * and submitted). Walking the parent chain is only meant to cover for DMA
+ * drivers that do not implement the DMA_INTERRUPT capability and may race with
+ * the driver's descriptor cleanup routine.
+ */
+enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+        enum dma_status status;
+        struct dma_async_tx_descriptor *iter;
+        struct dma_async_tx_descriptor *parent;
+
+        if (!tx)
+                return DMA_SUCCESS;
+
+        WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
+                  " %s\n", __func__, dev_name(&tx->chan->dev));
+
+        /* poll through the dependency chain, return when tx is complete */
+        do {
+                iter = tx;
+
+                /* find the root of the unsubmitted dependency chain */
+                do {
+                        parent = iter->parent;
+                        if (!parent)
+                                break;
+                        else
+                                iter = parent;
+                } while (parent);
+
+                /* there is a small window for ->parent == NULL and
+                 * ->cookie == -EBUSY
+                 */
+                while (iter->cookie == -EBUSY)
+                        cpu_relax();
+
+                status = dma_sync_wait(iter->chan, iter->cookie);
+        } while (status == DMA_IN_PROGRESS || (iter != tx));
+
+        return status;
+}
+EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
+
+/* dma_run_dependencies - helper routine for dma drivers to process
+ *      (start) dependent operations on their target channel
+ * @tx: transaction with dependencies
+ */
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+        struct dma_async_tx_descriptor *dep = tx->next;
+        struct dma_async_tx_descriptor *dep_next;
+        struct dma_chan *chan;
+
+        if (!dep)
+                return;
+
+        chan = dep->chan;
+
+        /* keep submitting up until a channel switch is detected
+         * in that case we will be called again as a result of
+         * processing the interrupt from async_tx_channel_switch
+         */
+        for (; dep; dep = dep_next) {
+                spin_lock_bh(&dep->lock);
+                dep->parent = NULL;
+                dep_next = dep->next;
+                if (dep_next && dep_next->chan == chan)
+                        dep->next = NULL; /* ->next will be submitted */
+                else
+                        dep_next = NULL; /* submit current dep and terminate */
+                spin_unlock_bh(&dep->lock);
+
+                dep->tx_submit(dep);
+        }
+
+        chan->device->device_issue_pending(chan);
+}
+EXPORT_SYMBOL_GPL(dma_run_dependencies);
+
 static int __init dma_bus_init(void)
 {
         mutex_init(&dma_list_mutex);
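For orientation, a hedged usage sketch of the relocated wait helper: the descriptor must come from an async_tx submission (async_memcpy and friends), as the comment above requires. my_wait_on_tx is a hypothetical wrapper, not part of the patch.

static int my_wait_on_tx(struct dma_async_tx_descriptor *tx)
{
        /* spins through dma_sync_wait() until the whole dependency
         * chain of an in-flight descriptor has completed */
        enum dma_status status = dma_wait_for_async_tx(tx);

        return status == DMA_SUCCESS ? 0 : -EIO;
}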
drivers/dma/iop-adma.c
@@ -24,7 +24,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
         }
 
         /* run dependent operations */
-        async_tx_run_dependencies(&desc->async_tx);
+        dma_run_dependencies(&desc->async_tx);
 
         return cookie;
 }
drivers/dma/mv_xor.c
@@ -18,7 +18,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
         }
 
         /* run dependent operations */
-        async_tx_run_dependencies(&desc->async_tx);
+        dma_run_dependencies(&desc->async_tx);
 
         return cookie;
 }
include/linux/async_tx.h
@@ -60,8 +60,6 @@ enum async_tx_flags {
 
 #ifdef CONFIG_DMA_ENGINE
 void async_tx_issue_pending_all(void);
-enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void)
         do { } while (0);
 }
 
-static inline enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-        return DMA_SUCCESS;
-}
-
-static inline void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
-                          struct dma_chan *host_chan)
-{
-        do { } while (0);
-}
-
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
         enum dma_transaction_type tx_type, struct page **dst, int dst_count,
include/linux/dmaengine.h
@@ -475,11 +475,20 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+#ifdef CONFIG_DMA_ENGINE
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+#else
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+        return DMA_SUCCESS;
+}
+#endif
 
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 
 /* --- Helper iov-locking functions --- */
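The #else stub above keeps callers building when CONFIG_DMA_ENGINE is unset: the inline version simply reports immediate success. A hypothetical caller that compiles in both configurations (my_flush_tx is an illustration name, not from the patch):

#include <linux/kernel.h>
#include <linux/dmaengine.h>

static void my_flush_tx(struct dma_async_tx_descriptor *tx)
{
        /* with CONFIG_DMA_ENGINE=n this check is a no-op, since the
         * inline stub always returns DMA_SUCCESS */
        if (dma_wait_for_async_tx(tx) != DMA_SUCCESS)
                pr_err("dma transaction failed to complete\n");
}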