dmaengine: prepare for generic 'unmap' data

Add a hook for a common dma unmap implementation to enable removal of
the per driver custom unmap code.  (A reworked version of Bartlomiej
Zolnierkiewicz's patches to remove the custom callbacks and the size
increase of dma_async_tx_descriptor for drivers that don't care about
raid).

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
[bzolnier: prepare pl330 driver for adding missing unmap while at it]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 56ea27fd61
commit d38a8c622a
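Every driver hunk below makes the same change: a call to the new
dma_descriptor_unmap() hook is added on the descriptor completion path,
just ahead of the driver's existing custom unmap code. A minimal sketch
of that shared pattern, assuming hypothetical my_chan/my_desc types and
a my_driver_unmap_buffers() helper standing in for each driver's own
code:

/* Sketch only: my_chan, my_desc, and my_driver_unmap_buffers() are
 * hypothetical stand-ins, not code from this patch. */
static void my_driver_complete(struct my_chan *chan, struct my_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dma_cookie_complete(txd);

	/* New generic hook: a no-op until a driver attaches unmap data
	 * via dma_set_unmap(); the legacy unmap code still runs below. */
	dma_descriptor_unmap(txd);

	/* Legacy per-driver unmap, to be removed once all users are
	 * converted to dmaengine_unmap_data. */
	if (!chan->is_slave)
		my_driver_unmap_buffers(desc);

	if (txd->callback)
		txd->callback(txd->callback_param);
}

The hook is deliberately a no-op at this stage (no driver sets
tx->unmap yet), so the legacy unmap paths keep working until later
patches in the series remove them.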
@@ -1197,6 +1197,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
+	dma_descriptor_unmap(txd);
 	if (!plchan->slave)
 		pl08x_unmap_buffers(txd);
 
@@ -345,6 +345,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	list_move(&desc->desc_node, &atchan->free_list);
 
 	/* unmap dma addresses (not on slave channels) */
+	dma_descriptor_unmap(txd);
 	if (!atchan->chan_common.private) {
 		struct device *parent = chan2parent(&atchan->chan_common);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -311,6 +311,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	list_splice_init(&desc->tx_list, &dwc->free_list);
 	list_move(&desc->desc_node, &dwc->free_list);
 
+	dma_descriptor_unmap(txd);
 	if (!is_slave_direction(dwc->direction)) {
 		struct device *parent = chan2parent(&dwc->chan);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -791,6 +791,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
 		 * For the memcpy channels the API requires us to unmap the
 		 * buffers unless requested otherwise.
 		 */
+		dma_descriptor_unmap(&desc->txd);
 		if (!edmac->chan.private)
 			ep93xx_dma_unmap_buffers(desc);
 
@@ -868,6 +868,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 	/* Run any dependencies */
 	dma_run_dependencies(txd);
 
+	dma_descriptor_unmap(txd);
 	/* Unmap the dst buffer, if requested */
 	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
@@ -602,6 +602,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			ioat->active -= desc->hw->tx_cnt;
 			if (tx->callback) {
@@ -148,6 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		tx = &desc->txd;
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
+			dma_descriptor_unmap(tx);
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			dma_cookie_complete(tx);
 			if (tx->callback) {
@@ -577,6 +577,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		tx = &desc->txd;
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
 			ioat3_dma_unmap(ioat, desc, idx + i);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
@@ -152,6 +152,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 		if (tx->callback)
 			tx->callback(tx->callback_param);
 
+		dma_descriptor_unmap(tx);
 		/* unmap dma addresses
 		 * (unmap_single vs unmap_page?)
 		 */
@@ -278,6 +278,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 			desc->async_tx.callback(
 				desc->async_tx.callback_param);
 
+		dma_descriptor_unmap(&desc->async_tx);
 		/* unmap dma addresses
 		 * (unmap_single vs unmap_page?)
 		 */
@@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data)
 			list_move_tail(&desc->node, &pch->dmac->desc_pool);
 		}
 
+		dma_descriptor_unmap(&desc->txd);
+
 		if (callback) {
 			spin_unlock_irqrestore(&pch->lock, flags);
 			callback(callback_param);
@@ -1765,6 +1765,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 			desc->async_tx.callback(
 				desc->async_tx.callback_param);
 
+		dma_descriptor_unmap(&desc->async_tx);
 		/* unmap dma addresses
 		 * (unmap_single vs unmap_page?)
 		 *
@@ -293,6 +293,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
 
 	list_move(&td_desc->desc_node, &td_chan->free_list);
 
+	dma_descriptor_unmap(txd);
 	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
 		__td_unmap_descs(td_desc,
 			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
@@ -419,6 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	list_splice_init(&desc->tx_list, &dc->free_list);
 	list_move(&desc->desc_node, &dc->free_list);
 
+	dma_descriptor_unmap(txd);
 	if (!ds) {
 		dma_addr_t dmaaddr;
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -413,6 +413,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+	u8 to_cnt;
+	u8 from_cnt;
+	u8 bidi_cnt;
+	struct device *dev;
+	struct kref kref;
+	size_t len;
+	dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
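struct dmaengine_unmap_data gathers all of a transaction's mappings into
a single refcounted object: addr[] is a flexible array expected to hold
to_cnt DMA_TO_DEVICE addresses, then from_cnt DMA_FROM_DEVICE addresses,
then bidi_cnt DMA_BIDIRECTIONAL ones, each of length len and mapped
against dev. A sketch of how such an object could be sized and set up,
assuming a hypothetical alloc_unmap() helper (the series introduces its
own pooled allocators later):

/* Hypothetical allocator for illustration only; not part of this patch. */
static struct dmaengine_unmap_data *alloc_unmap(struct device *dev,
						int nr, size_t len)
{
	struct dmaengine_unmap_data *unmap;

	/* One dma_addr_t slot per mapped buffer, carried in the
	 * flexible array member addr[0]. */
	unmap = kzalloc(sizeof(*unmap) + nr * sizeof(dma_addr_t),
			GFP_NOWAIT);
	if (!unmap)
		return NULL;

	kref_init(&unmap->kref);	/* dropped when the last user is done */
	unmap->dev = dev;
	unmap->len = len;
	return unmap;
}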
@@ -438,6 +449,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+	struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
@@ -445,6 +457,20 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+	kref_get(&unmap->kref);
+	tx->unmap = unmap;
+}
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+	if (tx->unmap) {
+		tx->unmap = NULL;
+	}
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
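dma_set_unmap() takes a kref so one dmaengine_unmap_data can back
several descriptors, and dma_descriptor_unmap() is the completion-side
hook the driver hunks above call. At this stage it only clears
tx->unmap; the kref_put() and the actual dma_unmap_page()/
dma_unmap_single() calls arrive with the follow-up patch that fills in
the generic unmap. A hedged usage sketch, assuming the caller already
built an unmap object (submit_copy_with_unmap() is illustrative, not
code from this patch):

/* Hedged usage sketch: attach refcounted unmap data at submit time;
 * the driver's completion path then runs dma_descriptor_unmap(). */
static dma_cookie_t submit_copy_with_unmap(struct dma_chan *chan,
					   struct dmaengine_unmap_data *unmap,
					   dma_addr_t dst, dma_addr_t src,
					   size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	dma_set_unmap(tx, unmap);	/* takes its own kref on unmap */
	return dmaengine_submit(tx);
}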