mirror of https://gitee.com/openkylin/linux.git
tile: uninline dma_set_mask
We'll soon merge <asm-generic/dma-mapping-common.h> into <linux/dma-mapping.h>, and the reference to dma_capable in the tile dma_set_mask would create a circular dependency. Fix this by moving the implementation out of line.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
30081d8ea4
commit
bd38118f9c
|
@@ -76,34 +76,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
static inline int
|
int dma_set_mask(struct device *dev, u64 mask);
|
||||||
dma_set_mask(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* For PCI devices with 64-bit DMA addressing capability, promote
|
|
||||||
* the dma_ops to hybrid, with the consistent memory DMA space limited
|
|
||||||
* to 32-bit. For 32-bit capable devices, limit the streaming DMA
|
|
||||||
* address range to max_direct_dma_addr.
|
|
||||||
*/
|
|
||||||
if (dma_ops == gx_pci_dma_map_ops ||
|
|
||||||
dma_ops == gx_hybrid_pci_dma_map_ops ||
|
|
||||||
dma_ops == gx_legacy_pci_dma_map_ops) {
|
|
||||||
if (mask == DMA_BIT_MASK(64) &&
|
|
||||||
dma_ops == gx_legacy_pci_dma_map_ops)
|
|
||||||
set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
|
|
||||||
else if (mask > dev->archdata.max_direct_dma_addr)
|
|
||||||
mask = dev->archdata.max_direct_dma_addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!dev->dma_mask || !dma_supported(dev, mask))
|
|
||||||
return -EIO;
|
|
||||||
|
|
||||||
*dev->dma_mask = mask;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* dma_alloc_noncoherent() is #defined to return coherent memory,
|
* dma_alloc_noncoherent() is #defined to return coherent memory,
|
||||||
|
|
|
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
|
||||||
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
|
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
|
||||||
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
|
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
|
||||||
|
|
||||||
|
int dma_set_mask(struct device *dev, u64 mask)
|
||||||
|
{
|
||||||
|
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* For PCI devices with 64-bit DMA addressing capability, promote
|
||||||
|
* the dma_ops to hybrid, with the consistent memory DMA space limited
|
||||||
|
* to 32-bit. For 32-bit capable devices, limit the streaming DMA
|
||||||
|
* address range to max_direct_dma_addr.
|
||||||
|
*/
|
||||||
|
if (dma_ops == gx_pci_dma_map_ops ||
|
||||||
|
dma_ops == gx_hybrid_pci_dma_map_ops ||
|
||||||
|
dma_ops == gx_legacy_pci_dma_map_ops) {
|
||||||
|
if (mask == DMA_BIT_MASK(64) &&
|
||||||
|
dma_ops == gx_legacy_pci_dma_map_ops)
|
||||||
|
set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
|
||||||
|
else if (mask > dev->archdata.max_direct_dma_addr)
|
||||||
|
mask = dev->archdata.max_direct_dma_addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!dev->dma_mask || !dma_supported(dev, mask))
|
||||||
|
return -EIO;
|
||||||
|
|
||||||
|
*dev->dma_mask = mask;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dma_set_mask);
|
||||||
|
|
||||||
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
|
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
|
||||||
int dma_set_coherent_mask(struct device *dev, u64 mask)
|
int dma_set_coherent_mask(struct device *dev, u64 mask)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue