iommu/vt-d: Rename 'hwdev' variables to 'dev' now that that's the norm
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent 207e35920d
commit 5040a918bd
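The diff below is purely mechanical: each remaining DMA-API helper whose struct device * argument was still called hwdev now calls it dev, and every reference to the variable inside those functions is updated to match. As a minimal before/after sketch of the pattern (taken from one of the prototypes touched below, shown here as bare declarations rather than the full definitions):

	/* before: the struct device argument was still named "hwdev" */
	static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
					dma_addr_t dma_handle, struct dma_attrs *attrs);

	/* after: renamed to "dev"; the uses in the function body follow suit */
	static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
					dma_addr_t dma_handle, struct dma_attrs *attrs);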
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2955,7 +2955,7 @@ static int iommu_no_mapping(struct device *dev)
 	return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 				     size_t size, int dir, u64 dma_mask)
 {
 	struct dmar_domain *domain;
@@ -2968,17 +2968,17 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(hwdev))
+	if (iommu_no_mapping(dev))
 		return paddr;
 
-	domain = get_valid_domain_for_dev(hwdev);
+	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return 0;
 
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
+	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
 	if (!iova)
 		goto error;
 
@@ -3016,7 +3016,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
-		dev_name(hwdev), size, (unsigned long long)paddr, dir);
+		dev_name(dev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
@@ -3155,7 +3155,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
 {
@@ -3165,10 +3165,10 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	if (!iommu_no_mapping(hwdev))
+	if (!iommu_no_mapping(dev))
 		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
-		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
 			flags |= GFP_DMA;
 		else
 			flags |= GFP_DMA32;
@@ -3179,16 +3179,16 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+	*dma_handle = __intel_map_single(dev, virt_to_bus(vaddr), size,
 					 DMA_BIDIRECTIONAL,
-					 hwdev->coherent_dma_mask);
+					 dev->coherent_dma_mask);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);
 	return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
@@ -3196,11 +3196,11 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+	intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, order);
 }
 
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
 {
@@ -3210,10 +3210,10 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	struct intel_iommu *iommu;
 	struct page *freelist;
 
-	if (iommu_no_mapping(hwdev))
+	if (iommu_no_mapping(dev))
 		return;
 
-	domain = find_domain(hwdev);
+	domain = find_domain(dev);
 	BUG_ON(!domain);
 
 	iommu = domain_get_iommu(domain);
@@ -3257,7 +3257,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	int i;
@@ -3271,10 +3271,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(hwdev))
-		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
+	if (iommu_no_mapping(dev))
+		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
 
-	domain = get_valid_domain_for_dev(hwdev);
+	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return 0;
 
@@ -3283,8 +3283,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
-				*hwdev->dma_mask);
+	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+				*dev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;