iommu/dma: Handle MSI mappings separately
MSI pages must always be mapped into a device's *current* domain, which
*might* be the default DMA domain, but might instead be a VFIO domain
with its own MSI cookie. This subtlety got accidentally lost in the
streamlining of __iommu_dma_map(), but rather than reintroduce more
complexity and/or special-casing, it turns out neater to just split this
path out entirely.
Since iommu_dma_get_msi_page() already duplicates much of what
__iommu_dma_map() does, it can easily just make the allocation and
mapping calls directly as well. That way we can further streamline the
helper back to exclusively operating on DMA domains.
Fixes: b61d271e59 ("iommu/dma: Move domain lookup into __iommu_dma_{map,unmap}")
Reported-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reported-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Marc Zyngier <maz@kernel.org>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 8af23fad62 (parent: e21a712a96)
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	size_t iova_off = 0;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
-	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-		iova_off = iova_offset(&cookie->iovad, phys);
-		size = iova_align(&cookie->iovad, size + iova_off);
-	}
+	size = iova_align(iovad, size + iova_off);
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 	if (!iova)
@@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot);
-	if (iova == DMA_MAPPING_ERROR)
+	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	if (!iova)
 		goto out_free_page;
 
+	if (iommu_map(domain, iova, msi_addr, size, prot))
+		goto out_free_iova;
+
 	INIT_LIST_HEAD(&msi_page->list);
 	msi_page->phys = msi_addr;
 	msi_page->iova = iova;
 	list_add(&msi_page->list, &cookie->msi_page_list);
 	return msi_page;
 
+out_free_iova:
+	iommu_dma_free_iova(cookie, iova, size);
 out_free_page:
 	kfree(msi_page);
 	return NULL;
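For reference, this is roughly how iommu_dma_get_msi_page() reads with the diff's additions consolidated in one place. The declarations, MSI granule sizing and duplicate-page lookup at the top are paraphrased from the surrounding kernel code rather than taken from this diff, so treat it as an illustrative sketch only:

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);

	/* Reuse an existing mapping of this doorbell if we already have one */
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	/*
	 * Allocate and map directly against the device's *current* domain
	 * (which may be a VFIO domain with an MSI cookie), rather than going
	 * through __iommu_dma_map(), which now assumes a DMA domain with an
	 * IOVA cookie.
	 */
	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}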