iommu/arm-smmu-v3: Remove io-pgtable spinlock
As for SMMUv2, take advantage of io-pgtable's newfound tolerance for concurrency. Unfortunately in this case the command queue lock remains a point of serialisation for the unmap path, but there may be a little more we can do to ameliorate that in future.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
commit 58188afeb7
parent 523d7423e2
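Background (not part of the patch): the "tolerance for concurrency" mentioned above comes from io-pgtable installing page-table entries atomically, so callers no longer need to wrap map/unmap in a lock of their own. A minimal sketch of the idea, using C11 atomics to stand in for the kernel's cmpxchg primitives; the names here are hypothetical, not the real io-pgtable code:

    #include <stdatomic.h>
    #include <stdint.h>

    /*
     * Hypothetical sketch: two CPUs mapping beneath the same empty table
     * entry each allocate a next-level table, then race to install it.
     * The compare-and-swap picks exactly one winner; the loser frees its
     * copy and continues the walk through the winner's table instead.
     */
    static uint64_t pte_install_table(_Atomic uint64_t *ptep, uint64_t new_table)
    {
        uint64_t expected = 0;  /* install only into an empty entry */

        if (atomic_compare_exchange_strong(ptep, &expected, new_table))
            return new_table;   /* we won: our table is now live */

        /* Lost the race: 'expected' was updated to the live entry. */
        return expected;
    }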
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -646,7 +646,6 @@ struct arm_smmu_domain {
 	struct mutex			init_mutex; /* Protects smmu pointer */
 
 	struct io_pgtable_ops		*pgtbl_ops;
-	spinlock_t			pgtbl_lock;
 
 	enum arm_smmu_domain_stage	stage;
 	union {
@@ -1414,7 +1413,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 	}
 
 	mutex_init(&smmu_domain->init_mutex);
-	spin_lock_init(&smmu_domain->pgtbl_lock);
 	return &smmu_domain->domain;
 }
 
@@ -1686,44 +1684,29 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
-	int ret;
-	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 
 	if (!ops)
 		return -ENODEV;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->map(ops, iova, paddr, size, prot);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-	return ret;
+	return ops->map(ops, iova, paddr, size, prot);
 }
 
 static size_t
 arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	size_t ret;
-	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 
 	if (!ops)
 		return 0;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->unmap(ops, iova, size);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-	return ret;
+	return ops->unmap(ops, iova, size);
 }
 
 static phys_addr_t
 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 {
-	phys_addr_t ret;
-	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 
 	if (domain->type == IOMMU_DOMAIN_IDENTITY)
 		return iova;
 
@@ -1731,11 +1714,7 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 	if (!ops)
 		return 0;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->iova_to_phys(ops, iova);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-
-	return ret;
+	return ops->iova_to_phys(ops, iova);
 }
 
 static struct platform_driver arm_smmu_driver;
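The serialisation called out in the commit message lives in the command-queue path: every unmap must still publish TLB invalidation commands, and all CPUs insert those into the single SMMU command queue under one lock. Roughly, simplified from the driver of this era (details elided, so treat as illustrative rather than the exact source):

    /*
     * Simplified: however scalable the page-table walk becomes, every
     * unmap funnels its TLBI command through this one cmdq lock.
     */
    static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
                                        struct arm_smmu_cmdq_ent *ent)
    {
        u64 cmd[CMDQ_ENT_DWORDS];
        unsigned long flags;

        if (arm_smmu_cmdq_build_cmd(cmd, ent))
            return;     /* unknown opcode */

        spin_lock_irqsave(&smmu->cmdq.lock, flags);
        arm_smmu_cmdq_insert_cmd(smmu, cmd);
        spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
    }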