iommu/mediatek: Get rid of the pgtlock
Now that we have the tlb_lock to serialize the HW tlb flush, the pgtable code no longer needs the external "pgtlock". This patch removes the "pgtlock".

Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 1f4fd62481
commit 60829b4d00
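The per-domain pgtlock used to cover both halves of the work, and both are now handled elsewhere: the io-pgtable code performs its PTE updates atomically, and the per-M4U tlb_lock serializes access to the hardware invalidation registers. Below is a simplified sketch of the flush path that tlb_lock protects, abbreviated from the driver of this era (the completion poll is elided), not the exact code:

/* Simplified sketch: the range-invalidation registers are only ever
 * written while holding data->tlb_lock. */
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   size_t granule, void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	unsigned long flags;

	spin_lock_irqsave(&data->tlb_lock, flags);
	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
	/* ... poll REG_MMU_CPE_DONE until the flush completes ... */
	spin_unlock_irqrestore(&data->tlb_lock, flags);
}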
@@ -101,8 +101,6 @@
 #define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
 
 struct mtk_iommu_domain {
-	spinlock_t			pgtlock; /* lock for page table */
-
 	struct io_pgtable_cfg		cfg;
 	struct io_pgtable_ops		*iop;
 
@@ -295,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
 {
 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
 
-	spin_lock_init(&dom->pgtlock);
-
 	dom->cfg = (struct io_pgtable_cfg) {
 		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
 			IO_PGTABLE_QUIRK_NO_PERMS |
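With the spin_lock_init() gone, mtk_iommu_domain_finalise() is left with only the io_pgtable_cfg setup shown above and the page-table allocation. As a sketch of how the function continues in this version of the driver (error handling condensed):

	/* Allocate the ARM short-descriptor page-table ops; no per-domain
	 * lock is set up any more. */
	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}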
@@ -395,18 +391,13 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
-	unsigned long flags;
-	int ret;
 
 	/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
 	if (data->enable_4GB)
 		paddr |= BIT_ULL(32);
 
-	spin_lock_irqsave(&dom->pgtlock, flags);
-	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
-	spin_unlock_irqrestore(&dom->pgtlock, flags);
-
-	return ret;
+	/* Synchronize with the tlb_lock */
+	return dom->iop->map(dom->iop, iova, paddr, size, prot);
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
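Dropping the lock around dom->iop->map() is safe because the short-descriptor io-pgtable code makes its own updates atomic: concurrent mappers race on a compare-and-swap instead of a lock. A hypothetical sketch of that idea follows; iopte, PTE_TYPE_TABLE, pte_to_table() and free_pgtable() are illustrative names, not the io-pgtable-arm-v7s internals:

typedef u32 iopte;	/* hypothetical short-descriptor entry */

/* Lockless install of a next-level table: exactly one mapper wins the
 * cmpxchg; the loser frees its table and reuses the winner's. */
static iopte *install_table(iopte *slot, iopte *new_table)
{
	iopte newpte = __pa(new_table) | PTE_TYPE_TABLE;
	iopte old = cmpxchg_relaxed(slot, 0, newpte);

	if (old == 0)
		return new_table;	/* our table is now live */
	free_pgtable(new_table);	/* a concurrent map() won the race */
	return pte_to_table(old);	/* walk the winner's table instead */
}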
@@ -414,14 +405,8 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
 			      struct iommu_iotlb_gather *gather)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	unsigned long flags;
-	size_t unmapsz;
 
-	spin_lock_irqsave(&dom->pgtlock, flags);
-	unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
-	spin_unlock_irqrestore(&dom->pgtlock, flags);
-
-	return unmapsz;
+	return dom->iop->unmap(dom->iop, iova, size, gather);
 }
 
 static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
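unmap needs no lock for the same reason, and it does not flush inline either: the range is recorded in the iommu_iotlb_gather that the core passes in, and the actual invalidation happens later under tlb_lock. Roughly, the core's iommu_unmap() drives this as below, a simplified sketch of the generic flow rather than mtk-specific code:

	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped, pgsize = SZ_4K;	/* pgsize chosen per iteration */

	iommu_iotlb_gather_init(&iotlb_gather);
	/* the driver's ->unmap() only accumulates what must be invalidated */
	unmapped = domain->ops->unmap(domain, iova, pgsize, &iotlb_gather);
	/* deferred flush; for mtk_iommu this ends up under data->tlb_lock */
	iommu_iotlb_sync(domain, &iotlb_gather);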
@@ -447,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
-	unsigned long flags;
 	phys_addr_t pa;
 
-	spin_lock_irqsave(&dom->pgtlock, flags);
 	pa = dom->iop->iova_to_phys(dom->iop, iova);
-	spin_unlock_irqrestore(&dom->pgtlock, flags);
-
 	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
 		pa &= ~BIT_ULL(32);
 
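The 4GB-mode handling itself is unchanged: mtk_iommu_map() sets bit 32 before the address goes into the page table, and iova_to_phys() clears it again when the read-back address is at or above MTK_IOMMU_4GB_MODE_REMAP_BASE. A worked round trip with a representative address (the value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint64_t paddr  = 0x40000000ULL;	  /* CPU-visible DRAM address */
	uint64_t pte_pa = paddr | BIT_ULL(32);	  /* stored by map(): 0x140000000 */
	uint64_t ret_pa = pte_pa & ~BIT_ULL(32);  /* returned by iova_to_phys() */

	/* prints: 0x40000000 -> 0x140000000 -> 0x40000000 */
	printf("%#llx -> %#llx -> %#llx\n",
	       (unsigned long long)paddr,
	       (unsigned long long)pte_pa,
	       (unsigned long long)ret_pa);
	return 0;
}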