mirror of https://gitee.com/openkylin/linux.git
powerpc/mm: Add real mode support for IOMMU preregistered memory
This makes mm_iommu_lookup() able to work in real mode by replacing
list_for_each_entry_rcu() (which can do debug checks that can fail in
real mode) with list_for_each_entry_lockless().

This adds a real mode version of mm_iommu_ua_to_hpa() which performs an
explicit vmalloc'd-to-linear address conversion. Unlike
mm_iommu_ua_to_hpa(), mm_iommu_ua_to_hpa_rm() can fail.

This changes mm_iommu_preregistered() to receive @mm, as in real mode
@current does not always have a correct pointer.

This adds a real mode version of mm_iommu_lookup() which receives @mm
(for the same reason as mm_iommu_preregistered()) and uses the lockless
version of list_for_each_entry_rcu().

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 6b5c19c552
parent 97da3854c5
@@ -29,10 +29,14 @@ extern void mm_iommu_init(struct mm_struct *mm);
 extern void mm_iommu_cleanup(struct mm_struct *mm);
 extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
 		unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
+		struct mm_struct *mm, unsigned long ua, unsigned long size);
 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned long *hpa);
+extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+		unsigned long ua, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
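The header now exposes the real mode variants alongside the virtual mode
ones. A minimal sketch of how a real mode caller might chain them follows;
rm_translate() is a made-up helper for illustration, not part of this
commit:

	/* Illustrative only: rm_translate() is hypothetical; it shows the
	 * intended call sequence for a real mode path. */
	static long rm_translate(struct mm_struct *mm, unsigned long ua,
				 unsigned long *hpa)
	{
		struct mm_iommu_table_group_mem_t *mem;

		/* @mm is passed explicitly: @current may be stale in real mode */
		mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
		if (!mem)
			return -EFAULT;	/* memory was not preregistered */

		/* unlike mm_iommu_ua_to_hpa(), the _rm variant can fail */
		return mm_iommu_ua_to_hpa_rm(mem, ua, hpa);
	}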
@@ -314,6 +314,25 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);
 
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
+{
+	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+
+	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
+			next) {
+		if ((mem->ua <= ua) &&
+				(ua + size <= mem->ua +
+				 (mem->entries << PAGE_SHIFT))) {
+			ret = mem;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
+
 struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries)
 {
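The only difference from mm_iommu_lookup() is the list primitive:
list_for_each_entry_rcu() can invoke RCU/lockdep debug checks (e.g. under
CONFIG_PROVE_RCU) that are unsafe with the MMU off, while
list_for_each_entry_lockless() reduces to plain READ_ONCE loads of the
next pointer. A minimal userspace sketch of that traversal pattern (the
names and the equality match are simplified for illustration; the kernel
code above does a range check):

	#include <stdio.h>

	#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

	struct node {
		struct node *next;
		unsigned long ua;
	};

	/* walk the list using only dependency-ordered loads,
	 * with no debug hooks that could fault */
	static struct node *lookup(struct node *head, unsigned long ua)
	{
		struct node *n;

		for (n = READ_ONCE(head); n; n = READ_ONCE(n->next))
			if (n->ua == ua)
				return n;
		return NULL;
	}

	int main(void)
	{
		struct node b = { NULL, 0x2000 };
		struct node a = { &b, 0x1000 };

		printf("found: %p\n", (void *)lookup(&a, 0x2000));
		return 0;
	}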
@@ -345,6 +364,26 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 
+long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+		unsigned long ua, unsigned long *hpa)
+{
+	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+	void *va = &mem->hpas[entry];
+	unsigned long *pa;
+
+	if (entry >= mem->entries)
+		return -EFAULT;
+
+	pa = (void *) vmalloc_to_phys(va);
+	if (!pa)
+		return -EFAULT;
+
+	*hpa = *pa | (ua & ~PAGE_MASK);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
 {
 	if (atomic64_inc_not_zero(&mem->mapped))
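This is the explicit vmalloc'd-to-linear conversion from the commit
message: the hpas[] table is vmalloc'd, so its slots cannot be
dereferenced through the vmalloc mapping in real mode. vmalloc_to_phys()
first resolves the slot's physical address, and only then is the per-page
HPA read and combined with the byte offset within the page. A userspace
sketch of just the address arithmetic (page size and table contents are
made up for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT	16		/* 64K pages, common on ppc64 */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		/* stand-in for mem->hpas[]: one HPA per preregistered page */
		unsigned long hpas[] = { 0x200000000UL, 0x200010000UL };
		unsigned long base_ua = 0x10000000UL;	/* stand-in for mem->ua */
		unsigned long ua = base_ua + 0x18004UL;	/* address to translate */

		unsigned long entry = (ua - base_ua) >> PAGE_SHIFT;
		unsigned long hpa = hpas[entry] | (ua & ~PAGE_MASK);

		/* prints: entry=1 hpa=0x200018004 */
		printf("entry=%lu hpa=0x%lx\n", entry, hpa);
		return 0;
	}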