mirror of https://gitee.com/openkylin/linux.git
arm64: tlb: Set the TTL field in flush_tlb_range
This patch uses the cleared_* fields in struct mmu_gather to set the TTL field in flush_tlb_range().

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20200625080314.230-6-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
parent
2631ed00b0
commit
c4ab2cbc1d
|
@ -21,11 +21,37 @@ static void tlb_flush(struct mmu_gather *tlb);
|
||||||
|
|
||||||
#include <asm-generic/tlb.h>
|
#include <asm-generic/tlb.h>
|
||||||
|
|
||||||
|
/*
|
||||||
|
* get the tlbi levels in arm64. Default value is 0 if more than one
|
||||||
|
* of cleared_* is set or neither is set.
|
||||||
|
* Arm64 doesn't support p4ds now.
|
||||||
|
*/
|
||||||
|
static inline int tlb_get_level(struct mmu_gather *tlb)
|
||||||
|
{
|
||||||
|
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
|
||||||
|
tlb->cleared_puds ||
|
||||||
|
tlb->cleared_p4ds))
|
||||||
|
return 3;
|
||||||
|
|
||||||
|
if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
|
||||||
|
tlb->cleared_puds ||
|
||||||
|
tlb->cleared_p4ds))
|
||||||
|
return 2;
|
||||||
|
|
||||||
|
if (tlb->cleared_puds && !(tlb->cleared_ptes ||
|
||||||
|
tlb->cleared_pmds ||
|
||||||
|
tlb->cleared_p4ds))
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static inline void tlb_flush(struct mmu_gather *tlb)
|
static inline void tlb_flush(struct mmu_gather *tlb)
|
||||||
{
|
{
|
||||||
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
|
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
|
||||||
bool last_level = !tlb->freed_tables;
|
bool last_level = !tlb->freed_tables;
|
||||||
unsigned long stride = tlb_get_unmap_size(tlb);
|
unsigned long stride = tlb_get_unmap_size(tlb);
|
||||||
|
int tlb_level = tlb_get_level(tlb);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we're tearing down the address space then we only care about
|
* If we're tearing down the address space then we only care about
|
||||||
|
@ -38,7 +64,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
|
__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
|
||||||
|
last_level, tlb_level);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
|
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
|
||||||
|
|
|
@ -230,7 +230,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
|
||||||
|
|
||||||
static inline void __flush_tlb_range(struct vm_area_struct *vma,
|
static inline void __flush_tlb_range(struct vm_area_struct *vma,
|
||||||
unsigned long start, unsigned long end,
|
unsigned long start, unsigned long end,
|
||||||
unsigned long stride, bool last_level)
|
unsigned long stride, bool last_level,
|
||||||
|
int tlb_level)
|
||||||
{
|
{
|
||||||
unsigned long asid = ASID(vma->vm_mm);
|
unsigned long asid = ASID(vma->vm_mm);
|
||||||
unsigned long addr;
|
unsigned long addr;
|
||||||
|
@ -252,11 +253,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
|
||||||
dsb(ishst);
|
dsb(ishst);
|
||||||
for (addr = start; addr < end; addr += stride) {
|
for (addr = start; addr < end; addr += stride) {
|
||||||
if (last_level) {
|
if (last_level) {
|
||||||
__tlbi_level(vale1is, addr, 0);
|
__tlbi_level(vale1is, addr, tlb_level);
|
||||||
__tlbi_user_level(vale1is, addr, 0);
|
__tlbi_user_level(vale1is, addr, tlb_level);
|
||||||
} else {
|
} else {
|
||||||
__tlbi_level(vae1is, addr, 0);
|
__tlbi_level(vae1is, addr, tlb_level);
|
||||||
__tlbi_user_level(vae1is, addr, 0);
|
__tlbi_user_level(vae1is, addr, tlb_level);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
dsb(ish);
|
dsb(ish);
|
||||||
|
@ -268,8 +269,9 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
|
||||||
/*
|
/*
|
||||||
* We cannot use leaf-only invalidation here, since we may be invalidating
|
* We cannot use leaf-only invalidation here, since we may be invalidating
|
||||||
* table entries as part of collapsing hugepages or moving page tables.
|
* table entries as part of collapsing hugepages or moving page tables.
|
||||||
|
* Set the tlb_level to 0 because we cannot get enough information here.
|
||||||
*/
|
*/
|
||||||
__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
|
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
||||||
|
|
Loading…
Reference in New Issue