mirror of https://gitee.com/openkylin/linux.git
powerpc/mm/book3s64/radix: Use freed_tables instead of need_flush_all
With commit 22a61c3c4f
("asm-generic/tlb: Track freeing of
page-table directories in struct mmu_gather") we now track whether we
freed page tables in mmu_gather. Use that to decide whether to flush
the Page Walk Cache.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191024075801.22434-2-aneesh.kumar@linux.ibm.com
This commit is contained in:
parent
a42d6ba8c5
commit
52162ec784
|
@ -122,11 +122,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|||
/*
 * Queue a PUD-level page table for freeing via the mmu_gather batch.
 * By this point all the pud entries should have been cleared (set to
 * none), so it is safe to flush the page walk cache before the table
 * itself is freed.
 */
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/* PWC flush first: no walk may cache entries from a freed table. */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}
|
||||
|
||||
|
@ -143,11 +138,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
|||
/*
 * Queue a PMD-level page table for freeing via the mmu_gather batch.
 * By this point all the pmd entries should have been cleared (set to
 * none), so it is safe to flush the page walk cache before the table
 * itself is freed.
 */
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * Comment fixed: the original said "pud entries" — a copy-paste
	 * from __pud_free_tlb(); this helper frees a pmd table.
	 */
	flush_tlb_pgtable(tlb, address);
	/*
	 * Dropped the stray "return" of a void expression so this helper
	 * matches __pud_free_tlb() and __pte_free_tlb(); behavior is
	 * unchanged since pgtable_free_tlb() returns void.
	 */
	pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}
|
||||
|
||||
|
@ -166,11 +156,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
|
|||
/*
 * Queue a PTE-level page table for freeing via the mmu_gather batch.
 * By this point all the pte entries should have been cleared (set to
 * none), so it is safe to flush the page walk cache before the table
 * itself is freed.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * Comment fixed: the original said "pud entries" — a copy-paste
	 * from __pud_free_tlb(); this helper frees a pte table.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}
|
||||
|
||||
|
|
|
@ -147,22 +147,6 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
|
|||
flush_tlb_page(vma, address);
|
||||
}
|
||||
|
||||
/*
 * Flush the page walk cache for @address when a page table is freed.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * The upper/higher level page table entry has already been set
	 * to none by the caller, so it is safe to flush the PWC here.
	 * Only the radix MMU maintains a page walk cache; on hash there
	 * is nothing to do.
	 */
	if (radix_enabled())
		radix__flush_tlb_pwc(tlb, address);
}
|
||||
|
||||
extern bool tlbie_capable;
|
||||
extern bool tlbie_enabled;
|
||||
|
||||
|
|
|
@ -732,18 +732,13 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
|
|||
}
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/*
 * Flush all TLB entries for @mm on radix.  Exported thin wrapper that
 * delegates to __flush_all_mm() with fullmm == false.
 */
void radix__flush_all_mm(struct mm_struct *mm)
{
	__flush_all_mm(mm, false);
}
EXPORT_SYMBOL(radix__flush_all_mm);
|
||||
|
||||
/*
 * Request a page walk cache flush for the mm tracked by @tlb.  No
 * flush is issued here; the gather is only marked so that the actual
 * invalidation is performed later, when the batch is flushed (see
 * radix__tlb_flush(), which tests this flag).  @addr is unused.
 */
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);
|
||||
|
||||
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
|
||||
int psize)
|
||||
{
|
||||
|
@ -1003,12 +998,12 @@ void radix__tlb_flush(struct mmu_gather *tlb)
|
|||
if (tlb->fullmm) {
|
||||
__flush_all_mm(mm, true);
|
||||
} else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
|
||||
if (!tlb->need_flush_all)
|
||||
if (!tlb->freed_tables)
|
||||
radix__flush_tlb_mm(mm);
|
||||
else
|
||||
radix__flush_all_mm(mm);
|
||||
} else {
|
||||
if (!tlb->need_flush_all)
|
||||
if (!tlb->freed_tables)
|
||||
radix__flush_tlb_range_psize(mm, start, end, psize);
|
||||
else
|
||||
radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
|
||||
|
|
Loading…
Reference in New Issue