powerpc/mm: move FSL_BOOK3E version of update_mmu_cache()
Move the FSL_BOOK3E version of update_mmu_cache() to the same place as book3e_hugetlb_preload(), as update_mmu_cache() is the only user of book3e_hugetlb_preload().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/4d69fdc86df9c74adc71a60331a86f6afb8b5e9e.1565933217.git.christophe.leroy@c-s.fr
commit 4c1616ef03
parent d964211791
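For orientation, a condensed sketch of the end state this patch produces in the Book3E hugetlb file (assumed, from the hunk contexts, to be arch/powerpc/mm/nohash/book3e_hugetlbpage.c; this page does not show file names): the preload helper becomes static and its single caller now sits right next to it, so the header declaration and the copy in the common memory-management code can go.

/*
 * Sketch only; bodies elided. See the hunks below for the actual change.
 * File placement is an assumption based on the surrounding context lines.
 */
static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
	/* ... programs a TLB entry for the huge page mapping at 'ea' ... */
}

/* Only caller; no #ifdef needed here, since this file is only built for
 * the FSL Book3E hugetlb configuration. */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}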
@@ -31,9 +31,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return 0;
 }
 
-void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
-			    pte_t pte);
-
 #define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,

@@ -458,14 +458,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	hash_preload(vma->vm_mm, address, is_exec, trap);
 }
 #endif /* CONFIG_PPC_BOOK3S */
-#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *ptep)
-{
-	if (is_vm_hugetlb_page(vma))
-		book3e_hugetlb_preload(vma, address, *ptep);
-}
-#endif
 
 /*
  * System memory should not be in /proc/iomem but various tools expect it
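After this hunk, which from the hash_preload() and /proc/iomem context appears to be against arch/powerpc/mm/mem.c, that file keeps only the hash MMU flavour of update_mmu_cache(); the FSL_BOOK3E flavour and its CONFIG_PPC_FSL_BOOK3E/CONFIG_HUGETLB_PAGE guard disappear. Roughly what remains (body elided, opening guard assumed from the #endif comment):

#ifdef CONFIG_PPC_BOOK3S	/* opening guard assumed from the #endif above */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	/* ... hash MMU specific filtering elided ... */
	hash_preload(vma->vm_mm, address, is_exec, trap);
}
#endif /* CONFIG_PPC_BOOK3S */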
@@ -122,8 +122,8 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 	return found;
 }
 
-void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
-			    pte_t pte)
+static void
+book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
 {
 	unsigned long mas1, mas2;
 	u64 mas7_3;

@@ -183,6 +183,18 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
 	local_irq_restore(flags);
 }
 
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ *
+ * This must always be called with the pte lock held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+	if (is_vm_hugetlb_page(vma))
+		book3e_hugetlb_preload(vma, address, *ptep);
+}
+
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	struct hstate *hstate = hstate_file(vma->vm_file);
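The comment that moves in with the function describes the generic contract: the core MM calls update_mmu_cache() at the end of a user page fault, after the Linux PTE has been updated and while the pte lock is still held. A simplified, illustrative call-site shape (the real callers live in generic code such as mm/memory.c):

	/* generic fault path, simplified: install the PTE under the pte lock,
	 * then let the architecture preload its TLB/MMU state for 'address' */
	set_pte_at(vma->vm_mm, address, ptep, entry);
	update_mmu_cache(vma, address, ptep);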