powerpc/mm: Rename find_linux_pte_or_hugepte()
Add newer helpers to make the function usage simpler. It is always
recommended to use find_current_mm_pte() for walking the page table.
If we cannot use find_current_mm_pte(), it should be documented why
the said usage of __find_linux_pte() is safe against a parallel THP
split.

For now we have KVM code using __find_linux_pte(). This is because
kvm code ends up calling __find_linux_pte() in real mode with MSR_EE=0
but with PACA soft_enabled = 1. We may want to fix that later and make
sure we keep the MSR_EE and PACA soft_enabled in sync. When we do that
we can switch kvm to use find_linux_pte().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 520eccdfe1
commit 94171b19c3
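For illustration, here is a minimal sketch of how a caller is expected to use the new find_current_mm_pte() helper for a lockless walk of the current task's page tables. The wrapper example_ea_to_pfn() is hypothetical and not part of this commit; find_current_mm_pte(), READ_ONCE(), pte_present() and pte_pfn() are existing kernel interfaces.

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pte-walk.h>

/*
 * Hypothetical caller, not part of this commit: look up the pfn backing
 * a user effective address in the current task's page tables. The walk
 * is lockless, so interrupts stay disabled from the lookup until we are
 * done with the returned PTE; that is what keeps the walk safe against
 * a parallel THP split/collapse or page table free.
 */
static unsigned long example_ea_to_pfn(unsigned long ea)
{
        unsigned long flags, pfn = 0;
        pte_t *ptep, pte;

        local_irq_save(flags);
        /* is_thp and hugepage shift are not needed here, so pass NULL */
        ptep = find_current_mm_pte(current->mm->pgd, ea, NULL, NULL);
        if (ptep) {
                pte = READ_ONCE(*ptep);
                if (pte_present(pte))
                        pfn = pte_pfn(pte);
        }
        local_irq_restore(flags);
        return pfn;
}

Call sites that walk init_mm (the eeh and io-workarounds hunks below) use find_init_mm_pte() instead and, per the comments kept in those hunks, do not need the irq save/restore because init_mm page tables are never freed.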
@@ -66,16 +66,8 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd) 0
 #endif
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-                                   bool *is_thp, unsigned *shift);
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-                                               bool *is_thp, unsigned *shift)
-{
-        VM_WARN(!arch_irqs_disabled(),
-                "%s called with irq enabled\n", __func__);
-        return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
-}
+
 /* can we use this in kvm */
 unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
@@ -0,0 +1,35 @@
+#ifndef _ASM_POWERPC_PTE_WALK_H
+#define _ASM_POWERPC_PTE_WALK_H
+
+#include <linux/sched.h>
+
+/* Don't use this directly */
+extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+                               bool *is_thp, unsigned *hshift);
+
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
+                                    bool *is_thp, unsigned *hshift)
+{
+        VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+        return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
+{
+        pgd_t *pgdir = init_mm.pgd;
+        return __find_linux_pte(pgdir, ea, NULL, hshift);
+}
+/*
+ * This is what we should always use. Any other lockless page table lookup needs
+ * careful audit against THP split.
+ */
+static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
+                                         bool *is_thp, unsigned *hshift)
+{
+        VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+        VM_WARN(pgdir != current->mm->pgd,
+                "%s lock less page table lookup called on wrong mm\n", __func__);
+        return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+#endif /* _ASM_POWERPC_PTE_WALK_H */
@@ -44,6 +44,7 @@
 #include <asm/machdep.h>
 #include <asm/ppc-pci.h>
 #include <asm/rtas.h>
+#include <asm/pte-walk.h>
 
 
 /** Overview:
@@ -352,8 +353,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
          * worried about _PAGE_SPLITTING/collapse. Also we will not hit
          * page table free, because of init_mm.
          */
-        ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token,
-                                           NULL, &hugepage_shift);
+        ptep = find_init_mm_pte(token, &hugepage_shift);
         if (!ptep)
                 return token;
         WARN_ON(hugepage_shift);
@@ -19,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/ppc-pci.h>
 #include <asm/io-workarounds.h>
+#include <asm/pte-walk.h>
+
 
 #define IOWA_MAX_BUS 8
 
@@ -75,8 +77,7 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
          * We won't find huge pages here (iomem). Also can't hit
          * a page table free due to init_mm
          */
-        ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
-                                           NULL, &hugepage_shift);
+        ptep = find_init_mm_pte(vaddr, &hugepage_shift);
         if (ptep == NULL)
                 paddr = 0;
         else {
@@ -37,6 +37,7 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
+#include <asm/pte-walk.h>
 
 #include "trace_hv.h"
 
@@ -597,8 +598,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
          * hugepage split and collapse.
          */
         local_irq_save(flags);
-        ptep = find_linux_pte_or_hugepte(current->mm->pgd,
-                                         hva, NULL, NULL);
+        ptep = find_current_mm_pte(current->mm->pgd,
+                                   hva, NULL, NULL);
         if (ptep) {
                 pte = kvmppc_read_update_linux_pte(ptep, 1);
                 if (__pte_write(pte))
@@ -17,6 +17,7 @@
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/pte-walk.h>
 
 /*
  * Supported radix tree geometry.
@@ -359,8 +360,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         if (writing)
                 pgflags |= _PAGE_DIRTY;
         local_irq_save(flags);
-        ptep = __find_linux_pte_or_hugepte(current->mm->pgd, hva,
-                                           NULL, NULL);
+        ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL);
         if (ptep) {
                 pte = READ_ONCE(*ptep);
                 if (pte_present(pte) &&
@@ -374,8 +374,12 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 spin_unlock(&kvm->mmu_lock);
                 return RESUME_GUEST;
         }
-        ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable,
-                                           gpa, NULL, &shift);
+        /*
+         * We are walking the secondary page table here. We can do this
+         * without disabling irq.
+         */
+        ptep = __find_linux_pte(kvm->arch.pgtable,
+                                gpa, NULL, &shift);
         if (ptep && pte_present(*ptep)) {
                 kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
                                         gpa, shift);
@@ -427,8 +431,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         pgflags |= _PAGE_WRITE;
         } else {
                 local_irq_save(flags);
-                ptep = __find_linux_pte_or_hugepte(current->mm->pgd,
-                                                   hva, NULL, NULL);
+                ptep = find_current_mm_pte(current->mm->pgd,
+                                           hva, NULL, NULL);
                 if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
                         pgflags |= _PAGE_WRITE;
                 local_irq_restore(flags);
@@ -499,8 +503,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
         unsigned int shift;
         unsigned long old;
 
-        ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
-                                           NULL, &shift);
+        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
         if (ptep && pte_present(*ptep)) {
                 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
                                               gpa, shift);
@@ -525,8 +528,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
         unsigned int shift;
         int ref = 0;
 
-        ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
-                                           NULL, &shift);
+        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
         if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                 kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                         gpa, shift);
@@ -545,8 +547,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
         unsigned int shift;
         int ref = 0;
 
-        ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
-                                           NULL, &shift);
+        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
         if (ptep && pte_present(*ptep) && pte_young(*ptep))
                 ref = 1;
         return ref;
@@ -562,8 +563,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
         unsigned int shift;
         int ret = 0;
 
-        ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
-                                           NULL, &shift);
+        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
         if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                 ret = 1;
                 if (shift)
@@ -39,6 +39,7 @@
 #include <asm/udbg.h>
 #include <asm/iommu.h>
 #include <asm/tce.h>
+#include <asm/pte-walk.h>
 
 #ifdef CONFIG_BUG
 
@@ -353,7 +354,16 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
         pte_t *ptep, pte;
         unsigned shift = 0;
 
-        ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
+        /*
+         * Called in real mode with MSR_EE = 0. We are safe here.
+         * It is ok to do the lookup with arch.pgdir here, because
+         * we are doing this on secondary cpus and current task there
+         * is not the hypervisor. Also this is safe against THP in the
+         * host, because an IPI to primary thread will wait for the secondary
+         * to exit which will again result in the below page table walk
+         * to finish.
+         */
+        ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
         if (!ptep || !pte_present(*ptep))
                 return -ENXIO;
         pte = *ptep;
@@ -22,6 +22,7 @@
 #include <asm/hvcall.h>
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
 
 /* Translate address of a vmalloc'd thing to a linear map address */
 static void *real_vmalloc_addr(void *x)
@@ -31,9 +32,9 @@ static void *real_vmalloc_addr(void *x)
         /*
          * assume we don't have huge pages in vmalloc space...
          * So don't worry about THP collapse/split. Called
-         * Only in realmode, hence won't need irq_save/restore.
+         * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
          */
-        p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL, NULL);
+        p = find_init_mm_pte(addr, NULL);
         if (!p || !pte_present(*p))
                 return NULL;
         addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
@@ -230,14 +231,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
          * If we had a page table table change after lookup, we would
          * retry via mmu_notifier_retry.
          */
-        if (realmode)
-                ptep = __find_linux_pte_or_hugepte(pgdir, hva, NULL,
-                                                   &hpage_shift);
-        else {
+        if (!realmode)
                 local_irq_save(irq_flags);
-                ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL,
-                                                 &hpage_shift);
-        }
+        /*
+         * If called in real mode we have MSR_EE = 0. Otherwise
+         * we disable irq above.
+         */
+        ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
         if (ptep) {
                 pte_t pte;
                 unsigned int host_pte_size;
@@ -30,6 +30,7 @@
 #include <linux/vmalloc.h>
 #include <linux/hugetlb.h>
 #include <asm/kvm_ppc.h>
+#include <asm/pte-walk.h>
 
 #include "e500.h"
 #include "timing.h"
@@ -476,7 +477,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
          * can't run hence pfn won't change.
          */
         local_irq_save(flags);
-        ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, NULL);
+        ptep = find_linux_pte(pgdir, hva, NULL, NULL);
         if (ptep) {
                 pte_t pte = READ_ONCE(*ptep);
 
@@ -61,6 +61,7 @@
 #include <asm/tm.h>
 #include <asm/trace.h>
 #include <asm/ps3.h>
+#include <asm/pte-walk.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -1297,7 +1298,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */
 
         /* Get PTE and page size from page tables */
-        ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift);
+        ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift);
         if (ptep == NULL || !pte_present(*ptep)) {
                 DBG_LOW(" no PTE !\n");
                 rc = 1;
@@ -1526,7 +1527,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
          * THP pages use update_mmu_cache_pmd. We don't do
          * hash preload there. Hence can ignore THP here
          */
-        ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift);
+        ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
         if (!ptep)
                 goto out_exit;
 
@@ -24,6 +24,8 @@
 #include <asm/tlb.h>
 #include <asm/setup.h>
 #include <asm/hugetlb.h>
+#include <asm/pte-walk.h>
+
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -60,8 +62,11 @@ static unsigned nr_gpages;
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
-        /* Only called for hugetlbfs pages, hence can ignore THP */
-        return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
+        /*
+         * Only called for hugetlbfs pages, hence can ignore THP and the
+         * irq disabled walk.
+         */
+        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
 }
 
 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -886,9 +891,8 @@ void flush_dcache_icache_hugepage(struct page *page)
  * This function need to be called with interrupts disabled. We use this variant
  * when we have MSR[EE] = 0 but the paca->soft_enabled = 1
  */
-
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-                                   bool *is_thp, unsigned *shift)
+pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+                        bool *is_thp, unsigned *hpage_shift)
 {
         pgd_t pgd, *pgdp;
         pud_t pud, *pudp;
@@ -897,8 +901,8 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
         hugepd_t *hpdp = NULL;
         unsigned pdshift = PGDIR_SHIFT;
 
-        if (shift)
-                *shift = 0;
+        if (hpage_shift)
+                *hpage_shift = 0;
 
         if (is_thp)
                 *is_thp = false;
@@ -968,11 +972,11 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
         ret_pte = hugepte_offset(*hpdp, ea, pdshift);
         pdshift = hugepd_shift(*hpdp);
 out:
-        if (shift)
-                *shift = pdshift;
+        if (hpage_shift)
+                *hpage_shift = pdshift;
         return ret_pte;
 }
-EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
+EXPORT_SYMBOL_GPL(__find_linux_pte);
 
 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                 unsigned long end, int write, struct page **pages, int *nr)
@@ -29,6 +29,8 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include <asm/bug.h>
+#include <asm/pte-walk.h>
+
 
 #include <trace/events/thp.h>
 
@@ -207,8 +209,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
         local_irq_save(flags);
         arch_enter_lazy_mmu_mode();
         for (; start < end; start += PAGE_SIZE) {
-                pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
-                                                        &hugepage_shift);
+                pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
+                                                  &hugepage_shift);
                 unsigned long pte;
 
                 if (ptep == NULL)
@@ -22,6 +22,7 @@
 #ifdef CONFIG_PPC64
 #include "../kernel/ppc32.h"
 #endif
+#include <asm/pte-walk.h>
 
 
 /*
@@ -127,7 +128,7 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
                 return -EFAULT;
 
         local_irq_save(flags);
-        ptep = find_linux_pte_or_hugepte(pgdir, addr, NULL, &shift);
+        ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
         if (!ptep)
                 goto err_out;
         if (!shift)