From eadfb1c5f8c02f428a565e62e908e99900b697e4 Mon Sep 17 00:00:00 2001
From: Suraj Jitindar Singh
Date: Fri, 22 Mar 2019 17:05:45 +1100
Subject: [PATCH] KVM: PPC: Book3S HV: Implement real mode H_PAGE_INIT handler

Implement a real mode handler for the H_CALL H_PAGE_INIT which can be
used to zero or copy a guest page. The page is defined to be 4k and
must be 4k aligned.

The in-kernel real mode handler halves the time to handle this H_CALL
compared to handling it in userspace for a hash guest.

Signed-off-by: Suraj Jitindar Singh
Signed-off-by: Paul Mackerras
---
 arch/powerpc/include/asm/kvm_ppc.h      |   2 +
 arch/powerpc/kvm/book3s_hv_rm_mmu.c     | 144 ++++++++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |   2 +-
 3 files changed, 147 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ac22b28ae78d..df0b173da3e0 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -665,6 +665,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
 			unsigned long pte_index);
 long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 			unsigned long pte_index);
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			   unsigned long dest, unsigned long src);
 long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 			  unsigned long slb_v, unsigned int status, bool data);
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 3b3791ed74a6..8431ad1e8391 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -13,6 +13,7 @@
 #include <linux/hugetlb.h>
 #include <linux/module.h>
 #include <linux/log2.h>
+#include <linux/sizes.h>
 
 #include <asm/trace.h>
 #include <asm/kvm_ppc.h>
@@ -867,6 +868,149 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 	return ret;
 }
 
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
+			  int writing, unsigned long *hpa,
+			  struct kvm_memory_slot **memslot_p)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *memslot;
+	unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
+	unsigned int shift;
+	pte_t *ptep, pte;
+
+	/* Find the memslot for this address */
+	gfn = gpa >> PAGE_SHIFT;
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+		return H_PARAMETER;
+
+	/* Translate to host virtual address */
+	hva = __gfn_to_hva_memslot(memslot, gfn);
+
+	/* Try to find the host pte for that virtual address */
+	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	if (!ptep)
+		return H_TOO_HARD;
+	pte = kvmppc_read_update_linux_pte(ptep, writing);
+	if (!pte_present(pte))
+		return H_TOO_HARD;
+
+	/* Convert to a physical address */
+	if (shift)
+		psize = 1UL << shift;
+	pa = pte_pfn(pte) << PAGE_SHIFT;
+	pa |= hva & (psize - 1);
+	pa |= gpa & ~PAGE_MASK;
+
+	if (hpa)
+		*hpa = pa;
+	if (memslot_p)
+		*memslot_p = memslot;
+
+	return H_SUCCESS;
+}
+
+static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
+				       unsigned long dest)
+{
+	struct kvm_memory_slot *memslot;
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long pa, mmu_seq;
+	long ret = H_SUCCESS;
+	int i;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	/* Check if we've been invalidated */
+	raw_spin_lock(&kvm->mmu_lock.rlock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = H_TOO_HARD;
+		goto out_unlock;
+	}
+
+	/* Zero the page */
+	for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
+		dcbz((void *)pa);
+	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	raw_spin_unlock(&kvm->mmu_lock.rlock);
+	return ret;
+}
+
+static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
+				       unsigned long dest, unsigned long src)
+{
+	unsigned long dest_pa, src_pa, mmu_seq;
+	struct kvm_memory_slot *dest_memslot;
+	struct kvm *kvm = vcpu->kvm;
+	long ret = H_SUCCESS;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
+	if (ret != H_SUCCESS)
+		return ret;
+	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	/* Check if we've been invalidated */
+	raw_spin_lock(&kvm->mmu_lock.rlock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = H_TOO_HARD;
+		goto out_unlock;
+	}
+
+	/* Copy the page */
+	memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
+
+	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	raw_spin_unlock(&kvm->mmu_lock.rlock);
+	return ret;
+}
+
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			   unsigned long dest, unsigned long src)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 pg_mask = SZ_4K - 1;	/* 4K page size */
+	long ret = H_SUCCESS;
+
+	/* Don't handle radix mode here, go up to the virtual mode handler */
+	if (kvm_is_radix(kvm))
+		return H_TOO_HARD;
+
+	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+		return H_PARAMETER;
+
+	/* dest (and src if copy_page flag set) must be page aligned */
+	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+		return H_PARAMETER;
+
+	/* zero and/or copy the page as determined by the flags */
+	if (flags & H_COPY_PAGE)
+		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
+	else if (flags & H_ZERO_PAGE)
+		ret = kvmppc_do_h_page_init_zero(vcpu, dest);
+
+	/* We can ignore the other flags */
+
+	return ret;
+}
+
 void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
 			unsigned long pte_index)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3a5e719ef032..4e8be4d9e114 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2281,7 +2281,7 @@ hcall_real_table:
 #endif
 	.long	0		/* 0x24 - H_SET_SPRG0 */
 	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
-	.long	0		/* 0x2c */
+	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
 	.long	0		/* 0x30 */
 	.long	0		/* 0x34 */
 	.long	0		/* 0x38 */
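
For context, dest (and src for a copy) are passed to H_PAGE_INIT as guest
real addresses of 4k-aligned pages. Below is a minimal guest-side sketch,
not part of this patch: the helper name guest_zero_page() is invented for
illustration, while plpar_hcall_norets(), H_PAGE_INIT, H_ZERO_PAGE and
H_SUCCESS are the existing pseries definitions from asm/hvcall.h.

	/*
	 * Illustrative only (not part of this patch): zero one 4k-aligned
	 * page via H_PAGE_INIT from a pseries guest kernel.
	 */
	#include <linux/sizes.h>
	#include <linux/string.h>
	#include <asm/hvcall.h>
	#include <asm/page.h>

	static int guest_zero_page(void *page)	/* page must be 4k aligned */
	{
		unsigned long dest = __pa(page);	/* guest real address */
		long rc;

		rc = plpar_hcall_norets(H_PAGE_INIT, H_ZERO_PAGE, dest, 0);
		if (rc == H_SUCCESS)
			return 0;

		/* Not handled (e.g. H_FUNCTION on older hypervisors): do it here */
		memset(page, 0, SZ_4K);
		return 0;
	}

On a hash guest such a call is now serviced entirely by the real mode
handler above; for a radix guest kvmppc_rm_h_page_init() returns H_TOO_HARD
and the hcall is handled on the virtual mode path instead.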