KVM: PPC: Book3S HV P9: implement hash host / hash guest support

Implement support for hash guests under hash host. This has to save and
restore the host SLB, and ensure that the MMU is off while switching
into the guest SLB.

POWER9 and later CPUs now always go via the P9 path. The "fast" guest
mode is now renamed to the P9 mode, which is consistent with its
functionality and the rest of the naming.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-32-npiggin@gmail.com
commit 0bf7e1b2e9 (parent 079a09a500)
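The ordering the patch establishes for a hash host running a hash guest, condensed from the hunks below into one C sketch (radix and prefetch-bug variations omitted; the helpers are the patch's own):

	/* MMU (and RI) off: loading guest SLB entries must not fault */
	__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);

	/* hash host: clear and invalidate all host SLB entries */
	save_clear_host_mmu(kvm);

	/* install guest LPID/LPCR and the guest SLB */
	switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);

	/*
	 * ... guest runs; on exit, switch_mmu_to_host() calls
	 * slb_restore_bolted_realmode() to rebuild the host's bolted
	 * SLB entries before translation is switched back on.
	 */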
@@ -147,7 +147,7 @@
 #define KVM_GUEST_MODE_SKIP	2
 #define KVM_GUEST_MODE_GUEST_HV	3
 #define KVM_GUEST_MODE_HOST_HV	4
-#define KVM_GUEST_MODE_HV_FAST	5 /* ISA >= v3.0 host radix */
+#define KVM_GUEST_MODE_HV_P9	5 /* ISA >= v3.0 path */
 
 #define KVM_INST_FETCH_FAILED	-1
 
@@ -36,7 +36,7 @@
 kvmppc_hcall:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	lbz	r10,HSTATE_IN_GUEST(r13)
-	cmpwi	r10,KVM_GUEST_MODE_HV_FAST
+	cmpwi	r10,KVM_GUEST_MODE_HV_P9
 	beq	kvmppc_p9_exit_hcall
 #endif
 	ld	r10,PACA_EXGEN+EX_R13(r13)
@@ -68,7 +68,7 @@ kvmppc_interrupt:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	std	r10,HSTATE_SCRATCH0(r13)
 	lbz	r10,HSTATE_IN_GUEST(r13)
-	cmpwi	r10,KVM_GUEST_MODE_HV_FAST
+	cmpwi	r10,KVM_GUEST_MODE_HV_P9
 	beq	kvmppc_p9_exit_interrupt
 	ld	r10,HSTATE_SCRATCH0(r13)
 #endif
@@ -183,8 +183,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 /*
  * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
  *
- * Enter the guest on a ISAv3.0 or later system where we have exactly
- * one vcpu per vcore, and the host is radix.
+ * Enter the guest on a ISAv3.0 or later system.
  */
 .balign	IFETCH_ALIGN_BYTES
 _GLOBAL(kvmppc_p9_enter_guest)
@@ -284,7 +283,7 @@ kvmppc_p9_exit_hcall:
 .balign	IFETCH_ALIGN_BYTES
 kvmppc_p9_exit_interrupt:
 	/*
-	 * If set to KVM_GUEST_MODE_HV_FAST but we're still in the
+	 * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
 	 * hypervisor, that means we can't return from the entry stack.
 	 */
 	rldicl.	r10,r12,64-MSR_HV_LG,63
@@ -358,6 +357,12 @@ kvmppc_p9_exit_interrupt:
  * effort for a small bit of code. Lots of other things to do first.
  */
 kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+	/*
+	 * Hash host doesn't try to recover MMU (requires host SLB reload)
+	 */
+	b	.
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 	/*
 	 * Clean up guest registers to give host a chance to run.
 	 */
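BEGIN_MMU_FTR_SECTION / END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) is the powerpc boot-time alternative-patching mechanism: the bracketed instructions stay in place only when the named MMU feature is clear, and are patched out otherwise, so the `b .` spins only on a hash (non-radix) host. A rough C analogue, as a sketch rather than code from the patch:

	if (!mmu_has_feature(MMU_FTR_TYPE_RADIX)) {
		/*
		 * Hash host: the host SLB was cleared on entry and cannot
		 * be reloaded from this bad-interrupt path, so hang rather
		 * than run on with a corrupt MMU context.
		 */
		for (;;)
			;
	}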
@@ -4511,7 +4511,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
-		if (radix_enabled())
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
 			r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
 						  vcpu->arch.vcore->lpcr);
 		else
@@ -5599,6 +5599,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
 		return -EPERM;
 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 		return -ENODEV;
+	if (!radix_enabled())
+		return -ENODEV;
 
 	/* kvm == NULL means the caller is testing if the capability exists */
 	if (kvm)
@@ -130,7 +130,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
 	isync();
 }
 
-static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
 	isync();
 	mtspr(SPRN_PID, pid);
@@ -139,6 +139,22 @@ static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
 	isync();
 	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
 	isync();
+
+	if (!radix_enabled())
+		slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+	if (!radix_enabled()) {
+		/*
+		 * Hash host could save and restore host SLB entries to
+		 * reduce SLB fault overheads of VM exits, but for now the
+		 * existing code clears all entries and restores just the
+		 * bolted ones when switching back to host.
+		 */
+		slb_clear_invalidate_partition();
+	}
 }
 
 static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
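The comment records a deliberate trade-off: instead of saving every host SLB entry on entry and reinserting them all on exit (cheaper exits, more entry work), entry clears everything and exit recreates only the kernel's bolted entries, letting the rest refault on demand. slb_restore_bolted_realmode() is an existing helper in the powerpc SLB code; roughly, and this is an assumed simplification rather than the helper's actual body, the restore amounts to:

	int i;

	for (i = 0; i < SLB_NUM_BOLTED; i++) {
		/* bolted entries are mirrored in the SLB shadow buffer */
		u64 esid = be64_to_cpu(get_slb_shadow()->save_area[i].esid);
		u64 vsid = be64_to_cpu(get_slb_shadow()->save_area[i].vsid);

		asm volatile("slbmte %0,%1" : : "r" (vsid), "r" (esid));
	}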
@@ -271,16 +287,24 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	mtspr(SPRN_AMOR, ~0UL);
 
-	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_FAST;
+	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+
+	/*
+	 * Hash host, hash guest, or radix guest with prefetch bug, all have
+	 * to disable the MMU before switching to guest MMU state.
+	 */
+	if (!radix_enabled() || !kvm_is_radix(kvm) ||
+			cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+		__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
+	save_clear_host_mmu(kvm);
+
 	if (kvm_is_radix(kvm)) {
-		if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
-			__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
 		switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
 		if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
 			__mtmsrd(0, 1); /* clear RI */
 
 	} else {
-		__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
 		switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
 	}
 
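The new guard folds three cases into one condition. Spelled out as a predicate for readability (the helper name here is invented for illustration; the logic is the patch's):

	static bool p9_switch_needs_mmu_off(struct kvm *kvm)
	{
		return !radix_enabled() ||	/* hash host: host SLB must go */
		       !kvm_is_radix(kvm) ||	/* hash guest: guest SLB load */
		       cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG);
	}

On radix-on-radix hardware without the prefetch bug the MMU can stay on across the switch, and only RI is cleared afterwards.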
@@ -468,7 +492,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	mtspr(SPRN_HDEC, 0x7fffffff);
 
 	save_clear_guest_mmu(kvm, vcpu);
-	switch_mmu_to_host_radix(kvm, host_pidr);
+	switch_mmu_to_host(kvm, host_pidr);
 	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
 
 	/*