mirror of https://gitee.com/openkylin/linux.git
Merge branch 'topic/ppc-kvm' into next
Merge some powerpc KVM patches from our topic branch. In particular this
brings in Nick's big series rewriting parts of the guest entry/exit path in C.

Conflicts:
	arch/powerpc/kernel/security.c
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
commit 3c53642324
@ -120,6 +120,7 @@ extern s32 patch__call_flush_branch_caches3;
|
|||
extern s32 patch__flush_count_cache_return;
|
||||
extern s32 patch__flush_link_stack_return;
|
||||
extern s32 patch__call_kvm_flush_link_stack;
|
||||
extern s32 patch__call_kvm_flush_link_stack_p9;
|
||||
extern s32 patch__memset_nocache, patch__memcpy_nocache;
|
||||
|
||||
extern long flush_branch_caches;
|
||||
|
@ -140,7 +141,7 @@ void kvmhv_load_host_pmu(void);
|
|||
void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
|
||||
void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
|
||||
|
||||
int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
|
||||
void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
|
||||
|
||||
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
|
||||
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
|
||||
|
|
|
@ -35,6 +35,19 @@
|
|||
/* PACA save area size in u64 units (exgen, exmc, etc) */
|
||||
#define EX_SIZE 10
|
||||
|
||||
/* PACA save area offsets */
|
||||
#define EX_R9 0
|
||||
#define EX_R10 8
|
||||
#define EX_R11 16
|
||||
#define EX_R12 24
|
||||
#define EX_R13 32
|
||||
#define EX_DAR 40
|
||||
#define EX_DSISR 48
|
||||
#define EX_CCR 52
|
||||
#define EX_CFAR 56
|
||||
#define EX_PPR 64
|
||||
#define EX_CTR 72
|
||||
|
||||
/*
|
||||
* maximum recursive depth of MCE exceptions
|
||||
*/
|
||||
|
|
|
@ -147,6 +147,7 @@
|
|||
#define KVM_GUEST_MODE_SKIP 2
|
||||
#define KVM_GUEST_MODE_GUEST_HV 3
|
||||
#define KVM_GUEST_MODE_HOST_HV 4
|
||||
#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
|
||||
|
||||
#define KVM_INST_FETCH_FAILED -1
|
||||
|
||||
|
|
|
@ -153,9 +153,17 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
|
|||
return radix;
|
||||
}
|
||||
|
||||
int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
|
||||
|
||||
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Invalid HDSISR value which is used to indicate when HW has not set the reg.
|
||||
* Used to work around an errata.
|
||||
*/
|
||||
#define HDSISR_CANARY 0x7fff
|
||||
|
||||
/*
|
||||
* We use a lock bit in HPTE dword 0 to synchronize updates and
|
||||
* accesses to each HPTE, and another bit to indicate non-present
|
||||
|
|
|
@ -297,7 +297,6 @@ struct kvm_arch {
|
|||
u8 fwnmi_enabled;
|
||||
u8 secure_guest;
|
||||
u8 svm_enabled;
|
||||
bool threads_indep;
|
||||
bool nested_enable;
|
||||
bool dawr1_enabled;
|
||||
pgd_t *pgtable;
|
||||
|
@ -683,7 +682,12 @@ struct kvm_vcpu_arch {
|
|||
ulong fault_dar;
|
||||
u32 fault_dsisr;
|
||||
unsigned long intr_msr;
|
||||
ulong fault_gpa; /* guest real address of page fault (POWER9) */
|
||||
/*
|
||||
* POWER9 and later: fault_gpa contains the guest real address of page
|
||||
* fault for a radix guest, or segment descriptor (equivalent to result
|
||||
* from slbmfev of SLB entry that translated the EA) for hash guests.
|
||||
*/
|
||||
ulong fault_gpa;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BOOKE
|
||||
|
|
|
@ -129,6 +129,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
|
|||
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
|
||||
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
|
||||
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
|
||||
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
|
||||
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
|
||||
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
|
||||
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
|
||||
|
@ -606,6 +607,7 @@ extern void kvmppc_free_pimap(struct kvm *kvm);
|
|||
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
|
||||
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
|
||||
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
|
||||
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
|
||||
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
|
||||
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
|
||||
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
|
||||
|
@ -638,6 +640,8 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
|
|||
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
|
||||
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
|
||||
{ return 0; }
|
||||
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
|
||||
{ return 0; }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_XIVE
|
||||
|
@ -655,8 +659,6 @@ extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
|
|||
u32 *priority);
|
||||
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
|
||||
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
|
||||
extern void kvmppc_xive_init_module(void);
|
||||
extern void kvmppc_xive_exit_module(void);
|
||||
|
||||
extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
|
||||
struct kvm_vcpu *vcpu, u32 cpu);
|
||||
|
@ -671,6 +673,8 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
|
|||
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
|
||||
int level, bool line_status);
|
||||
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
|
||||
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
|
||||
extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -680,8 +684,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
|
|||
extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
|
||||
struct kvm_vcpu *vcpu, u32 cpu);
|
||||
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
|
||||
extern void kvmppc_xive_native_init_module(void);
|
||||
extern void kvmppc_xive_native_exit_module(void);
|
||||
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
|
||||
union kvmppc_one_reg *val);
|
||||
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
|
||||
|
@ -695,8 +697,6 @@ static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
|
|||
u32 *priority) { return -1; }
|
||||
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
|
||||
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
|
||||
static inline void kvmppc_xive_init_module(void) { }
|
||||
static inline void kvmppc_xive_exit_module(void) { }
|
||||
|
||||
static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
|
||||
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
|
||||
|
@ -711,14 +711,14 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
|
|||
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
|
||||
int level, bool line_status) { return -ENODEV; }
|
||||
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
|
||||
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
|
||||
static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
|
||||
|
||||
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
|
||||
{ return 0; }
|
||||
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
|
||||
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
|
||||
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
|
||||
static inline void kvmppc_xive_native_init_module(void) { }
|
||||
static inline void kvmppc_xive_native_exit_module(void) { }
|
||||
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
|
||||
union kvmppc_one_reg *val)
|
||||
{ return 0; }
|
||||
|
@ -754,7 +754,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
|
|||
unsigned long tce_value, unsigned long npages);
|
||||
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
|
||||
unsigned int yield_count);
|
||||
long kvmppc_h_random(struct kvm_vcpu *vcpu);
|
||||
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
|
||||
void kvmhv_commence_exit(int trap);
|
||||
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
|
||||
void kvmppc_subcore_enter_guest(void);
|
||||
|
|
|
@ -121,12 +121,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
|
|||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
|
||||
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
|
||||
#else
|
||||
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
|
||||
#endif
|
||||
|
||||
extern void switch_cop(struct mm_struct *next);
|
||||
extern int use_cop(unsigned long acop, struct mm_struct *mm);
|
||||
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
|
||||
|
|
|
@ -97,6 +97,18 @@ extern void div128_by_32(u64 dividend_high, u64 dividend_low,
|
|||
extern void secondary_cpu_time_init(void);
|
||||
extern void __init time_init(void);
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
static inline unsigned long test_irq_work_pending(void)
|
||||
{
|
||||
unsigned long x;
|
||||
|
||||
asm volatile("lbz %0,%1(13)"
|
||||
: "=r" (x)
|
||||
: "i" (offsetof(struct paca_struct, irq_work_pending)));
|
||||
return x;
|
||||
}
|
||||
#endif
|
||||
|
||||
DECLARE_PER_CPU(u64, decrementers_next_tb);
|
||||
|
||||
/* Convert timebase ticks to nanoseconds */
|
||||
|
|
|
@ -473,7 +473,6 @@ int main(void)
|
|||
OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
|
||||
OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
|
||||
OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
|
||||
OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
|
||||
OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
|
||||
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
|
||||
OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
|
||||
|
|
|
@ -21,22 +21,6 @@
|
|||
#include <asm/feature-fixups.h>
|
||||
#include <asm/kup.h>
|
||||
|
||||
/* PACA save area offsets (exgen, exmc, etc) */
|
||||
#define EX_R9 0
|
||||
#define EX_R10 8
|
||||
#define EX_R11 16
|
||||
#define EX_R12 24
|
||||
#define EX_R13 32
|
||||
#define EX_DAR 40
|
||||
#define EX_DSISR 48
|
||||
#define EX_CCR 52
|
||||
#define EX_CFAR 56
|
||||
#define EX_PPR 64
|
||||
#define EX_CTR 72
|
||||
.if EX_SIZE != 10
|
||||
.error "EX_SIZE is wrong"
|
||||
.endif
|
||||
|
||||
/*
|
||||
* Following are fixed section helper macros.
|
||||
*
|
||||
|
@ -133,7 +117,6 @@ name:
|
|||
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
|
||||
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
|
||||
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
|
||||
#define IKVM_SKIP .L_IKVM_SKIP_\name\() /* Generate KVM skip handler */
|
||||
#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */
|
||||
#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name
|
||||
#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */
|
||||
|
@ -190,9 +173,6 @@ do_define_int n
|
|||
.ifndef IMASK
|
||||
IMASK=0
|
||||
.endif
|
||||
.ifndef IKVM_SKIP
|
||||
IKVM_SKIP=0
|
||||
.endif
|
||||
.ifndef IKVM_REAL
|
||||
IKVM_REAL=0
|
||||
.endif
|
||||
|
@ -207,8 +187,6 @@ do_define_int n
|
|||
.endif
|
||||
.endm
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
/*
|
||||
* All interrupts which set HSRR registers, as well as SRESET and MCE and
|
||||
* syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
|
||||
|
@ -238,88 +216,28 @@ do_define_int n
|
|||
|
||||
/*
|
||||
* If an interrupt is taken while a guest is running, it is immediately routed
|
||||
* to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go first
|
||||
* to kvmppc_interrupt_hv, which handles the PR guest case.
|
||||
* to KVM to handle.
|
||||
*/
|
||||
#define kvmppc_interrupt kvmppc_interrupt_hv
|
||||
#else
|
||||
#define kvmppc_interrupt kvmppc_interrupt_pr
|
||||
#endif
|
||||
|
||||
.macro KVMTEST name
|
||||
.macro KVMTEST name handler
|
||||
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
|
||||
lbz r10,HSTATE_IN_GUEST(r13)
|
||||
cmpwi r10,0
|
||||
bne \name\()_kvm
|
||||
.endm
|
||||
|
||||
.macro GEN_KVM name
|
||||
.balign IFETCH_ALIGN_BYTES
|
||||
\name\()_kvm:
|
||||
|
||||
.if IKVM_SKIP
|
||||
cmpwi r10,KVM_GUEST_MODE_SKIP
|
||||
beq 89f
|
||||
.else
|
||||
BEGIN_FTR_SECTION
|
||||
ld r10,IAREA+EX_CFAR(r13)
|
||||
std r10,HSTATE_CFAR(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
|
||||
.endif
|
||||
|
||||
ld r10,IAREA+EX_CTR(r13)
|
||||
mtctr r10
|
||||
BEGIN_FTR_SECTION
|
||||
ld r10,IAREA+EX_PPR(r13)
|
||||
std r10,HSTATE_PPR(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
||||
ld r11,IAREA+EX_R11(r13)
|
||||
ld r12,IAREA+EX_R12(r13)
|
||||
std r12,HSTATE_SCRATCH0(r13)
|
||||
sldi r12,r9,32
|
||||
ld r9,IAREA+EX_R9(r13)
|
||||
ld r10,IAREA+EX_R10(r13)
|
||||
/* HSRR variants have the 0x2 bit added to their trap number */
|
||||
.if IHSRR_IF_HVMODE
|
||||
BEGIN_FTR_SECTION
|
||||
ori r12,r12,(IVEC + 0x2)
|
||||
li r10,(IVEC + 0x2)
|
||||
FTR_SECTION_ELSE
|
||||
ori r12,r12,(IVEC)
|
||||
li r10,(IVEC)
|
||||
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
|
||||
.elseif IHSRR
|
||||
ori r12,r12,(IVEC+ 0x2)
|
||||
li r10,(IVEC + 0x2)
|
||||
.else
|
||||
ori r12,r12,(IVEC)
|
||||
li r10,(IVEC)
|
||||
.endif
|
||||
b kvmppc_interrupt
|
||||
|
||||
.if IKVM_SKIP
|
||||
89: mtocrf 0x80,r9
|
||||
ld r10,IAREA+EX_CTR(r13)
|
||||
mtctr r10
|
||||
ld r9,IAREA+EX_R9(r13)
|
||||
ld r10,IAREA+EX_R10(r13)
|
||||
ld r11,IAREA+EX_R11(r13)
|
||||
ld r12,IAREA+EX_R12(r13)
|
||||
.if IHSRR_IF_HVMODE
|
||||
BEGIN_FTR_SECTION
|
||||
b kvmppc_skip_Hinterrupt
|
||||
FTR_SECTION_ELSE
|
||||
b kvmppc_skip_interrupt
|
||||
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
|
||||
.elseif IHSRR
|
||||
b kvmppc_skip_Hinterrupt
|
||||
.else
|
||||
b kvmppc_skip_interrupt
|
||||
.endif
|
||||
.endif
|
||||
.endm
|
||||
|
||||
#else
|
||||
.macro KVMTEST name
|
||||
.endm
|
||||
.macro GEN_KVM name
|
||||
.endm
|
||||
bne \handler
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* This is the BOOK3S interrupt entry code macro.
|
||||
|
@ -461,7 +379,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
|
|||
DEFINE_FIXED_SYMBOL(\name\()_common_real)
|
||||
\name\()_common_real:
|
||||
.if IKVM_REAL
|
||||
KVMTEST \name
|
||||
KVMTEST \name kvm_interrupt
|
||||
.endif
|
||||
|
||||
ld r10,PACAKMSR(r13) /* get MSR value for kernel */
|
||||
|
@ -484,7 +402,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
|
|||
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
|
||||
\name\()_common_virt:
|
||||
.if IKVM_VIRT
|
||||
KVMTEST \name
|
||||
KVMTEST \name kvm_interrupt
|
||||
1:
|
||||
.endif
|
||||
.endif /* IVIRT */
|
||||
|
@ -498,7 +416,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
|
|||
DEFINE_FIXED_SYMBOL(\name\()_common_real)
|
||||
\name\()_common_real:
|
||||
.if IKVM_REAL
|
||||
KVMTEST \name
|
||||
KVMTEST \name kvm_interrupt
|
||||
.endif
|
||||
.endm
|
||||
|
||||
|
@ -1000,8 +918,6 @@ EXC_COMMON_BEGIN(system_reset_common)
|
|||
EXCEPTION_RESTORE_REGS
|
||||
RFI_TO_USER_OR_KERNEL
|
||||
|
||||
GEN_KVM system_reset
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x200 - Machine Check Interrupt (MCE).
|
||||
|
@ -1070,7 +986,6 @@ INT_DEFINE_BEGIN(machine_check)
|
|||
ISET_RI=0
|
||||
IDAR=1
|
||||
IDSISR=1
|
||||
IKVM_SKIP=1
|
||||
IKVM_REAL=1
|
||||
INT_DEFINE_END(machine_check)
|
||||
|
||||
|
@ -1166,7 +1081,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
|
|||
/*
|
||||
* Check if we are coming from guest. If yes, then run the normal
|
||||
* exception handler which will take the
|
||||
* machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
|
||||
* machine_check_kvm->kvm_interrupt branch to deliver the MC event
|
||||
* to guest.
|
||||
*/
|
||||
lbz r11,HSTATE_IN_GUEST(r13)
|
||||
|
@ -1236,8 +1151,6 @@ EXC_COMMON_BEGIN(machine_check_common)
|
|||
bl machine_check_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM machine_check
|
||||
|
||||
|
||||
#ifdef CONFIG_PPC_P7_NAP
|
||||
/*
|
||||
|
@ -1342,7 +1255,6 @@ INT_DEFINE_BEGIN(data_access)
|
|||
IVEC=0x300
|
||||
IDAR=1
|
||||
IDSISR=1
|
||||
IKVM_SKIP=1
|
||||
IKVM_REAL=1
|
||||
INT_DEFINE_END(data_access)
|
||||
|
||||
|
@ -1373,8 +1285,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
|
|||
REST_NVGPRS(r1)
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM data_access
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x380 - Data Segment Interrupt (DSLB).
|
||||
|
@ -1396,7 +1306,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
|
|||
INT_DEFINE_BEGIN(data_access_slb)
|
||||
IVEC=0x380
|
||||
IDAR=1
|
||||
IKVM_SKIP=1
|
||||
IKVM_REAL=1
|
||||
INT_DEFINE_END(data_access_slb)
|
||||
|
||||
|
@ -1425,8 +1334,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
|
|||
bl do_bad_slb_fault
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM data_access_slb
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x400 - Instruction Storage Interrupt (ISI).
|
||||
|
@ -1463,8 +1370,6 @@ MMU_FTR_SECTION_ELSE
|
|||
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM instruction_access
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
|
||||
|
@ -1509,8 +1414,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
|
|||
bl do_bad_slb_fault
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM instruction_access_slb
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x500 - External Interrupt.
|
||||
|
@ -1555,8 +1458,6 @@ EXC_COMMON_BEGIN(hardware_interrupt_common)
|
|||
bl do_IRQ
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM hardware_interrupt
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x600 - Alignment Interrupt
|
||||
|
@ -1584,8 +1485,6 @@ EXC_COMMON_BEGIN(alignment_common)
|
|||
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM alignment
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x700 - Program Interrupt (program check).
|
||||
|
@ -1693,8 +1592,6 @@ EXC_COMMON_BEGIN(program_check_common)
|
|||
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM program_check
|
||||
|
||||
|
||||
/*
|
||||
* Interrupt 0x800 - Floating-Point Unavailable Interrupt.
|
||||
|
@ -1744,8 +1641,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
|||
b interrupt_return
|
||||
#endif
|
||||
|
||||
GEN_KVM fp_unavailable
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x900 - Decrementer Interrupt.
|
||||
|
@ -1784,8 +1679,6 @@ EXC_COMMON_BEGIN(decrementer_common)
|
|||
bl timer_interrupt
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM decrementer
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0x980 - Hypervisor Decrementer Interrupt.
|
||||
|
@ -1831,8 +1724,6 @@ EXC_COMMON_BEGIN(hdecrementer_common)
|
|||
ld r13,PACA_EXGEN+EX_R13(r13)
|
||||
HRFI_TO_KERNEL
|
||||
|
||||
GEN_KVM hdecrementer
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
|
||||
|
@ -1872,8 +1763,6 @@ EXC_COMMON_BEGIN(doorbell_super_common)
|
|||
#endif
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM doorbell_super
|
||||
|
||||
|
||||
EXC_REAL_NONE(0xb00, 0x100)
|
||||
EXC_VIRT_NONE(0x4b00, 0x100)
|
||||
|
@ -1923,7 +1812,7 @@ INT_DEFINE_END(system_call)
|
|||
GET_PACA(r13)
|
||||
std r10,PACA_EXGEN+EX_R10(r13)
|
||||
INTERRUPT_TO_KERNEL
|
||||
KVMTEST system_call /* uses r10, branch to system_call_kvm */
|
||||
KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
|
||||
mfctr r9
|
||||
#else
|
||||
mr r9,r13
|
||||
|
@ -1979,14 +1868,16 @@ EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
|
|||
EXC_VIRT_END(system_call, 0x4c00, 0x100)
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
|
||||
TRAMP_REAL_BEGIN(system_call_kvm)
|
||||
/*
|
||||
* This is a hcall, so register convention is as above, with these
|
||||
* differences:
|
||||
* r13 = PACA
|
||||
* ctr = orig r13
|
||||
* orig r10 saved in PACA
|
||||
*/
|
||||
TRAMP_REAL_BEGIN(kvm_hcall)
|
||||
std r9,PACA_EXGEN+EX_R9(r13)
|
||||
std r11,PACA_EXGEN+EX_R11(r13)
|
||||
std r12,PACA_EXGEN+EX_R12(r13)
|
||||
mfcr r9
|
||||
mfctr r10
|
||||
std r10,PACA_EXGEN+EX_R13(r13)
|
||||
li r10,0
|
||||
std r10,PACA_EXGEN+EX_CFAR(r13)
|
||||
std r10,PACA_EXGEN+EX_CTR(r13)
|
||||
/*
|
||||
* Save the PPR (on systems that support it) before changing to
|
||||
* HMT_MEDIUM. That allows the KVM code to save that value into the
|
||||
|
@ -1994,31 +1885,24 @@ TRAMP_REAL_BEGIN(system_call_kvm)
|
|||
*/
|
||||
BEGIN_FTR_SECTION
|
||||
mfspr r10,SPRN_PPR
|
||||
std r10,HSTATE_PPR(r13)
|
||||
std r10,PACA_EXGEN+EX_PPR(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
||||
|
||||
HMT_MEDIUM
|
||||
mfctr r10
|
||||
SET_SCRATCH0(r10)
|
||||
mfcr r10
|
||||
std r12,HSTATE_SCRATCH0(r13)
|
||||
sldi r12,r10,32
|
||||
ori r12,r12,0xc00
|
||||
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
/*
|
||||
* Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
|
||||
* Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
|
||||
* outside the head section.
|
||||
*/
|
||||
__LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
|
||||
__LOAD_FAR_HANDLER(r10, kvmppc_hcall)
|
||||
mtctr r10
|
||||
ld r10,PACA_EXGEN+EX_R10(r13)
|
||||
bctr
|
||||
#else
|
||||
ld r10,PACA_EXGEN+EX_R10(r13)
|
||||
b kvmppc_interrupt
|
||||
b kvmppc_hcall
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xd00 - Trace Interrupt.
|
||||
* This is a synchronous interrupt in response to instruction step or
|
||||
|
@ -2043,8 +1927,6 @@ EXC_COMMON_BEGIN(single_step_common)
|
|||
bl single_step_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM single_step
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
|
||||
|
@ -2063,7 +1945,6 @@ INT_DEFINE_BEGIN(h_data_storage)
|
|||
IHSRR=1
|
||||
IDAR=1
|
||||
IDSISR=1
|
||||
IKVM_SKIP=1
|
||||
IKVM_REAL=1
|
||||
IKVM_VIRT=1
|
||||
INT_DEFINE_END(h_data_storage)
|
||||
|
@ -2084,8 +1965,6 @@ MMU_FTR_SECTION_ELSE
|
|||
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM h_data_storage
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
|
||||
|
@ -2111,8 +1990,6 @@ EXC_COMMON_BEGIN(h_instr_storage_common)
|
|||
bl unknown_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM h_instr_storage
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
|
||||
|
@ -2137,8 +2014,6 @@ EXC_COMMON_BEGIN(emulation_assist_common)
|
|||
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM emulation_assist
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
|
||||
|
@ -2210,16 +2085,12 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
|
|||
EXCEPTION_RESTORE_REGS hsrr=1
|
||||
GEN_INT_ENTRY hmi_exception, virt=0
|
||||
|
||||
GEN_KVM hmi_exception_early
|
||||
|
||||
EXC_COMMON_BEGIN(hmi_exception_common)
|
||||
GEN_COMMON hmi_exception
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl handle_hmi_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM hmi_exception
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
|
||||
|
@ -2250,8 +2121,6 @@ EXC_COMMON_BEGIN(h_doorbell_common)
|
|||
#endif
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM h_doorbell
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
|
||||
|
@ -2278,8 +2147,6 @@ EXC_COMMON_BEGIN(h_virt_irq_common)
|
|||
bl do_IRQ
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM h_virt_irq
|
||||
|
||||
|
||||
EXC_REAL_NONE(0xec0, 0x20)
|
||||
EXC_VIRT_NONE(0x4ec0, 0x20)
|
||||
|
@ -2323,8 +2190,6 @@ EXC_COMMON_BEGIN(performance_monitor_common)
|
|||
bl performance_monitor_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM performance_monitor
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xf20 - Vector Unavailable Interrupt.
|
||||
|
@ -2374,8 +2239,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
|||
bl altivec_unavailable_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM altivec_unavailable
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xf40 - VSX Unavailable Interrupt.
|
||||
|
@ -2424,8 +2287,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
|
|||
bl vsx_unavailable_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM vsx_unavailable
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xf60 - Facility Unavailable Interrupt.
|
||||
|
@ -2454,8 +2315,6 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
|
|||
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM facility_unavailable
|
||||
|
||||
|
||||
/**
|
||||
* Interrupt 0xf60 - Hypervisor Facility Unavailable Interrupt.
|
||||
|
@ -2484,8 +2343,6 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
|
|||
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM h_facility_unavailable
|
||||
|
||||
|
||||
EXC_REAL_NONE(0xfa0, 0x20)
|
||||
EXC_VIRT_NONE(0x4fa0, 0x20)
|
||||
|
@ -2515,8 +2372,6 @@ EXC_COMMON_BEGIN(cbe_system_error_common)
|
|||
bl cbe_system_error_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM cbe_system_error
|
||||
|
||||
#else /* CONFIG_CBE_RAS */
|
||||
EXC_REAL_NONE(0x1200, 0x100)
|
||||
EXC_VIRT_NONE(0x5200, 0x100)
|
||||
|
@ -2548,8 +2403,6 @@ EXC_COMMON_BEGIN(instruction_breakpoint_common)
|
|||
bl instruction_breakpoint_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM instruction_breakpoint
|
||||
|
||||
|
||||
EXC_REAL_NONE(0x1400, 0x100)
|
||||
EXC_VIRT_NONE(0x5400, 0x100)
|
||||
|
@ -2670,8 +2523,6 @@ EXC_COMMON_BEGIN(denorm_exception_common)
|
|||
bl unknown_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM denorm_exception
|
||||
|
||||
|
||||
#ifdef CONFIG_CBE_RAS
|
||||
INT_DEFINE_BEGIN(cbe_maintenance)
|
||||
|
@ -2689,8 +2540,6 @@ EXC_COMMON_BEGIN(cbe_maintenance_common)
|
|||
bl cbe_maintenance_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM cbe_maintenance
|
||||
|
||||
#else /* CONFIG_CBE_RAS */
|
||||
EXC_REAL_NONE(0x1600, 0x100)
|
||||
EXC_VIRT_NONE(0x5600, 0x100)
|
||||
|
@ -2721,8 +2570,6 @@ EXC_COMMON_BEGIN(altivec_assist_common)
|
|||
#endif
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM altivec_assist
|
||||
|
||||
|
||||
#ifdef CONFIG_CBE_RAS
|
||||
INT_DEFINE_BEGIN(cbe_thermal)
|
||||
|
@ -2740,8 +2587,6 @@ EXC_COMMON_BEGIN(cbe_thermal_common)
|
|||
bl cbe_thermal_exception
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM cbe_thermal
|
||||
|
||||
#else /* CONFIG_CBE_RAS */
|
||||
EXC_REAL_NONE(0x1800, 0x100)
|
||||
EXC_VIRT_NONE(0x5800, 0x100)
|
||||
|
@ -2994,6 +2839,15 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
|
|||
|
||||
USE_TEXT_SECTION()
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
|
||||
kvm_interrupt:
|
||||
/*
|
||||
* The conditional branch in KVMTEST can't reach all the way,
|
||||
* make a stub.
|
||||
*/
|
||||
b kvmppc_interrupt
|
||||
#endif
|
||||
|
||||
_GLOBAL(do_uaccess_flush)
|
||||
UACCESS_FLUSH_FIXUP_SECTION
|
||||
nop
|
||||
|
@ -3009,32 +2863,6 @@ EXPORT_SYMBOL(do_uaccess_flush)
|
|||
MASKED_INTERRUPT
|
||||
MASKED_INTERRUPT hsrr=1
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
|
||||
kvmppc_skip_interrupt:
|
||||
/*
|
||||
* Here all GPRs are unchanged from when the interrupt happened
|
||||
* except for r13, which is saved in SPRG_SCRATCH0.
|
||||
*/
|
||||
mfspr r13, SPRN_SRR0
|
||||
addi r13, r13, 4
|
||||
mtspr SPRN_SRR0, r13
|
||||
GET_SCRATCH0(r13)
|
||||
RFI_TO_KERNEL
|
||||
b .
|
||||
|
||||
kvmppc_skip_Hinterrupt:
|
||||
/*
|
||||
* Here all GPRs are unchanged from when the interrupt happened
|
||||
* except for r13, which is saved in SPRG_SCRATCH0.
|
||||
*/
|
||||
mfspr r13, SPRN_HSRR0
|
||||
addi r13, r13, 4
|
||||
mtspr SPRN_HSRR0, r13
|
||||
GET_SCRATCH0(r13)
|
||||
HRFI_TO_KERNEL
|
||||
b .
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Relocation-on interrupts: A subset of the interrupts can be delivered
|
||||
* with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
|
||||
|
|
|
@ -432,16 +432,19 @@ device_initcall(stf_barrier_debugfs_init);
|
|||
|
||||
static void update_branch_cache_flush(void)
|
||||
{
|
||||
u32 *site;
|
||||
u32 *site, __maybe_unused *site2;
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
site = &patch__call_kvm_flush_link_stack;
|
||||
site2 = &patch__call_kvm_flush_link_stack_p9;
|
||||
// This controls the branch from guest_exit_cont to kvm_flush_link_stack
|
||||
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
|
||||
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
|
||||
patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
|
||||
} else {
|
||||
// Could use HW flush, but that could also flush count cache
|
||||
patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
|
||||
patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -508,16 +508,6 @@ EXPORT_SYMBOL(profile_pc);
|
|||
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
|
||||
*/
|
||||
#ifdef CONFIG_PPC64
|
||||
static inline unsigned long test_irq_work_pending(void)
|
||||
{
|
||||
unsigned long x;
|
||||
|
||||
asm volatile("lbz %0,%1(13)"
|
||||
: "=r" (x)
|
||||
: "i" (offsetof(struct paca_struct, irq_work_pending)));
|
||||
return x;
|
||||
}
|
||||
|
||||
static inline void set_irq_work_pending_flag(void)
|
||||
{
|
||||
asm volatile("stb %0,%1(13)" : :
|
||||
|
|
|
@ -57,6 +57,7 @@ kvm-pr-y := \
|
|||
book3s_32_mmu.o
|
||||
|
||||
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
|
||||
book3s_64_entry.o \
|
||||
tm.o
|
||||
|
||||
ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
||||
|
@ -86,6 +87,7 @@ kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
|
|||
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
|
||||
book3s_hv_hmi.o \
|
||||
book3s_hv_p9_entry.o \
|
||||
book3s_hv_rmhandlers.o \
|
||||
book3s_hv_rm_mmu.o \
|
||||
book3s_hv_ras.o \
|
||||
|
|
|
@ -171,6 +171,12 @@ void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
|
||||
|
||||
void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(kvmppc_core_queue_syscall);
|
||||
|
||||
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
|
||||
{
|
||||
/* might as well deliver this straight away */
|
||||
|
@ -1044,13 +1050,10 @@ static int kvmppc_book3s_init(void)
|
|||
#ifdef CONFIG_KVM_XICS
|
||||
#ifdef CONFIG_KVM_XIVE
|
||||
if (xics_on_xive()) {
|
||||
kvmppc_xive_init_module();
|
||||
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
|
||||
if (kvmppc_xive_native_supported()) {
|
||||
kvmppc_xive_native_init_module();
|
||||
if (kvmppc_xive_native_supported())
|
||||
kvm_register_device_ops(&kvm_xive_native_ops,
|
||||
KVM_DEV_TYPE_XIVE);
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
|
||||
|
@ -1060,12 +1063,6 @@ static int kvmppc_book3s_init(void)
|
|||
|
||||
static void kvmppc_book3s_exit(void)
|
||||
{
|
||||
#ifdef CONFIG_KVM_XICS
|
||||
if (xics_on_xive()) {
|
||||
kvmppc_xive_exit_module();
|
||||
kvmppc_xive_native_exit_module();
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
|
||||
kvmppc_book3s_exit_pr();
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,416 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/code-patching-asm.h>
|
||||
#include <asm/exception-64s.h>
|
||||
#include <asm/export.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_book3s_asm.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/reg.h>
|
||||
#include <asm/ultravisor-api.h>
|
||||
|
||||
/*
|
||||
* These are branched to from interrupt handlers in exception-64s.S which set
|
||||
* IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This is a hcall, so register convention is as
|
||||
* Documentation/powerpc/papr_hcalls.rst.
|
||||
*
|
||||
* This may also be a syscall from PR-KVM userspace that is to be
|
||||
* reflected to the PR guest kernel, so registers may be set up for
|
||||
* a system call rather than hcall. We don't currently clobber
|
||||
* anything here, but the 0xc00 handler has already clobbered CTR
|
||||
* and CR0, so PR-KVM can not support a guest kernel that preserves
|
||||
* those registers across its system calls.
|
||||
*
|
||||
* The state of registers is as kvmppc_interrupt, except CFAR is not
|
||||
* saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
|
||||
*/
|
||||
.global kvmppc_hcall
|
||||
.balign IFETCH_ALIGN_BYTES
|
||||
kvmppc_hcall:
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
lbz r10,HSTATE_IN_GUEST(r13)
|
||||
cmpwi r10,KVM_GUEST_MODE_HV_P9
|
||||
beq kvmppc_p9_exit_hcall
|
||||
#endif
|
||||
ld r10,PACA_EXGEN+EX_R13(r13)
|
||||
SET_SCRATCH0(r10)
|
||||
li r10,0xc00
|
||||
/* Now we look like kvmppc_interrupt */
|
||||
li r11,PACA_EXGEN
|
||||
b .Lgot_save_area
|
||||
|
||||
/*
|
||||
* KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
|
||||
* call convention:
|
||||
*
|
||||
* guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
|
||||
* guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
|
||||
* guest R13 also saved in SCRATCH0
|
||||
* R13 = PACA
|
||||
* R11 = (H)SRR0
|
||||
* R12 = (H)SRR1
|
||||
* R9 = guest CR
|
||||
* PPR is set to medium
|
||||
*
|
||||
* With the addition for KVM:
|
||||
* R10 = trap vector
|
||||
*/
|
||||
.global kvmppc_interrupt
|
||||
.balign IFETCH_ALIGN_BYTES
|
||||
kvmppc_interrupt:
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
std r10,HSTATE_SCRATCH0(r13)
|
||||
lbz r10,HSTATE_IN_GUEST(r13)
|
||||
cmpwi r10,KVM_GUEST_MODE_HV_P9
|
||||
beq kvmppc_p9_exit_interrupt
|
||||
ld r10,HSTATE_SCRATCH0(r13)
|
||||
#endif
|
||||
li r11,PACA_EXGEN
|
||||
cmpdi r10,0x200
|
||||
bgt+ .Lgot_save_area
|
||||
li r11,PACA_EXMC
|
||||
beq .Lgot_save_area
|
||||
li r11,PACA_EXNMI
|
||||
.Lgot_save_area:
|
||||
add r11,r11,r13
|
||||
BEGIN_FTR_SECTION
|
||||
ld r12,EX_CFAR(r11)
|
||||
std r12,HSTATE_CFAR(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
|
||||
ld r12,EX_CTR(r11)
|
||||
mtctr r12
|
||||
BEGIN_FTR_SECTION
|
||||
ld r12,EX_PPR(r11)
|
||||
std r12,HSTATE_PPR(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
||||
ld r12,EX_R12(r11)
|
||||
std r12,HSTATE_SCRATCH0(r13)
|
||||
sldi r12,r9,32
|
||||
or r12,r12,r10
|
||||
ld r9,EX_R9(r11)
|
||||
ld r10,EX_R10(r11)
|
||||
ld r11,EX_R11(r11)
|
||||
|
||||
/*
|
||||
* Hcalls and other interrupts come here after normalising register
|
||||
* contents and save locations:
|
||||
*
|
||||
* R12 = (guest CR << 32) | interrupt vector
|
||||
* R13 = PACA
|
||||
* guest R12 saved in shadow HSTATE_SCRATCH0
|
||||
* guest R13 saved in SPRN_SCRATCH0
|
||||
*/
|
||||
std r9,HSTATE_SCRATCH2(r13)
|
||||
lbz r9,HSTATE_IN_GUEST(r13)
|
||||
cmpwi r9,KVM_GUEST_MODE_SKIP
|
||||
beq- .Lmaybe_skip
|
||||
.Lno_skip:
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
||||
cmpwi r9,KVM_GUEST_MODE_GUEST
|
||||
beq kvmppc_interrupt_pr
|
||||
#endif
|
||||
b kvmppc_interrupt_hv
|
||||
#else
|
||||
b kvmppc_interrupt_pr
|
||||
#endif
|
||||
|
||||
/*
|
||||
* "Skip" interrupts are part of a trick KVM uses a with hash guests to load
|
||||
* the faulting instruction in guest memory from the hypervisor without
|
||||
* walking page tables.
|
||||
*
|
||||
* When the guest takes a fault that requires the hypervisor to load the
|
||||
* instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
|
||||
* and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
|
||||
* MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
|
||||
* but loads and stores will access the guest context. This is used to load
|
||||
* the faulting instruction using the faulting guest effective address.
|
||||
*
|
||||
* However the guest context may not be able to translate, or it may cause a
|
||||
* machine check or other issue, which results in a fault in the host
|
||||
* (even with KVM-HV).
|
||||
*
|
||||
* These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
|
||||
* are (or are likely) caused by that load, the instruction is skipped by
|
||||
* just returning with the PC advanced +4, where it is noticed the load did
|
||||
* not execute and it goes to the slow path which walks the page tables to
|
||||
* read guest memory.
|
||||
*/
|
||||
.Lmaybe_skip:
|
||||
cmpwi r12,BOOK3S_INTERRUPT_MACHINE_CHECK
|
||||
beq 1f
|
||||
cmpwi r12,BOOK3S_INTERRUPT_DATA_STORAGE
|
||||
beq 1f
|
||||
cmpwi r12,BOOK3S_INTERRUPT_DATA_SEGMENT
|
||||
beq 1f
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
/* HSRR interrupts get 2 added to interrupt number */
|
||||
cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
|
||||
beq 2f
|
||||
#endif
|
||||
b .Lno_skip
|
||||
1: mfspr r9,SPRN_SRR0
|
||||
addi r9,r9,4
|
||||
mtspr SPRN_SRR0,r9
|
||||
ld r12,HSTATE_SCRATCH0(r13)
|
||||
ld r9,HSTATE_SCRATCH2(r13)
|
||||
GET_SCRATCH0(r13)
|
||||
RFI_TO_KERNEL
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
2: mfspr r9,SPRN_HSRR0
|
||||
addi r9,r9,4
|
||||
mtspr SPRN_HSRR0,r9
|
||||
ld r12,HSTATE_SCRATCH0(r13)
|
||||
ld r9,HSTATE_SCRATCH2(r13)
|
||||
GET_SCRATCH0(r13)
|
||||
HRFI_TO_KERNEL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
|
||||
/* Stack frame offsets for kvmppc_p9_enter_guest */
|
||||
#define SFS (144 + STACK_FRAME_MIN_SIZE)
|
||||
#define STACK_SLOT_NVGPRS (SFS - 144) /* 18 gprs */
|
||||
|
||||
/*
|
||||
* void kvmppc_p9_enter_guest(struct vcpu *vcpu);
|
||||
*
|
||||
* Enter the guest on a ISAv3.0 or later system.
|
||||
*/
|
||||
.balign IFETCH_ALIGN_BYTES
|
||||
_GLOBAL(kvmppc_p9_enter_guest)
|
||||
EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
|
||||
mflr r0
|
||||
std r0,PPC_LR_STKOFF(r1)
|
||||
stdu r1,-SFS(r1)
|
||||
|
||||
std r1,HSTATE_HOST_R1(r13)
|
||||
|
||||
mfcr r4
|
||||
stw r4,SFS+8(r1)
|
||||
|
||||
reg = 14
|
||||
.rept 18
|
||||
std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
|
||||
reg = reg + 1
|
||||
.endr
|
||||
|
||||
ld r4,VCPU_LR(r3)
|
||||
mtlr r4
|
||||
ld r4,VCPU_CTR(r3)
|
||||
mtctr r4
|
||||
ld r4,VCPU_XER(r3)
|
||||
mtspr SPRN_XER,r4
|
||||
|
||||
ld r1,VCPU_CR(r3)
|
||||
|
||||
BEGIN_FTR_SECTION
|
||||
ld r4,VCPU_CFAR(r3)
|
||||
mtspr SPRN_CFAR,r4
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
|
||||
BEGIN_FTR_SECTION
|
||||
ld r4,VCPU_PPR(r3)
|
||||
mtspr SPRN_PPR,r4
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
||||
|
||||
reg = 4
|
||||
.rept 28
|
||||
ld reg,__VCPU_GPR(reg)(r3)
|
||||
reg = reg + 1
|
||||
.endr
|
||||
|
||||
ld r4,VCPU_KVM(r3)
|
||||
lbz r4,KVM_SECURE_GUEST(r4)
|
||||
cmpdi r4,0
|
||||
ld r4,VCPU_GPR(R4)(r3)
|
||||
bne .Lret_to_ultra
|
||||
|
||||
mtcr r1
|
||||
|
||||
ld r0,VCPU_GPR(R0)(r3)
|
||||
ld r1,VCPU_GPR(R1)(r3)
|
||||
ld r2,VCPU_GPR(R2)(r3)
|
||||
ld r3,VCPU_GPR(R3)(r3)
|
||||
|
||||
HRFI_TO_GUEST
|
||||
b .
|
||||
|
||||
/*
|
||||
* Use UV_RETURN ultracall to return control back to the Ultravisor
|
||||
* after processing a hypercall or interrupt that was forwarded
|
||||
* (a.k.a. reflected) to the Hypervisor.
|
||||
*
|
||||
* All registers have already been reloaded except the ucall requires:
|
||||
* R0 = hcall result
|
||||
* R2 = SRR1, so UV can detect a synthesized interrupt (if any)
|
||||
* R3 = UV_RETURN
|
||||
*/
|
||||
.Lret_to_ultra:
|
||||
mtcr r1
|
||||
ld r1,VCPU_GPR(R1)(r3)
|
||||
|
||||
ld r0,VCPU_GPR(R3)(r3)
|
||||
mfspr r2,SPRN_SRR1
|
||||
LOAD_REG_IMMEDIATE(r3, UV_RETURN)
|
||||
sc 2
|
||||
|
||||
/*
|
||||
* kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
|
||||
* above if the interrupt was taken for a guest that was entered via
|
||||
* kvmppc_p9_enter_guest().
|
||||
*
|
||||
* The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
|
||||
* and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
|
||||
* establishes the host stack and registers to return from the
|
||||
* kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
|
||||
* (SPRs and FP, VEC, etc).
|
||||
*/
|
||||
.balign IFETCH_ALIGN_BYTES
|
||||
kvmppc_p9_exit_hcall:
|
||||
mfspr r11,SPRN_SRR0
|
||||
mfspr r12,SPRN_SRR1
|
||||
li r10,0xc00
|
||||
std r10,HSTATE_SCRATCH0(r13)
|
||||
|
||||
.balign IFETCH_ALIGN_BYTES
|
||||
kvmppc_p9_exit_interrupt:
|
||||
/*
|
||||
* If set to KVM_GUEST_MODE_HV_P9 but we're still in the
|
||||
* hypervisor, that means we can't return from the entry stack.
|
||||
*/
|
||||
rldicl. r10,r12,64-MSR_HV_LG,63
|
||||
bne- kvmppc_p9_bad_interrupt
|
||||
|
||||
std r1,HSTATE_SCRATCH1(r13)
|
||||
std r3,HSTATE_SCRATCH2(r13)
|
||||
ld r1,HSTATE_HOST_R1(r13)
|
||||
ld r3,HSTATE_KVM_VCPU(r13)
|
||||
|
||||
std r9,VCPU_CR(r3)
|
||||
|
||||
1:
|
||||
std r11,VCPU_PC(r3)
|
||||
std r12,VCPU_MSR(r3)
|
||||
|
||||
reg = 14
|
||||
.rept 18
|
||||
std reg,__VCPU_GPR(reg)(r3)
|
||||
reg = reg + 1
|
||||
.endr
|
||||
|
||||
/* r1, r3, r9-r13 are saved to vcpu by C code */
|
||||
std r0,VCPU_GPR(R0)(r3)
|
||||
std r2,VCPU_GPR(R2)(r3)
|
||||
reg = 4
|
||||
.rept 5
|
||||
std reg,__VCPU_GPR(reg)(r3)
|
||||
reg = reg + 1
|
||||
.endr
|
||||
|
||||
ld r2,PACATOC(r13)
|
||||
|
||||
mflr r4
|
||||
std r4,VCPU_LR(r3)
|
||||
mfspr r4,SPRN_XER
|
||||
std r4,VCPU_XER(r3)
|
||||
|
||||
reg = 14
|
||||
.rept 18
|
||||
ld reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
|
||||
reg = reg + 1
|
||||
.endr
|
||||
|
||||
lwz r4,SFS+8(r1)
|
||||
mtcr r4
|
||||
|
||||
/*
|
||||
* Flush the link stack here, before executing the first blr on the
|
||||
* way out of the guest.
|
||||
*
|
||||
* The link stack won't match coming out of the guest anyway so the
|
||||
* only cost is the flush itself. The call clobbers r0.
|
||||
*/
|
||||
1: nop
|
||||
patch_site 1b patch__call_kvm_flush_link_stack_p9
|
||||
|
||||
addi r1,r1,SFS
|
||||
ld r0,PPC_LR_STKOFF(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
/*
|
||||
* Took an interrupt somewhere right before HRFID to guest, so registers are
|
||||
* in a bad way. Return things hopefully enough to run host virtual code and
|
||||
* run the Linux interrupt handler (SRESET or MCE) to print something useful.
|
||||
*
|
||||
* We could be really clever and save all host registers in known locations
|
||||
* before setting HSTATE_IN_GUEST, then restoring them all here, and setting
|
||||
* return address to a fixup that sets them up again. But that's a lot of
|
||||
* effort for a small bit of code. Lots of other things to do first.
|
||||
*/
|
||||
kvmppc_p9_bad_interrupt:
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
/*
|
||||
* Hash host doesn't try to recover MMU (requires host SLB reload)
|
||||
*/
|
||||
b .
|
||||
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
|
||||
/*
|
||||
* Clean up guest registers to give host a chance to run.
|
||||
*/
|
||||
li r10,0
|
||||
mtspr SPRN_AMR,r10
|
||||
mtspr SPRN_IAMR,r10
|
||||
mtspr SPRN_CIABR,r10
|
||||
mtspr SPRN_DAWRX0,r10
|
||||
BEGIN_FTR_SECTION
|
||||
mtspr SPRN_DAWRX1,r10
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
|
||||
mtspr SPRN_PID,r10
|
||||
|
||||
/*
|
||||
* Switch to host MMU mode
|
||||
*/
|
||||
ld r10, HSTATE_KVM_VCPU(r13)
|
||||
ld r10, VCPU_KVM(r10)
|
||||
lwz r10, KVM_HOST_LPID(r10)
|
||||
mtspr SPRN_LPID,r10
|
||||
|
||||
ld r10, HSTATE_KVM_VCPU(r13)
|
||||
ld r10, VCPU_KVM(r10)
|
||||
ld r10, KVM_HOST_LPCR(r10)
|
||||
mtspr SPRN_LPCR,r10
|
||||
|
||||
/*
|
||||
* Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
|
||||
* MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
|
||||
*/
|
||||
li r10,KVM_GUEST_MODE_NONE
|
||||
stb r10,HSTATE_IN_GUEST(r13)
|
||||
li r10,MSR_RI
|
||||
andc r12,r12,r10
|
||||
|
||||
/*
|
||||
* Go back to interrupt handler. MCE and SRESET have their specific
|
||||
* PACA save area so they should be used directly. They set up their
|
||||
* own stack. The other handlers all use EXGEN. They will use the
|
||||
* guest r1 if it looks like a kernel stack, so just load the
|
||||
* emergency stack and go to program check for all other interrupts.
|
||||
*/
|
||||
ld r10,HSTATE_SCRATCH0(r13)
|
||||
cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
|
||||
beq machine_check_common
|
||||
|
||||
cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
|
||||
beq system_reset_common
|
||||
|
||||
b .
|
||||
#endif
|
|
@ -391,10 +391,6 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
|||
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
||||
/* liobn, ioba, tce); */
|
||||
|
||||
/* For radix, we might be in virtual mode, so punt */
|
||||
if (kvm_is_radix(vcpu->kvm))
|
||||
return H_TOO_HARD;
|
||||
|
||||
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||
if (!stt)
|
||||
return H_TOO_HARD;
|
||||
|
@ -489,10 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
|||
bool prereg = false;
|
||||
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||
|
||||
/* For radix, we might be in virtual mode, so punt */
|
||||
if (kvm_is_radix(vcpu->kvm))
|
||||
return H_TOO_HARD;
|
||||
|
||||
/*
|
||||
* used to check for invalidations in progress
|
||||
*/
|
||||
|
@ -602,10 +594,6 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
|
|||
long i, ret;
|
||||
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||
|
||||
/* For radix, we might be in virtual mode, so punt */
|
||||
if (kvm_is_radix(vcpu->kvm))
|
||||
return H_TOO_HARD;
|
||||
|
||||
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||
if (!stt)
|
||||
return H_TOO_HARD;
|
||||
|
|
File diff suppressed because it is too large
@ -34,21 +34,6 @@
|
|||
#include "book3s_xics.h"
|
||||
#include "book3s_xive.h"
|
||||
|
||||
/*
|
||||
* The XIVE module will populate these when it loads
|
||||
*/
|
||||
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
|
||||
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
|
||||
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
|
||||
unsigned long mfrr);
|
||||
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
|
||||
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
|
||||
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
|
||||
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
|
||||
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
|
||||
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
|
||||
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
|
||||
|
||||
/*
|
||||
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
|
||||
* should be power of 2.
|
||||
|
@ -196,16 +181,9 @@ int kvmppc_hwrng_present(void)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
|
||||
|
||||
long kvmppc_h_random(struct kvm_vcpu *vcpu)
|
||||
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* Only need to do the expensive mfmsr() on radix */
|
||||
if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
|
||||
r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
|
||||
else
|
||||
r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
|
||||
if (r)
|
||||
if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
|
||||
return H_SUCCESS;
|
||||
|
||||
return H_HARDWARE;
|
||||
|
@ -221,15 +199,6 @@ void kvmhv_rm_send_ipi(int cpu)
|
|||
void __iomem *xics_phys;
|
||||
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
|
||||
|
||||
/* For a nested hypervisor, use the XICS via hcall */
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
|
||||
IPI_PRIORITY);
|
||||
return;
|
||||
}
|
||||
|
||||
/* On POWER9 we can use msgsnd for any destination cpu. */
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
|
||||
msg |= get_hard_smp_processor_id(cpu);
|
||||
|
@ -442,19 +411,12 @@ static long kvmppc_read_one_intr(bool *again)
|
|||
return 1;
|
||||
|
||||
/* Now read the interrupt from the ICP */
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
|
||||
xirr = cpu_to_be32(retbuf[0]);
|
||||
} else {
|
||||
xics_phys = local_paca->kvm_hstate.xics_phys;
|
||||
rc = 0;
|
||||
if (!xics_phys)
|
||||
rc = opal_int_get_xirr(&xirr, false);
|
||||
else
|
||||
xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
|
||||
}
|
||||
xics_phys = local_paca->kvm_hstate.xics_phys;
|
||||
rc = 0;
|
||||
if (!xics_phys)
|
||||
rc = opal_int_get_xirr(&xirr, false);
|
||||
else
|
||||
xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
|
||||
if (rc < 0)
|
||||
return 1;
|
||||
|
||||
|
@ -483,13 +445,7 @@ static long kvmppc_read_one_intr(bool *again)
|
|||
*/
|
||||
if (xisr == XICS_IPI) {
|
||||
rc = 0;
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
plpar_hcall_raw(H_IPI, retbuf,
|
||||
hard_smp_processor_id(), 0xff);
|
||||
plpar_hcall_raw(H_EOI, retbuf, h_xirr);
|
||||
} else if (xics_phys) {
|
||||
if (xics_phys) {
|
||||
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
|
||||
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
|
||||
} else {
|
||||
|
@ -515,13 +471,7 @@ static long kvmppc_read_one_intr(bool *again)
|
|||
/* We raced with the host,
|
||||
* we need to resend that IPI, bummer
|
||||
*/
|
||||
if (kvmhv_on_pseries()) {
|
||||
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
|
||||
|
||||
plpar_hcall_raw(H_IPI, retbuf,
|
||||
hard_smp_processor_id(),
|
||||
IPI_PRIORITY);
|
||||
} else if (xics_phys)
|
||||
if (xics_phys)
|
||||
__raw_rm_writeb(IPI_PRIORITY,
|
||||
xics_phys + XICS_MFRR);
|
||||
else
|
||||
|
@ -541,22 +491,13 @@ static long kvmppc_read_one_intr(bool *again)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_KVM_XICS
|
||||
static inline bool is_rm(void)
|
||||
{
|
||||
return !(mfmsr() & MSR_DR);
|
||||
}
|
||||
|
||||
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!kvmppc_xics_enabled(vcpu))
|
||||
return H_TOO_HARD;
|
||||
if (xics_on_xive()) {
|
||||
if (is_rm())
|
||||
return xive_rm_h_xirr(vcpu);
|
||||
if (unlikely(!__xive_vm_h_xirr))
|
||||
return H_NOT_AVAILABLE;
|
||||
return __xive_vm_h_xirr(vcpu);
|
||||
} else
|
||||
if (xics_on_xive())
|
||||
return xive_rm_h_xirr(vcpu);
|
||||
else
|
||||
return xics_rm_h_xirr(vcpu);
|
||||
}
|
||||
|
||||
|
@ -565,13 +506,9 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
|
|||
if (!kvmppc_xics_enabled(vcpu))
|
||||
return H_TOO_HARD;
|
||||
vcpu->arch.regs.gpr[5] = get_tb();
|
||||
if (xics_on_xive()) {
|
||||
if (is_rm())
|
||||
return xive_rm_h_xirr(vcpu);
|
||||
if (unlikely(!__xive_vm_h_xirr))
|
||||
return H_NOT_AVAILABLE;
|
||||
return __xive_vm_h_xirr(vcpu);
|
||||
} else
|
||||
if (xics_on_xive())
|
||||
return xive_rm_h_xirr(vcpu);
|
||||
else
|
||||
return xics_rm_h_xirr(vcpu);
|
||||
}
|
||||
|
||||
|
@ -579,13 +516,9 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
|
|||
{
|
||||
if (!kvmppc_xics_enabled(vcpu))
|
||||
return H_TOO_HARD;
|
||||
if (xics_on_xive()) {
|
||||
if (is_rm())
|
||||
return xive_rm_h_ipoll(vcpu, server);
|
||||
if (unlikely(!__xive_vm_h_ipoll))
|
||||
return H_NOT_AVAILABLE;
|
||||
return __xive_vm_h_ipoll(vcpu, server);
|
||||
} else
|
||||
if (xics_on_xive())
|
||||
return xive_rm_h_ipoll(vcpu, server);
|
||||
else
|
||||
return H_TOO_HARD;
|
||||
}
|
||||
|
||||
|
@ -594,13 +527,9 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
|
|||
{
|
||||
if (!kvmppc_xics_enabled(vcpu))
|
||||
return H_TOO_HARD;
|
||||
if (xics_on_xive()) {
|
||||
if (is_rm())
|
||||
return xive_rm_h_ipi(vcpu, server, mfrr);
|
||||
if (unlikely(!__xive_vm_h_ipi))
|
||||
return H_NOT_AVAILABLE;
|
||||
return __xive_vm_h_ipi(vcpu, server, mfrr);
|
||||
} else
|
||||
if (xics_on_xive())
|
||||
return xive_rm_h_ipi(vcpu, server, mfrr);
|
||||
else
|
||||
return xics_rm_h_ipi(vcpu, server, mfrr);
|
||||
}
|
||||
|
||||
|
@ -608,13 +537,9 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
|
|||
{
|
||||
if (!kvmppc_xics_enabled(vcpu))
|
||||
return H_TOO_HARD;
|
||||
if (xics_on_xive()) {
|
||||
if (is_rm())
|
||||
return xive_rm_h_cppr(vcpu, cppr);
|
||||
if (unlikely(!__xive_vm_h_cppr))
|
||||
return H_NOT_AVAILABLE;
|
||||
return __xive_vm_h_cppr(vcpu, cppr);
|
||||
} else
|
||||
if (xics_on_xive())
|
||||
return xive_rm_h_cppr(vcpu, cppr);
|
||||
else
|
||||
return xics_rm_h_cppr(vcpu, cppr);
|
||||
}
|
||||
|
||||
|
@ -622,13 +547,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
|
|||
{
|
||||
if (!kvmppc_xics_enabled(vcpu))
|
||||
return H_TOO_HARD;
|
||||
if (xics_on_xive()) {
|
||||
if (is_rm())
|
||||
return xive_rm_h_eoi(vcpu, xirr);
|
||||
if (unlikely(!__xive_vm_h_eoi))
|
||||
return H_NOT_AVAILABLE;
|
||||
return __xive_vm_h_eoi(vcpu, xirr);
|
||||
} else
|
||||
if (xics_on_xive())
|
||||
return xive_rm_h_eoi(vcpu, xirr);
|
||||
else
|
||||
return xics_rm_h_eoi(vcpu, xirr);
|
||||
}
|
||||
#endif /* CONFIG_KVM_XICS */
|
||||
|
|
|
@ -58,7 +58,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
|
|||
/*
|
||||
* Put whatever is in the decrementer into the
|
||||
* hypervisor decrementer.
|
||||
* Because of a hardware deviation in P8 and P9,
|
||||
* Because of a hardware deviation in P8,
|
||||
* we need to set LPCR[HDICE] before writing HDEC.
|
||||
*/
|
||||
ld r5, HSTATE_KVM_VCORE(r13)
|
||||
|
@ -67,15 +67,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
|
|||
ori r8, r9, LPCR_HDICE
|
||||
mtspr SPRN_LPCR, r8
|
||||
isync
|
||||
andis. r0, r9, LPCR_LD@h
|
||||
mfspr r8,SPRN_DEC
|
||||
mftb r7
|
||||
BEGIN_FTR_SECTION
|
||||
/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
|
||||
bne 32f
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||
extsw r8,r8
|
||||
32: mtspr SPRN_HDEC,r8
|
||||
mtspr SPRN_HDEC,r8
|
||||
add r8,r8,r7
|
||||
std r8,HSTATE_DECEXP(r13)
|
||||
|
||||
|
|
|
@ -0,0 +1,508 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/asm-prototypes.h>
|
||||
#include <asm/dbell.h>
|
||||
#include <asm/kvm_ppc.h>
|
||||
#include <asm/ppc-opcode.h>
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
|
||||
static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
|
||||
{
|
||||
struct kvmppc_vcore *vc = vcpu->arch.vcore;
|
||||
u64 tb = mftb() - vc->tb_offset_applied;
|
||||
|
||||
vcpu->arch.cur_activity = next;
|
||||
vcpu->arch.cur_tb_start = tb;
|
||||
}
|
||||
|
||||
static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
|
||||
{
|
||||
struct kvmppc_vcore *vc = vcpu->arch.vcore;
|
||||
struct kvmhv_tb_accumulator *curr;
|
||||
u64 tb = mftb() - vc->tb_offset_applied;
|
||||
u64 prev_tb;
|
||||
u64 delta;
|
||||
u64 seq;
|
||||
|
||||
curr = vcpu->arch.cur_activity;
|
||||
vcpu->arch.cur_activity = next;
|
||||
prev_tb = vcpu->arch.cur_tb_start;
|
||||
vcpu->arch.cur_tb_start = tb;
|
||||
|
||||
if (!curr)
|
||||
return;
|
||||
|
||||
delta = tb - prev_tb;
|
||||
|
||||
seq = curr->seqcount;
|
||||
curr->seqcount = seq + 1;
|
||||
smp_wmb();
|
||||
curr->tb_total += delta;
|
||||
if (seq == 0 || delta < curr->tb_min)
|
||||
curr->tb_min = delta;
|
||||
if (delta > curr->tb_max)
|
||||
curr->tb_max = delta;
|
||||
smp_wmb();
|
||||
curr->seqcount = seq + 2;
|
||||
}
|
||||
|
||||
#define start_timing(vcpu, next) __start_timing(vcpu, next)
|
||||
#define end_timing(vcpu) __start_timing(vcpu, NULL)
|
||||
#define accumulate_time(vcpu, next) __accumulate_time(vcpu, next)
|
||||
#else
|
||||
#define start_timing(vcpu, next) do {} while (0)
|
||||
#define end_timing(vcpu) do {} while (0)
|
||||
#define accumulate_time(vcpu, next) do {} while (0)
|
||||
#endif
|
||||
|
||||
static inline void mfslb(unsigned int idx, u64 *slbee, u64 *slbev)
{
asm volatile("slbmfev %0,%1" : "=r" (*slbev) : "r" (idx));
asm volatile("slbmfee %0,%1" : "=r" (*slbee) : "r" (idx));
}

static inline void mtslb(u64 slbee, u64 slbev)
{
asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee));
}

static inline void clear_slb_entry(unsigned int idx)
{
mtslb(idx, 0);
}

static inline void slb_clear_invalidate_partition(void)
{
clear_slb_entry(0);
asm volatile(PPC_SLBIA(6));
}

/*
* Malicious or buggy radix guests may have inserted SLB entries
* (only 0..3 because radix always runs with UPRT=1), so these must
* be cleared here to avoid side-channels. slbmte is used rather
* than slbia, as it won't clear cached translations.
*/
static void radix_clear_slb(void)
{
int i;

for (i = 0; i < 4; i++)
clear_slb_entry(i);
}

static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{
struct kvm_nested_guest *nested = vcpu->arch.nested;
u32 lpid;

lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;

/*
* All the isync()s are overkill but trivially follow the ISA
* requirements. Some can likely be replaced with justification
* comment for why they are not needed.
*/
isync();
mtspr(SPRN_LPID, lpid);
isync();
mtspr(SPRN_LPCR, lpcr);
isync();
mtspr(SPRN_PID, vcpu->arch.pid);
isync();
}

static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{
u32 lpid;
int i;

lpid = kvm->arch.lpid;

mtspr(SPRN_LPID, lpid);
mtspr(SPRN_LPCR, lpcr);
mtspr(SPRN_PID, vcpu->arch.pid);

for (i = 0; i < vcpu->arch.slb_max; i++)
mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);

isync();
}

static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{
isync();
mtspr(SPRN_PID, pid);
isync();
mtspr(SPRN_LPID, kvm->arch.host_lpid);
isync();
mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
isync();

if (!radix_enabled())
slb_restore_bolted_realmode();
}

static void save_clear_host_mmu(struct kvm *kvm)
{
if (!radix_enabled()) {
/*
* Hash host could save and restore host SLB entries to
* reduce SLB fault overheads of VM exits, but for now the
* existing code clears all entries and restores just the
* bolted ones when switching back to host.
*/
slb_clear_invalidate_partition();
}
}

static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (kvm_is_radix(kvm)) {
radix_clear_slb();
} else {
int i;
int nr = 0;

/*
* This must run before switching to host (radix host can't
* access all SLBs).
*/
for (i = 0; i < vcpu->arch.slb_nr; i++) {
u64 slbee, slbev;
mfslb(i, &slbee, &slbev);
if (slbee & SLB_ESID_V) {
vcpu->arch.slb[nr].orige = slbee | i;
vcpu->arch.slb[nr].origv = slbev;
nr++;
}
}
vcpu->arch.slb_max = nr;
slb_clear_invalidate_partition();
}
}

int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
s64 hdec;
u64 tb, purr, spurr;
u64 *exsave;
bool ri_set;
int trap;
unsigned long msr;
unsigned long host_hfscr;
unsigned long host_ciabr;
unsigned long host_dawr0;
unsigned long host_dawrx0;
unsigned long host_psscr;
unsigned long host_pidr;
unsigned long host_dawr1;
unsigned long host_dawrx1;

hdec = time_limit - mftb();
if (hdec < 0)
return BOOK3S_INTERRUPT_HV_DECREMENTER;

WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));

start_timing(vcpu, &vcpu->arch.rm_entry);

vcpu->arch.ceded = 0;

if (vc->tb_offset) {
u64 new_tb = mftb() + vc->tb_offset;
mtspr(SPRN_TBU40, new_tb);
tb = mftb();
if ((tb & 0xffffff) < (new_tb & 0xffffff))
mtspr(SPRN_TBU40, new_tb + 0x1000000);
vc->tb_offset_applied = vc->tb_offset;
}
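/*
 * Illustrative note (not part of the patch): mtspr(SPRN_TBU40) only sets
 * the upper 40 bits of the timebase; the low 24 bits keep ticking. If the
 * running low 24 bits are behind the target's low bits after the write,
 * the timebase as a whole ended up slightly in the past, so the upper 40
 * bits are bumped by one (adding 0x1000000 carries into bit 24). A minimal
 * helper capturing that pattern, with an assumed name:
 */
static inline void set_tb_via_tbu40(u64 new_tb)
{
	mtspr(SPRN_TBU40, new_tb);
	if ((mftb() & 0xffffff) < (new_tb & 0xffffff))
		mtspr(SPRN_TBU40, new_tb + 0x1000000);
}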
msr = mfmsr();

host_hfscr = mfspr(SPRN_HFSCR);
host_ciabr = mfspr(SPRN_CIABR);
host_dawr0 = mfspr(SPRN_DAWR0);
host_dawrx0 = mfspr(SPRN_DAWRX0);
host_psscr = mfspr(SPRN_PSSCR);
host_pidr = mfspr(SPRN_PID);
if (cpu_has_feature(CPU_FTR_DAWR1)) {
host_dawr1 = mfspr(SPRN_DAWR1);
host_dawrx1 = mfspr(SPRN_DAWRX1);
}

if (vc->pcr)
mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
mtspr(SPRN_DPDES, vc->dpdes);
mtspr(SPRN_VTB, vc->vtb);

local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
mtspr(SPRN_PURR, vcpu->arch.purr);
mtspr(SPRN_SPURR, vcpu->arch.spurr);

if (dawr_enabled()) {
mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
if (cpu_has_feature(CPU_FTR_DAWR1)) {
mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
}
}
mtspr(SPRN_CIABR, vcpu->arch.ciabr);
mtspr(SPRN_IC, vcpu->arch.ic);

mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
(local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));

mtspr(SPRN_HFSCR, vcpu->arch.hfscr);

mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
/*
* On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
* Interrupt (HDSI) the HDSISR is not updated at all.
*
* To work around this we put a canary value into the HDSISR before
* returning to a guest and then check for this canary when we take a
* HDSI. If we find the canary on a HDSI, we know the hardware didn't
* update the HDSISR. In this case we return to the guest to retake the
* HDSI, which should correctly update the HDSISR the second time it is
* taken.
*
* Just do this on all p9 processors for now.
*/
mtspr(SPRN_HDSISR, HDSISR_CANARY);
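/*
 * Exit-side sketch (illustrative, not this hunk): when a Hypervisor Data
 * Storage Interrupt is handled later, finding the canary still in
 * fault_dsisr means the hardware never wrote HDSISR, so the fault is
 * simply replayed by resuming the guest. The handler name below is an
 * assumption for the sketch; RESUME_GUEST is the usual KVM PPC resume code.
 */
static int handle_hdsi_canary(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.fault_dsisr == HDSISR_CANARY)
		return RESUME_GUEST;	/* retake the HDSI to get a real HDSISR */
	return 0;			/* hypothetical: fall through to normal handling */
}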
mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);

mtspr(SPRN_AMOR, ~0UL);

local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;

/*
* Hash host, hash guest, or radix guest with prefetch bug, all have
* to disable the MMU before switching to guest MMU state.
*/
if (!radix_enabled() || !kvm_is_radix(kvm) ||
cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);

save_clear_host_mmu(kvm);

if (kvm_is_radix(kvm)) {
switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
__mtmsrd(0, 1); /* clear RI */

} else {
switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
}

/* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);

/*
* P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
* so set guest LPCR (with HDICE) before writing HDEC.
*/
mtspr(SPRN_HDEC, hdec);

mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);

accumulate_time(vcpu, &vcpu->arch.guest_time);

kvmppc_p9_enter_guest(vcpu);

accumulate_time(vcpu, &vcpu->arch.rm_intr);

/* XXX: Could get these from r11/12 and paca exsave instead */
vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);

/* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
trap = local_paca->kvm_hstate.scratch0 & ~0x2;

/* HSRR interrupts leave MSR[RI] unchanged, SRR interrupts clear it. */
ri_set = false;
if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
if (trap != BOOK3S_INTERRUPT_SYSCALL &&
(vcpu->arch.shregs.msr & MSR_RI))
ri_set = true;
exsave = local_paca->exgen;
} else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
exsave = local_paca->exnmi;
} else { /* trap == 0x200 */
exsave = local_paca->exmc;
}

vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;

/*
* Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
* and hstate scratch (which we need to move into exsave to make
* re-entrant vs SRESET/MCE)
*/
if (ri_set) {
if (unlikely(!(mfmsr() & MSR_RI))) {
__mtmsrd(MSR_RI, 1);
WARN_ON_ONCE(1);
}
} else {
WARN_ON_ONCE(mfmsr() & MSR_RI);
__mtmsrd(MSR_RI, 1);
}

vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
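/*
 * Illustrative helper (not in the patch): the EX_* macros are byte offsets
 * into the PACA exception save area, which is read here as an array of
 * u64 slots, so offset/sizeof(u64) is the slot index (e.g. a register
 * saved at byte offset 24 is exsave[3]). The helper name is an assumption.
 */
static inline u64 exsave_read(const u64 *exsave, unsigned int ex_byte_offset)
{
	return exsave[ex_byte_offset / sizeof(u64)];
}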
vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;

if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
kvmppc_realmode_machine_check(vcpu);

} else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
kvmppc_realmode_hmi_handler();

} else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
vcpu->arch.emul_inst = mfspr(SPRN_HEIR);

} else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);

} else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);

} else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) {
vcpu->arch.hfscr = mfspr(SPRN_HFSCR);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Softpatch interrupt for transactional memory emulation cases
* on POWER9 DD2.2. This is early in the guest exit path - we
* haven't saved registers or done a treclaim yet.
*/
} else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) {
vcpu->arch.emul_inst = mfspr(SPRN_HEIR);

/*
* The cases we want to handle here are those where the guest
* is in real suspend mode and is trying to transition to
* transactional mode.
*/
if (local_paca->kvm_hstate.fake_suspend &&
(vcpu->arch.shregs.msr & MSR_TS_S)) {
if (kvmhv_p9_tm_emulation_early(vcpu)) {
/* Prevent it being handled again. */
trap = 0;
}
}
#endif
}

accumulate_time(vcpu, &vcpu->arch.rm_exit);

/* Advance host PURR/SPURR by the amount used by guest */
purr = mfspr(SPRN_PURR);
spurr = mfspr(SPRN_SPURR);
mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
purr - vcpu->arch.purr);
mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
spurr - vcpu->arch.spurr);
vcpu->arch.purr = purr;
vcpu->arch.spurr = spurr;
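/*
 * Worked example (illustrative, not from the patch): if the host PURR read
 * 1000 at entry (host_purr), the guest's PURR was loaded with its saved
 * value of 400, and PURR now reads 460, the guest consumed 60 ticks, so the
 * host PURR is restored to 1000 + (460 - 400) = 1060 and 460 becomes the
 * guest's new saved PURR.
 */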
vcpu->arch.ic = mfspr(SPRN_IC);
vcpu->arch.pid = mfspr(SPRN_PID);
vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;

vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);

/* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
mtspr(SPRN_PSSCR, host_psscr |
(local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
mtspr(SPRN_HFSCR, host_hfscr);
mtspr(SPRN_CIABR, host_ciabr);
mtspr(SPRN_DAWR0, host_dawr0);
mtspr(SPRN_DAWRX0, host_dawrx0);
if (cpu_has_feature(CPU_FTR_DAWR1)) {
mtspr(SPRN_DAWR1, host_dawr1);
mtspr(SPRN_DAWRX1, host_dawrx1);
}

if (kvm_is_radix(kvm)) {
/*
* Since this is radix, do an eieio; tlbsync; ptesync sequence
* in case we interrupted the guest between a tlbie and a
* ptesync.
*/
asm volatile("eieio; tlbsync; ptesync");
}

/*
* cp_abort is required if the processor supports local copy-paste
* to clear the copy buffer that was under control of the guest.
*/
if (cpu_has_feature(CPU_FTR_ARCH_31))
asm volatile(PPC_CP_ABORT);

vc->dpdes = mfspr(SPRN_DPDES);
vc->vtb = mfspr(SPRN_VTB);
mtspr(SPRN_DPDES, 0);
if (vc->pcr)
mtspr(SPRN_PCR, PCR_MASK);

if (vc->tb_offset_applied) {
u64 new_tb = mftb() - vc->tb_offset_applied;
mtspr(SPRN_TBU40, new_tb);
tb = mftb();
if ((tb & 0xffffff) < (new_tb & 0xffffff))
mtspr(SPRN_TBU40, new_tb + 0x1000000);
vc->tb_offset_applied = 0;
}

mtspr(SPRN_HDEC, 0x7fffffff);

save_clear_guest_mmu(kvm, vcpu);
switch_mmu_to_host(kvm, host_pidr);
local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;

/*
* If we are in real mode, only switch MMU on after the MMU is
* switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
*/
__mtmsrd(msr, 0);

end_timing(vcpu);

return trap;
}
EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
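/*
 * Caller-side sketch (illustrative, not part of this patch): time_limit is
 * an absolute timebase deadline, so a caller would typically add its
 * desired slice to mftb() and treat the returned trap as the interrupt
 * vector that caused the exit. Names other than kvmhv_vcpu_entry_p9() are
 * assumptions for the sketch.
 */
static int run_guest_slice(struct kvm_vcpu *vcpu, u64 slice_tb, unsigned long lpcr)
{
	int trap = kvmhv_vcpu_entry_p9(vcpu, mftb() + slice_tb, lpcr);

	if (trap == BOOK3S_INTERRUPT_HV_DECREMENTER)
		return 0;	/* time slice expired before or during the run */

	return trap;	/* e.g. BOOK3S_INTERRUPT_H_DATA_STORAGE, SYSCALL, ... */
}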
@@ -46,6 +46,10 @@ static int global_invalidates(struct kvm *kvm)
else
global = 1;

/* LPID has been switched to host if in virt mode so can't do local */
if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
global = 1;

if (!global) {
/* any other core might now have stale TLB entries... */
smp_wmb();
@@ -398,6 +402,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -542,6 +547,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.regs.gpr[4]);
}
EXPORT_SYMBOL_GPL(kvmppc_h_remove);

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -660,6 +666,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)

return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn)
@@ -730,6 +737,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,

return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_protect);

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -770,6 +778,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
}
return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_read);

long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -818,6 +827,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);

long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -865,6 +875,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);

static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
unsigned long gpa, int writing, unsigned long *hpa,
@@ -1283,3 +1294,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,

return -1; /* send fault up to host kernel mode */
}
EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
@@ -141,13 +141,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
return;
}

if (xive_enabled() && kvmhv_on_pseries()) {
/* No XICS access or hypercalls available, too hard */
this_icp->rm_action |= XICS_RM_KICK_VCPU;
this_icp->rm_kick_target = vcpu;
return;
}

/*
* Check if the core is loaded,
* if not, find an available host core to post to wake the VCPU,
@@ -771,14 +764,6 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
void __iomem *xics_phys;
int64_t rc;

if (kvmhv_on_pseries()) {
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

iosync();
plpar_hcall_raw(H_EOI, retbuf, hwirq);
return;
}

rc = pnv_opal_pci_msi_eoi(c, hwirq);

if (rc)

File diff suppressed because it is too large
@@ -164,12 +164,15 @@ kvmppc_interrupt_pr:
/* 64-bit entry. Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
* R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | exit handler id
* R13 = PACA
* HSTATE.SCRATCH0 = guest R12
* HSTATE.SCRATCH2 = guest R9
*/
#ifdef CONFIG_PPC64
/* Match 32-bit entry */
ld r9,HSTATE_SCRATCH2(r13)
rotldi r12, r12, 32 /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
srdi r12, r12, 32 /* shift trap into low half */
@@ -127,6 +127,71 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
* Pull a vcpu's context from the XIVE on guest exit.
* This assumes we are in virtual mode (MMU on)
*/
void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
{
void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;

if (!vcpu->arch.xive_pushed)
return;

/*
* Should not have been pushed if there is no tima
*/
if (WARN_ON(!tima))
return;

eieio();
/* First load to pull the context, we ignore the value */
__raw_readl(tima + TM_SPC_PULL_OS_CTX);
/* Second load to recover the context state (Words 0 and 1) */
vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);

/* Fixup some of the state for the next load */
vcpu->arch.xive_saved_state.lsmfb = 0;
vcpu->arch.xive_saved_state.ack = 0xff;
vcpu->arch.xive_pushed = 0;
eieio();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
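/*
 * Pairing sketch (illustrative, not part of this patch): the virtual-mode
 * P9 path is expected to push the vcpu context onto the XIVE before
 * entering the guest and pull it back after exit, both with the MMU on.
 * The wrapper name is an assumption; the three callees appear in this
 * series.
 */
static int p9_run_vcpu_with_xive(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
{
	int trap;

	kvmppc_xive_push_vcpu(vcpu);	/* make interrupts deliverable to the guest */
	trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
	kvmppc_xive_pull_vcpu(vcpu);	/* recover the OS context state on exit */

	return trap;
}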
void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;

if (!esc_vaddr)
return;

/* we are using XIVE with single escalation */

if (vcpu->arch.xive_esc_on) {
/*
* If we still have a pending escalation, abort the cede,
* and we must set PQ to 10 rather than 00 so that we don't
* potentially end up with two entries for the escalation
* interrupt in the XIVE interrupt queue. In that case
* we also don't want to set xive_esc_on to 1 here in
* case we race with xive_esc_irq().
*/
vcpu->arch.ceded = 0;
/*
* The escalation interrupts are special as we don't EOI them.
* There is no need to use the load-after-store ordering offset
* to set PQ to 10 as we won't use StoreEOI.
*/
__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
} else {
vcpu->arch.xive_esc_on = true;
mb();
__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
}
mb();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);

/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page
@@ -2075,6 +2140,36 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
return 0;
}

int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;

/* The VM should have configured XICS mode before doing XICS hcalls. */
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;

switch (req) {
case H_XIRR:
return xive_vm_h_xirr(vcpu);
case H_CPPR:
return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
case H_EOI:
return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
case H_IPI:
return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
case H_IPOLL:
return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
case H_XIRR_X:
xive_vm_h_xirr(vcpu);
kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
return H_SUCCESS;
}

return H_UNSUPPORTED;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
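/*
 * Dispatch sketch (illustrative, not this hunk): a virtual-mode hcall
 * handler can route the XICS-on-XIVE hypercalls to the helper above and
 * let everything else follow its normal path. The wrapper name is an
 * assumption for the sketch.
 */
static long try_xics_on_xive_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	switch (req) {
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		return kvmppc_xive_xics_hcall(vcpu, req);
	default:
		return H_UNSUPPORTED;	/* hypothetical: not an XICS hcall */
	}
}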
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -2257,21 +2352,3 @@ struct kvm_device_ops kvm_xive_ops = {
.get_attr = xive_get_attr,
.has_attr = xive_has_attr,
};

void kvmppc_xive_init_module(void)
{
__xive_vm_h_xirr = xive_vm_h_xirr;
__xive_vm_h_ipoll = xive_vm_h_ipoll;
__xive_vm_h_ipi = xive_vm_h_ipi;
__xive_vm_h_cppr = xive_vm_h_cppr;
__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
__xive_vm_h_xirr = NULL;
__xive_vm_h_ipoll = NULL;
__xive_vm_h_ipi = NULL;
__xive_vm_h_cppr = NULL;
__xive_vm_h_eoi = NULL;
}

@@ -289,13 +289,6 @@ extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
* Common Xive routines for XICS-over-XIVE and XIVE native
*/

@@ -1281,13 +1281,3 @@ struct kvm_device_ops kvm_xive_native_ops = {
.has_attr = kvmppc_xive_native_has_attr,
.mmap = kvmppc_xive_native_mmap,
};

void kvmppc_xive_native_init_module(void)
{
;
}

void kvmppc_xive_native_exit_module(void)
{
;
}

@@ -357,30 +357,19 @@ static void __init radix_init_pgtable(void)
}

/* Find out how many PID bits are supported */
if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
if (!mmu_pid_bits)
mmu_pid_bits = 20;
mmu_base_pid = 1;
} else if (cpu_has_feature(CPU_FTR_HVMODE)) {
if (!mmu_pid_bits)
mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (!cpu_has_feature(CPU_FTR_HVMODE) &&
cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
* When KVM is possible, we only use the top half of the
* PID space to avoid collisions between host and guest PIDs
* which can cause problems due to prefetch when exiting the
* guest with AIL=3
* Older versions of KVM on these machines prefer if the
* guest only uses the low 19 PID bits.
*/
mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
mmu_base_pid = 1;
#endif
} else {
/* The guest uses the bottom half of the PID space */
if (!mmu_pid_bits)
mmu_pid_bits = 19;
mmu_base_pid = 1;
} else {
if (!mmu_pid_bits)
mmu_pid_bits = 20;
}
mmu_base_pid = 1;
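/*
 * Illustrative helper (not in the patch): how the base PID could be
 * derived when the host keeps the top half of the PID space for itself.
 * For example, with 20 PID bits and a split space the base is
 * 1 << 19 = 0x80000; otherwise it is 1. Names here are assumptions.
 */
static inline unsigned int base_pid_for(unsigned int pid_bits, bool split_pid_space)
{
	return split_pid_space ? (1u << (pid_bits - 1)) : 1;
}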
/*
* Allocate Partition table and process table for the
@@ -1344,49 +1344,3 @@ void radix__flush_tlb_all(void)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
unsigned long pid = mm->context.id;

if (unlikely(pid == MMU_NO_CONTEXT))
return;

if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
return;

/*
* If this context hasn't run on that CPU before and KVM is
* around, there's a slim chance that the guest on another
* CPU just brought in obsolete translation into the TLB of
* this CPU due to a bad prefetch using the guest PID on
* the way into the hypervisor.
*
* We work around this here. If KVM is possible, we check if
* any sibling thread is in KVM. If it is, the window may exist
* and thus we flush that PID from the core.
*
* A potential future improvement would be to mark which PIDs
* have never been used on the system and avoid it if the PID
* is new and the process has no other cpumask bit set.
*/
if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
int cpu = smp_processor_id();
int sib = cpu_first_thread_sibling(cpu);
bool flush = false;

for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
if (sib == cpu)
continue;
if (!cpu_possible(sib))
continue;
if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
flush = true;
}
if (flush)
_tlbiel_pid(pid, RIC_FLUSH_ALL);
}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

@@ -83,9 +83,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");

if (new_on_cpu)
radix_kvm_prefetch_workaround(next);
else
if (!new_on_cpu)
membarrier_arch_switch_mm(prev, next, tsk);

/*

@@ -604,7 +604,7 @@ struct p9_sprs {
u64 uamor;
};

static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
static unsigned long power9_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -620,8 +620,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */

BUG_ON(!mmu_on);

/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -803,8 +801,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
__slb_restore_bolted_realmode();

out:
if (mmu_on)
mtmsr(MSR_KERNEL);
mtmsr(MSR_KERNEL);

return srr1;
}
@@ -895,7 +892,7 @@ struct p10_sprs {
*/
};

static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
static unsigned long power10_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -909,8 +906,6 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */

BUG_ON(!mmu_on);

/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -991,8 +986,7 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
__slb_restore_bolted_realmode();

out:
if (mmu_on)
mtmsr(MSR_KERNEL);
mtmsr(MSR_KERNEL);

return srr1;
}
@@ -1002,40 +996,10 @@ static unsigned long arch300_offline_stop(unsigned long psscr)
{
unsigned long srr1;

#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
__ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
srr1 = power10_idle_stop(psscr, true);
srr1 = power10_idle_stop(psscr);
else
srr1 = power9_idle_stop(psscr, true);
__ppc64_runlatch_on();
#else
/*
* Tell KVM we're entering idle.
* This does not have to be done in real mode because the P9 MMU
* is independent per-thread. Some steppings share radix/hash mode
* between threads, but in that case KVM has a barrier sync in real
* mode before and after switching between radix and hash.
*
* kvm_start_guest must still be called in real mode though, hence
* the false argument.
*/
local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;

__ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
srr1 = power10_idle_stop(psscr, false);
else
srr1 = power9_idle_stop(psscr, false);
__ppc64_runlatch_on();

local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
/* Order setting hwthread_state vs. testing hwthread_req */
smp_mb();
if (local_paca->kvm_hstate.hwthread_req)
srr1 = idle_kvm_start_guest(srr1);
mtmsr(MSR_KERNEL);
#endif
srr1 = power9_idle_stop(psscr);

return srr1;
}
@@ -1055,9 +1019,9 @@ void arch300_idle_type(unsigned long stop_psscr_val,

__ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
srr1 = power10_idle_stop(psscr, true);
srr1 = power10_idle_stop(psscr);
else
srr1 = power9_idle_stop(psscr, true);
srr1 = power9_idle_stop(psscr);
__ppc64_runlatch_on();

fini_irq_for_idle_irqsoff();