mirror of https://gitee.com/openkylin/linux.git
KVM/arm updates for Linux v5.1
Merge tag 'kvmarm-for-v5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next

- A number of pre-nested code reworks
- Direct physical timer assignment on VHE systems
- kvm_call_hyp type safety enforcement
- Set/Way cache sanitisation for 32-bit guests
- Build system cleanups
- A bunch of janitorial fixes
This commit is contained in commit 71783e09b4.
MAINTAINERS:

@@ -8304,29 +8304,25 @@ S: Maintained
F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c

KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
R: James Morse <james.morse@arm.com>
R: Julien Thierry <julien.thierry@arm.com>
R: Suzuki K Poulose <suzuki.poulose@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
S: Supported
S: Maintained
F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
F: virt/kvm/arm/
F: include/kvm/arm_*

KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
S: Maintained
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/include/asm/kvm*
F: arch/arm64/kvm/
F: virt/kvm/arm/
F: include/kvm/arm_*

KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: James Hogan <jhogan@kernel.org>

@ -54,7 +54,7 @@
|
|||
#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
|
||||
#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
|
||||
#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
|
||||
#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5)
|
||||
#define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
|
||||
#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
|
||||
|
||||
#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
|
||||
|
@ -151,7 +151,7 @@ CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
|
|||
CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
|
||||
CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
|
||||
CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
|
||||
CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
|
||||
CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
|
||||
CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
|
||||
CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
|
||||
CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
|
||||
|
|
|
@ -265,6 +265,14 @@ static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
}
|
||||
|
||||
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (kvm_vcpu_trap_is_iabt(vcpu))
|
||||
return false;
|
||||
|
||||
return kvm_vcpu_dabt_iswrite(vcpu);
|
||||
}
|
||||
|
||||
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmio.h>
|
||||
#include <asm/fpstate.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <kvm/arm_arch_timer.h>
|
||||
|
||||
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
|
||||
|
@ -56,10 +57,13 @@ int __attribute_const__ kvm_target_cpu(void);
|
|||
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
|
||||
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
|
||||
|
||||
struct kvm_arch {
|
||||
/* VTTBR value associated with below pgd and vmid */
|
||||
u64 vttbr;
|
||||
struct kvm_vmid {
|
||||
/* The VMID generation used for the virt. memory system */
|
||||
u64 vmid_gen;
|
||||
u32 vmid;
|
||||
};
|
||||
|
||||
struct kvm_arch {
|
||||
/* The last vcpu id that ran on each physical CPU */
|
||||
int __percpu *last_vcpu_ran;
|
||||
|
||||
|
@ -69,11 +73,11 @@ struct kvm_arch {
|
|||
*/
|
||||
|
||||
/* The VMID generation used for the virt. memory system */
|
||||
u64 vmid_gen;
|
||||
u32 vmid;
|
||||
struct kvm_vmid vmid;
|
||||
|
||||
/* Stage-2 page table */
|
||||
pgd_t *pgd;
|
||||
phys_addr_t pgd_phys;
|
||||
|
||||
/* Interrupt controller */
|
||||
struct vgic_dist vgic;
|
||||
|
@ -147,6 +151,13 @@ struct kvm_cpu_context {
|
|||
|
||||
typedef struct kvm_cpu_context kvm_cpu_context_t;
|
||||
|
||||
static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
|
||||
int cpu)
|
||||
{
|
||||
/* The host's MPIDR is immutable, so let's set it up at boot time */
|
||||
cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
|
||||
}
|
||||
|
||||
struct kvm_vcpu_arch {
|
||||
struct kvm_cpu_context ctxt;
|
||||
|
||||
|
@ -214,7 +225,35 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
|
|||
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
|
||||
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
|
||||
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
|
||||
unsigned long kvm_call_hyp(void *hypfn, ...);
|
||||
|
||||
unsigned long __kvm_call_hyp(void *hypfn, ...);
|
||||
|
||||
/*
|
||||
* The has_vhe() part doesn't get emitted, but is used for type-checking.
|
||||
*/
|
||||
#define kvm_call_hyp(f, ...) \
|
||||
do { \
|
||||
if (has_vhe()) { \
|
||||
f(__VA_ARGS__); \
|
||||
} else { \
|
||||
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define kvm_call_hyp_ret(f, ...) \
|
||||
({ \
|
||||
typeof(f(__VA_ARGS__)) ret; \
|
||||
\
|
||||
if (has_vhe()) { \
|
||||
ret = f(__VA_ARGS__); \
|
||||
} else { \
|
||||
ret = __kvm_call_hyp(kvm_ksym_ref(f), \
|
||||
##__VA_ARGS__); \
|
||||
} \
|
||||
\
|
||||
ret; \
|
||||
})
|
||||
|
||||
void force_vm_exit(const cpumask_t *mask);
|
||||
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
|
||||
struct kvm_vcpu_events *events);
|
||||
|
@ -265,7 +304,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
|||
* compliant with the PCS!).
|
||||
*/
|
||||
|
||||
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
|
||||
__kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
|
||||
}
|
||||
|
||||
static inline void __cpu_init_stage2(void)
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#define TTBR1 __ACCESS_CP15_64(1, c2)
|
||||
#define VTTBR __ACCESS_CP15_64(6, c2)
|
||||
#define PAR __ACCESS_CP15_64(0, c7)
|
||||
#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
|
||||
#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
|
||||
#define CNTVOFF __ACCESS_CP15_64(4, c14)
|
||||
|
||||
|
@ -85,6 +86,7 @@
|
|||
#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
|
||||
#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
|
||||
#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
|
||||
#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
|
||||
#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
|
||||
#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
|
||||
|
||||
|
@ -94,6 +96,8 @@
|
|||
#define read_sysreg_el0(r) read_sysreg(r##_el0)
|
||||
#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
|
||||
|
||||
#define cntp_ctl_el0 CNTP_CTL
|
||||
#define cntp_cval_el0 CNTP_CVAL
|
||||
#define cntv_ctl_el0 CNTV_CTL
|
||||
#define cntv_cval_el0 CNTV_CVAL
|
||||
#define cntvoff_el2 CNTVOFF
|
||||
|
|
|
@ -421,9 +421,14 @@ static inline int hyp_map_aux_data(void)
|
|||
|
||||
static inline void kvm_set_ipa_limit(void) {}
|
||||
|
||||
static inline bool kvm_cpu_has_cnp(void)
|
||||
static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
|
||||
{
|
||||
return false;
|
||||
struct kvm_vmid *vmid = &kvm->arch.vmid;
|
||||
u64 vmid_field, baddr;
|
||||
|
||||
baddr = kvm->arch.pgd_phys;
|
||||
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
|
||||
return kvm_phys_to_vttbr(baddr) | vmid_field;
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
|
|
@ -8,9 +8,8 @@ ifeq ($(plus_virt),+virt)
|
|||
plus_virt_def := -DREQUIRES_VIRT=1
|
||||
endif
|
||||
|
||||
ccflags-y += -Iarch/arm/kvm -Ivirt/kvm/arm/vgic
|
||||
CFLAGS_arm.o := -I. $(plus_virt_def)
|
||||
CFLAGS_mmu.o := -I.
|
||||
ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
|
||||
CFLAGS_arm.o := $(plus_virt_def)
|
||||
|
||||
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
|
||||
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
|
||||
|
|
|
@ -293,15 +293,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu,
|
|||
const struct coproc_params *p,
|
||||
const struct coproc_reg *r)
|
||||
{
|
||||
u64 now = kvm_phys_timer_read();
|
||||
u64 val;
|
||||
u32 val;
|
||||
|
||||
if (p->is_write) {
|
||||
val = *vcpu_reg(vcpu, p->Rt1);
|
||||
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now);
|
||||
kvm_arm_timer_write_sysreg(vcpu,
|
||||
TIMER_PTIMER, TIMER_REG_TVAL, val);
|
||||
} else {
|
||||
val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
|
||||
*vcpu_reg(vcpu, p->Rt1) = val - now;
|
||||
val = kvm_arm_timer_read_sysreg(vcpu,
|
||||
TIMER_PTIMER, TIMER_REG_TVAL);
|
||||
*vcpu_reg(vcpu, p->Rt1) = val;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -315,9 +316,11 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
|
|||
|
||||
if (p->is_write) {
|
||||
val = *vcpu_reg(vcpu, p->Rt1);
|
||||
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val);
|
||||
kvm_arm_timer_write_sysreg(vcpu,
|
||||
TIMER_PTIMER, TIMER_REG_CTL, val);
|
||||
} else {
|
||||
val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
|
||||
val = kvm_arm_timer_read_sysreg(vcpu,
|
||||
TIMER_PTIMER, TIMER_REG_CTL);
|
||||
*vcpu_reg(vcpu, p->Rt1) = val;
|
||||
}
|
||||
|
||||
|
@ -333,9 +336,11 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
|
|||
if (p->is_write) {
|
||||
val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
|
||||
val |= *vcpu_reg(vcpu, p->Rt1);
|
||||
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val);
|
||||
kvm_arm_timer_write_sysreg(vcpu,
|
||||
TIMER_PTIMER, TIMER_REG_CVAL, val);
|
||||
} else {
|
||||
val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
|
||||
val = kvm_arm_timer_read_sysreg(vcpu,
|
||||
TIMER_PTIMER, TIMER_REG_CVAL);
|
||||
*vcpu_reg(vcpu, p->Rt1) = val;
|
||||
*vcpu_reg(vcpu, p->Rt2) = val >> 32;
|
||||
}
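Note on the TVAL handling above: the old 32-bit handlers converted by hand between the down-counting timer value and the absolute compare value (writing CVAL = val + now, reading val = CVAL - now); the new code pushes that conversion into the generic kvm_arm_timer_write_sysreg()/kvm_arm_timer_read_sysreg() helpers. A minimal, self-contained sketch of the underlying arithmetic (plain C with illustrative names, not kernel code), reflecting the architectural definition that TVAL reads as CVAL minus the current count:

#include <stdint.h>
#include <stdio.h>

/* Model of one emulated timer: an absolute 64-bit compare value. */
struct emu_timer {
	uint64_t cval;
};

/* TVAL is a signed 32-bit view of (CVAL - current count). */
static int32_t timer_read_tval(const struct emu_timer *t, uint64_t now)
{
	return (int32_t)(t->cval - now);
}

/* Writing TVAL sets CVAL = now + value, so the timer fires 'val' ticks from now. */
static void timer_write_tval(struct emu_timer *t, uint64_t now, int32_t val)
{
	t->cval = now + (uint64_t)(int64_t)val;
}

int main(void)
{
	struct emu_timer t = { 0 };
	uint64_t now = 1000000;

	timer_write_tval(&t, now, 500);		/* fire 500 ticks in the future */
	printf("cval=%llu tval=%d\n",
	       (unsigned long long)t.cval, timer_read_tval(&t, now + 600));
	return 0;				/* tval is now negative: already expired */
}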
|
||||
|
|
|
@ -27,7 +27,6 @@ static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
|
|||
|
||||
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR);
|
||||
ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
|
||||
ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
|
||||
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
|
||||
|
|
|
@ -176,7 +176,7 @@ THUMB( orr lr, lr, #PSR_T_BIT )
|
|||
msr spsr_cxsf, lr
|
||||
ldr lr, =panic
|
||||
msr ELR_hyp, lr
|
||||
ldr lr, =kvm_call_hyp
|
||||
ldr lr, =__kvm_call_hyp
|
||||
clrex
|
||||
eret
|
||||
ENDPROC(__hyp_do_panic)
|
||||
|
|
|
@ -77,7 +77,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
|
|||
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
write_sysreg(kvm->arch.vttbr, VTTBR);
|
||||
write_sysreg(kvm_get_vttbr(kvm), VTTBR);
|
||||
write_sysreg(vcpu->arch.midr, VPIDR);
|
||||
}
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
|
|||
|
||||
/* Switch to requested VMID */
|
||||
kvm = kern_hyp_va(kvm);
|
||||
write_sysreg(kvm->arch.vttbr, VTTBR);
|
||||
write_sysreg(kvm_get_vttbr(kvm), VTTBR);
|
||||
isb();
|
||||
|
||||
write_sysreg(0, TLBIALLIS);
|
||||
|
@ -61,7 +61,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
|
|||
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
|
||||
|
||||
/* Switch to requested VMID */
|
||||
write_sysreg(kvm->arch.vttbr, VTTBR);
|
||||
write_sysreg(kvm_get_vttbr(kvm), VTTBR);
|
||||
isb();
|
||||
|
||||
write_sysreg(0, TLBIALL);
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
* r12: caller save
|
||||
* rest: callee save
|
||||
*/
|
||||
ENTRY(kvm_call_hyp)
|
||||
ENTRY(__kvm_call_hyp)
|
||||
hvc #0
|
||||
bx lr
|
||||
ENDPROC(kvm_call_hyp)
|
||||
ENDPROC(__kvm_call_hyp)
|
||||
|
|
|
@ -77,6 +77,10 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
|||
*/
|
||||
if (!vcpu_el1_is_32bit(vcpu))
|
||||
vcpu->arch.hcr_el2 |= HCR_TID3;
|
||||
|
||||
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
|
||||
vcpu_el1_is_32bit(vcpu))
|
||||
vcpu->arch.hcr_el2 |= HCR_TID2;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
|
||||
|
@ -331,6 +335,14 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
|
|||
return ESR_ELx_SYS64_ISS_RT(esr);
|
||||
}
|
||||
|
||||
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (kvm_vcpu_trap_is_iabt(vcpu))
|
||||
return false;
|
||||
|
||||
return kvm_vcpu_dabt_iswrite(vcpu);
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmio.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
|
||||
|
@ -56,16 +57,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
|
|||
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
|
||||
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
|
||||
|
||||
struct kvm_arch {
|
||||
struct kvm_vmid {
|
||||
/* The VMID generation used for the virt. memory system */
|
||||
u64 vmid_gen;
|
||||
u32 vmid;
|
||||
};
|
||||
|
||||
struct kvm_arch {
|
||||
struct kvm_vmid vmid;
|
||||
|
||||
/* stage2 entry level table */
|
||||
pgd_t *pgd;
|
||||
phys_addr_t pgd_phys;
|
||||
|
||||
/* VTTBR value associated with above pgd and vmid */
|
||||
u64 vttbr;
|
||||
/* VTCR_EL2 value for this VM */
|
||||
u64 vtcr;
|
||||
|
||||
|
@ -370,7 +374,36 @@ void kvm_arm_halt_guest(struct kvm *kvm);
|
|||
void kvm_arm_resume_guest(struct kvm *kvm);
|
||||
|
||||
u64 __kvm_call_hyp(void *hypfn, ...);
|
||||
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
|
||||
|
||||
/*
|
||||
* The couple of isb() below are there to guarantee the same behaviour
|
||||
* on VHE as on !VHE, where the eret to EL1 acts as a context
|
||||
* synchronization event.
|
||||
*/
|
||||
#define kvm_call_hyp(f, ...) \
|
||||
do { \
|
||||
if (has_vhe()) { \
|
||||
f(__VA_ARGS__); \
|
||||
isb(); \
|
||||
} else { \
|
||||
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define kvm_call_hyp_ret(f, ...) \
|
||||
({ \
|
||||
typeof(f(__VA_ARGS__)) ret; \
|
||||
\
|
||||
if (has_vhe()) { \
|
||||
ret = f(__VA_ARGS__); \
|
||||
isb(); \
|
||||
} else { \
|
||||
ret = __kvm_call_hyp(kvm_ksym_ref(f), \
|
||||
##__VA_ARGS__); \
|
||||
} \
|
||||
\
|
||||
ret; \
|
||||
})
|
||||
|
||||
void force_vm_exit(const cpumask_t *mask);
|
||||
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
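The "type safety" in the reworked kvm_call_hyp()/kvm_call_hyp_ret() comes from the has_vhe() branch: because the macro contains a real call to f(...), the compiler checks argument and return types against f's prototype even when that branch is not the one taken at run time, while the !VHE side still goes through the variadic __kvm_call_hyp trampoline. A small user-space sketch of the same pattern, using GNU C extensions like the kernel macros do (trampoline_call, call_hyp_ret and get_mdcr are illustrative names, not kernel symbols):

#include <stdbool.h>
#include <stdio.h>

static bool has_vhe(void) { return false; }	/* pretend we are on !VHE */

/* Variadic trampoline: no type checking of its arguments at all. */
static unsigned long trampoline_call(void *fn, ...)
{
	printf("would HVC into hyp function %p\n", fn);
	return 0;
}

/*
 * The direct call in the has_vhe() arm is what forces the compiler to
 * type-check the arguments, even if that arm is never executed here.
 */
#define call_hyp_ret(f, ...)						\
	({								\
		__typeof__(f(__VA_ARGS__)) __ret;			\
		if (has_vhe())						\
			__ret = f(__VA_ARGS__);				\
		else							\
			__ret = trampoline_call((void *)f, ##__VA_ARGS__); \
		__ret;							\
	})

static unsigned long get_mdcr(void) { return 0x42; }

int main(void)
{
	unsigned long v = call_hyp_ret(get_mdcr);

	/* call_hyp_ret(get_mdcr, 1) would now fail to compile: too many arguments. */
	printf("mdcr = %#lx\n", v);
	return 0;
}

This is also why, further down in the series, kvm_arm_init_debug() switches from kvm_call_hyp(__kvm_get_mdcr_el2) to kvm_call_hyp_ret(__kvm_get_mdcr_el2): the new kvm_call_hyp() no longer yields a value.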
|
||||
|
@ -389,6 +422,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
|||
|
||||
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
|
||||
|
||||
static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
|
||||
int cpu)
|
||||
{
|
||||
/* The host's MPIDR is immutable, so let's set it up at boot time */
|
||||
cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
|
||||
}
|
||||
|
||||
void __kvm_enable_ssbs(void);
|
||||
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <linux/compiler.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
#define __hyp_text __section(.hyp.text) notrace
|
||||
|
@ -163,7 +164,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...);
|
|||
static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
|
||||
{
|
||||
write_sysreg(kvm->arch.vtcr, vtcr_el2);
|
||||
write_sysreg(kvm->arch.vttbr, vttbr_el2);
|
||||
write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
|
||||
|
||||
/*
|
||||
* ARM erratum 1165522 requires the actual execution of the above
|
||||
|
|
|
@ -138,7 +138,8 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
|
|||
})
|
||||
|
||||
/*
|
||||
* We currently only support a 40bit IPA.
|
||||
* We currently support using a VM-specified IPA size. For backward
|
||||
* compatibility, the default IPA size is fixed to 40bits.
|
||||
*/
|
||||
#define KVM_PHYS_SHIFT (40)
|
||||
|
||||
|
@ -591,9 +592,15 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
|
|||
return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
|
||||
}
|
||||
|
||||
static inline bool kvm_cpu_has_cnp(void)
|
||||
static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
|
||||
{
|
||||
return system_supports_cnp();
|
||||
struct kvm_vmid *vmid = &kvm->arch.vmid;
|
||||
u64 vmid_field, baddr;
|
||||
u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
|
||||
|
||||
baddr = kvm->arch.pgd_phys;
|
||||
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
|
||||
return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
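kvm_get_vttbr() above assembles the VTTBR on demand from the stage-2 PGD base address, the VMID shifted into its field and, when supported, the CnP bit, instead of caching a precomputed value in kvm->arch.vttbr (which is why that field disappears from struct kvm_arch in the earlier hunks). A tiny stand-alone illustration of the composition; the shift value of 48 matches the arm64 VTTBR_VMID_SHIFT layout and is stated here as an assumption, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define VTTBR_VMID_SHIFT	48ULL	/* VMID field lives in the top bits */
#define VTTBR_CNP_BIT		1ULL	/* bit 0: Common-not-Private, if supported */

static uint64_t make_vttbr(uint64_t pgd_phys, uint16_t vmid, int has_cnp)
{
	uint64_t vmid_field = (uint64_t)vmid << VTTBR_VMID_SHIFT;
	uint64_t cnp = has_cnp ? VTTBR_CNP_BIT : 0;

	return pgd_phys | vmid_field | cnp;
}

int main(void)
{
	/* e.g. a stage-2 PGD at 0x8001a000, VMID 5, CnP supported */
	printf("VTTBR_EL2 = %#llx\n",
	       (unsigned long long)make_vttbr(0x8001a000ULL, 5, 1));
	return 0;
}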
|
||||
|
|
|
@ -361,6 +361,7 @@
|
|||
|
||||
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
|
||||
|
||||
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
|
||||
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
|
||||
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
|
||||
|
||||
|
@ -392,6 +393,10 @@
|
|||
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
|
||||
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
|
||||
|
||||
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
|
||||
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
|
||||
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
|
||||
|
||||
#define __PMEV_op2(n) ((n) & 0x7)
|
||||
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
|
||||
#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
|
||||
|
@ -426,7 +431,7 @@
|
|||
#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
|
||||
#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
|
||||
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
|
||||
#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
|
||||
#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
|
||||
#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
|
||||
|
||||
#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
|
||||
|
|
|
@ -3,9 +3,7 @@
|
|||
# Makefile for Kernel-based Virtual Machine module
|
||||
#
|
||||
|
||||
ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic
|
||||
CFLAGS_arm.o := -I.
|
||||
CFLAGS_mmu.o := -I.
|
||||
ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
|
||||
|
||||
KVM=../../../virt/kvm
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
|
|||
|
||||
void kvm_arm_init_debug(void)
|
||||
{
|
||||
__this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
|
||||
__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -40,9 +40,6 @@
|
|||
* arch/arm64/kernel/hyp_stub.S.
|
||||
*/
|
||||
ENTRY(__kvm_call_hyp)
|
||||
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
|
||||
hvc #0
|
||||
ret
|
||||
alternative_else_nop_endif
|
||||
b __vhe_hyp_call
|
||||
ENDPROC(__kvm_call_hyp)
|
||||
|
|
|
@ -43,18 +43,6 @@
|
|||
ldr lr, [sp], #16
|
||||
.endm
|
||||
|
||||
ENTRY(__vhe_hyp_call)
|
||||
do_el2_call
|
||||
/*
|
||||
* We used to rely on having an exception return to get
|
||||
* an implicit isb. In the E2H case, we don't have it anymore.
|
||||
* rather than changing all the leaf functions, just do it here
|
||||
* before returning to the rest of the kernel.
|
||||
*/
|
||||
isb
|
||||
ret
|
||||
ENDPROC(__vhe_hyp_call)
|
||||
|
||||
el1_sync: // Guest trapped into EL2
|
||||
|
||||
mrs x0, esr_el2
|
||||
|
|
|
@ -52,7 +52,6 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
|
|||
|
||||
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
|
||||
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
|
||||
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
|
||||
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
|
||||
|
|
|
@ -965,6 +965,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|||
return true;
|
||||
}
|
||||
|
||||
#define reg_to_encoding(x) \
|
||||
sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
|
||||
(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
|
||||
|
||||
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
|
||||
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
|
||||
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
|
||||
|
@ -986,44 +990,38 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|||
{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
|
||||
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
|
||||
|
||||
static bool access_cntp_tval(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
static bool access_arch_timer(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u64 now = kvm_phys_timer_read();
|
||||
u64 cval;
|
||||
enum kvm_arch_timers tmr;
|
||||
enum kvm_arch_timer_regs treg;
|
||||
u64 reg = reg_to_encoding(r);
|
||||
|
||||
if (p->is_write) {
|
||||
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
|
||||
p->regval + now);
|
||||
} else {
|
||||
cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
|
||||
p->regval = cval - now;
|
||||
switch (reg) {
|
||||
case SYS_CNTP_TVAL_EL0:
|
||||
case SYS_AARCH32_CNTP_TVAL:
|
||||
tmr = TIMER_PTIMER;
|
||||
treg = TIMER_REG_TVAL;
|
||||
break;
|
||||
case SYS_CNTP_CTL_EL0:
|
||||
case SYS_AARCH32_CNTP_CTL:
|
||||
tmr = TIMER_PTIMER;
|
||||
treg = TIMER_REG_CTL;
|
||||
break;
|
||||
case SYS_CNTP_CVAL_EL0:
|
||||
case SYS_AARCH32_CNTP_CVAL:
|
||||
tmr = TIMER_PTIMER;
|
||||
treg = TIMER_REG_CVAL;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
if (p->is_write)
|
||||
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
|
||||
kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
|
||||
else
|
||||
p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool access_cntp_cval(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
if (p->is_write)
|
||||
kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
|
||||
else
|
||||
p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
|
||||
p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
|
||||
|
||||
return true;
|
||||
}
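access_arch_timer() above replaces the three per-register handlers with one function that decodes the trapped encoding via reg_to_encoding() into a (timer, register) pair and then calls the generic kvm_arm_timer_read_sysreg()/kvm_arm_timer_write_sysreg() helpers. The same packed encoding doubles as the key the register tables are binary-searched on, replacing the open-coded reg_to_match_value() removed further down. A stand-alone sketch of the packing, reusing the field widths from that removed macro (a simplified model, not the kernel's sys_reg() layout):

#include <stdint.h>
#include <stdio.h>

struct sys_reg_desc { uint8_t Op0, Op1, CRn, CRm, Op2; const char *name; };

/* Pack the five encoding fields into one comparable lookup key. */
static unsigned long reg_to_key(const struct sys_reg_desc *r)
{
	return ((unsigned long)r->Op0 << 14) |
	       ((unsigned long)r->Op1 << 11) |
	       ((unsigned long)r->CRn << 7)  |
	       ((unsigned long)r->CRm << 3)  |
	       r->Op2;
}

int main(void)
{
	/* CNTP_CTL_EL0 is sys_reg(3, 3, 14, 2, 1) in the tables above. */
	struct sys_reg_desc cntp_ctl = { 3, 3, 14, 2, 1, "CNTP_CTL_EL0" };

	printf("%s -> key %#lx\n", cntp_ctl.name, reg_to_key(&cntp_ctl));
	return 0;
}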
|
||||
|
@ -1148,6 +1146,64 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
return __set_id_reg(rd, uaddr, true);
|
||||
}
|
||||
|
||||
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
if (p->is_write)
|
||||
return write_to_read_only(vcpu, p, r);
|
||||
|
||||
p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
if (p->is_write)
|
||||
return write_to_read_only(vcpu, p, r);
|
||||
|
||||
p->regval = read_sysreg(clidr_el1);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
if (p->is_write)
|
||||
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
|
||||
else
|
||||
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u32 csselr;
|
||||
|
||||
if (p->is_write)
|
||||
return write_to_read_only(vcpu, p, r);
|
||||
|
||||
csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
|
||||
p->regval = get_ccsidr(csselr);
|
||||
|
||||
/*
|
||||
* Guests should not be doing cache operations by set/way at all, and
|
||||
* for this reason, we trap them and attempt to infer the intent, so
|
||||
* that we can flush the entire guest's address space at the appropriate
|
||||
* time.
|
||||
* To prevent this trapping from causing performance problems, let's
|
||||
* expose the geometry of all data and unified caches (which are
|
||||
* guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
|
||||
* [If guests should attempt to infer aliasing properties from the
|
||||
* geometry (which is not permitted by the architecture), they would
|
||||
* only do so for virtually indexed caches.]
|
||||
*/
|
||||
if (!(csselr & 1)) // data or unified cache
|
||||
p->regval &= ~GENMASK(27, 3);
|
||||
return true;
|
||||
}
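The masking at the end of access_ccsidr() is what implements the "1 set, 1 way" geometry described in the comment: assuming the 32-bit CCSIDR layout with NumSets in bits [27:13] and Associativity in bits [12:3], both encoded as (value - 1), clearing bits [27:3] reports exactly one set and one way while leaving the line-size field in bits [2:0] untouched. A tiny illustration of the effect (illustrative helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t sanitise_ccsidr(uint32_t ccsidr)
{
	/* Clear NumSets [27:13] and Associativity [12:3]; keep LineSize [2:0]. */
	return ccsidr & ~(((1u << 25) - 1) << 3);
}

int main(void)
{
	/* e.g. a 32KB, 4-way, 64-byte-line cache: NumSets=127, Assoc=3, LineSize=2 */
	uint32_t real = (127u << 13) | (3u << 3) | 2u;
	uint32_t guest = sanitise_ccsidr(real);

	printf("real=%#x guest=%#x\n", real, guest);	/* guest keeps only LineSize */
	return 0;
}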
|
||||
|
||||
/* sys_reg_desc initialiser for known cpufeature ID registers */
|
||||
#define ID_SANITISED(name) { \
|
||||
SYS_DESC(SYS_##name), \
|
||||
|
@ -1365,7 +1421,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
|
||||
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
|
||||
|
||||
{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
|
||||
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
|
||||
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
|
||||
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
|
||||
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
|
||||
|
||||
{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
|
||||
{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
|
||||
|
@ -1388,9 +1447,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
|
||||
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
|
||||
|
||||
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
|
||||
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
|
||||
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
|
||||
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
|
||||
|
||||
/* PMEVCNTRn_EL0 */
|
||||
PMU_PMEVCNTR_EL0(0),
|
||||
|
@ -1464,7 +1523,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
|
||||
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
|
||||
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
|
||||
{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
|
||||
{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
|
||||
};
|
||||
|
||||
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
|
||||
|
@ -1665,6 +1724,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
|
|||
* register).
|
||||
*/
|
||||
static const struct sys_reg_desc cp15_regs[] = {
|
||||
{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
|
||||
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
|
||||
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
|
||||
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
|
||||
|
@ -1711,10 +1771,9 @@ static const struct sys_reg_desc cp15_regs[] = {
|
|||
|
||||
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
|
||||
|
||||
/* CNTP_TVAL */
|
||||
{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
|
||||
/* CNTP_CTL */
|
||||
{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
|
||||
/* Arch timers */
|
||||
{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
|
||||
{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
|
||||
|
||||
/* PMEVCNTRn */
|
||||
PMU_PMEVCNTR(0),
|
||||
|
@ -1782,6 +1841,10 @@ static const struct sys_reg_desc cp15_regs[] = {
|
|||
PMU_PMEVTYPER(30),
|
||||
/* PMCCFILTR */
|
||||
{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
|
||||
|
||||
{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
|
||||
{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
|
||||
{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
|
||||
};
|
||||
|
||||
static const struct sys_reg_desc cp15_64_regs[] = {
|
||||
|
@ -1791,7 +1854,7 @@ static const struct sys_reg_desc cp15_64_regs[] = {
|
|||
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
|
||||
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
|
||||
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
|
||||
{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
|
||||
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
|
||||
};
|
||||
|
||||
/* Target specific emulation tables */
|
||||
|
@ -1820,30 +1883,19 @@ static const struct sys_reg_desc *get_target_table(unsigned target,
|
|||
}
|
||||
}
|
||||
|
||||
#define reg_to_match_value(x) \
|
||||
({ \
|
||||
unsigned long val; \
|
||||
val = (x)->Op0 << 14; \
|
||||
val |= (x)->Op1 << 11; \
|
||||
val |= (x)->CRn << 7; \
|
||||
val |= (x)->CRm << 3; \
|
||||
val |= (x)->Op2; \
|
||||
val; \
|
||||
})
|
||||
|
||||
static int match_sys_reg(const void *key, const void *elt)
|
||||
{
|
||||
const unsigned long pval = (unsigned long)key;
|
||||
const struct sys_reg_desc *r = elt;
|
||||
|
||||
return pval - reg_to_match_value(r);
|
||||
return pval - reg_to_encoding(r);
|
||||
}
|
||||
|
||||
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
|
||||
const struct sys_reg_desc table[],
|
||||
unsigned int num)
|
||||
{
|
||||
unsigned long pval = reg_to_match_value(params);
|
||||
unsigned long pval = reg_to_encoding(params);
|
||||
|
||||
return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
|
||||
}
|
||||
|
@ -2206,11 +2258,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
|
|||
}
|
||||
|
||||
FUNCTION_INVARIANT(midr_el1)
|
||||
FUNCTION_INVARIANT(ctr_el0)
|
||||
FUNCTION_INVARIANT(revidr_el1)
|
||||
FUNCTION_INVARIANT(clidr_el1)
|
||||
FUNCTION_INVARIANT(aidr_el1)
|
||||
|
||||
static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
|
||||
{
|
||||
((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
|
||||
}
|
||||
|
||||
/* ->val is filled in by kvm_sys_reg_table_init() */
|
||||
static struct sys_reg_desc invariant_sys_regs[] = {
|
||||
{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
|
||||
|
|
|
@ -1206,6 +1206,13 @@ static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
|
|||
return ARCH_TIMER_PHYS_SECURE_PPI;
|
||||
}
|
||||
|
||||
static void __init arch_timer_populate_kvm_info(void)
|
||||
{
|
||||
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
|
||||
if (is_kernel_in_hyp_mode())
|
||||
arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
|
||||
}
|
||||
|
||||
static int __init arch_timer_of_init(struct device_node *np)
|
||||
{
|
||||
int i, ret;
|
||||
|
@ -1220,7 +1227,7 @@ static int __init arch_timer_of_init(struct device_node *np)
|
|||
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
|
||||
arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
|
||||
|
||||
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
|
||||
arch_timer_populate_kvm_info();
|
||||
|
||||
rate = arch_timer_get_cntfrq();
|
||||
arch_timer_of_configure_rate(rate, np);
|
||||
|
@ -1550,7 +1557,7 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
|
|||
arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
|
||||
acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
|
||||
|
||||
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
|
||||
arch_timer_populate_kvm_info();
|
||||
|
||||
/*
|
||||
* When probing via ACPI, we have no mechanism to override the sysreg
|
||||
|
|
|
@ -74,6 +74,7 @@ enum arch_timer_spi_nr {
|
|||
struct arch_timer_kvm_info {
|
||||
struct timecounter timecounter;
|
||||
int virtual_irq;
|
||||
int physical_irq;
|
||||
};
|
||||
|
||||
struct arch_timer_mem_frame {
|
||||
|
|
|
@ -22,7 +22,22 @@
|
|||
#include <linux/clocksource.h>
|
||||
#include <linux/hrtimer.h>
|
||||
|
||||
enum kvm_arch_timers {
|
||||
TIMER_PTIMER,
|
||||
TIMER_VTIMER,
|
||||
NR_KVM_TIMERS
|
||||
};
|
||||
|
||||
enum kvm_arch_timer_regs {
|
||||
TIMER_REG_CNT,
|
||||
TIMER_REG_CVAL,
|
||||
TIMER_REG_TVAL,
|
||||
TIMER_REG_CTL,
|
||||
};
|
||||
|
||||
struct arch_timer_context {
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
/* Registers: control register, timer value */
|
||||
u32 cnt_ctl;
|
||||
u64 cnt_cval;
|
||||
|
@ -30,30 +45,36 @@ struct arch_timer_context {
|
|||
/* Timer IRQ */
|
||||
struct kvm_irq_level irq;
|
||||
|
||||
/*
|
||||
* We have multiple paths which can save/restore the timer state
|
||||
* onto the hardware, so we need some way of keeping track of
|
||||
* where the latest state is.
|
||||
*
|
||||
* loaded == true: State is loaded on the hardware registers.
|
||||
* loaded == false: State is stored in memory.
|
||||
*/
|
||||
bool loaded;
|
||||
|
||||
/* Virtual offset */
|
||||
u64 cntvoff;
|
||||
u64 cntvoff;
|
||||
|
||||
/* Emulated Timer (may be unused) */
|
||||
struct hrtimer hrtimer;
|
||||
|
||||
/*
|
||||
* We have multiple paths which can save/restore the timer state onto
|
||||
* the hardware, so we need some way of keeping track of where the
|
||||
* latest state is.
|
||||
*/
|
||||
bool loaded;
|
||||
|
||||
/* Duplicated state from arch_timer.c for convenience */
|
||||
u32 host_timer_irq;
|
||||
u32 host_timer_irq_flags;
|
||||
};
|
||||
|
||||
struct timer_map {
|
||||
struct arch_timer_context *direct_vtimer;
|
||||
struct arch_timer_context *direct_ptimer;
|
||||
struct arch_timer_context *emul_ptimer;
|
||||
};
|
||||
|
||||
struct arch_timer_cpu {
|
||||
struct arch_timer_context vtimer;
|
||||
struct arch_timer_context ptimer;
|
||||
struct arch_timer_context timers[NR_KVM_TIMERS];
|
||||
|
||||
/* Background timer used when the guest is not running */
|
||||
struct hrtimer bg_timer;
|
||||
|
||||
/* Physical timer emulation */
|
||||
struct hrtimer phys_timer;
|
||||
|
||||
/* Is the timer enabled */
|
||||
bool enabled;
|
||||
};
|
||||
|
@ -76,9 +97,6 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
|
|||
|
||||
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
|
||||
|
||||
void kvm_timer_schedule(struct kvm_vcpu *vcpu);
|
||||
void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
|
||||
|
||||
u64 kvm_phys_timer_read(void);
|
||||
|
||||
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
|
||||
|
@ -88,7 +106,19 @@ void kvm_timer_init_vhe(void);
|
|||
|
||||
bool kvm_arch_timer_get_input_level(int vintid);
|
||||
|
||||
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
|
||||
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
|
||||
#define vcpu_timer(v) (&(v)->arch.timer_cpu)
|
||||
#define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)])
|
||||
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
|
||||
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
|
||||
|
||||
#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
|
||||
|
||||
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
|
||||
enum kvm_arch_timers tmr,
|
||||
enum kvm_arch_timer_regs treg);
|
||||
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
|
||||
enum kvm_arch_timers tmr,
|
||||
enum kvm_arch_timer_regs treg,
|
||||
u64 val);
|
||||
|
||||
#endif
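With the per-VCPU timers now held in a single timers[NR_KVM_TIMERS] array, a context can recover its own index by pointer arithmetic, which is what the arch_timer_ctx_index() macro above does. A minimal stand-alone model of that trick (struct layout simplified, names illustrative; the real context also carries a vcpu back-pointer):

#include <stdio.h>

enum kvm_arch_timers { TIMER_PTIMER, TIMER_VTIMER, NR_KVM_TIMERS };

struct timer_ctx { unsigned long cnt_cval; };

struct timer_cpu { struct timer_ctx timers[NR_KVM_TIMERS]; };

/* Same idea as arch_timer_ctx_index(): element pointer minus array base. */
static int ctx_index(const struct timer_cpu *cpu, const struct timer_ctx *ctx)
{
	return (int)(ctx - cpu->timers);
}

int main(void)
{
	struct timer_cpu cpu;
	const struct timer_ctx *vt = &cpu.timers[TIMER_VTIMER];

	printf("vtimer index = %d\n", ctx_index(&cpu, vt));	/* prints 1 */
	return 0;
}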
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
|
||||
#include <clocksource/arm_arch_timer.h>
|
||||
#include <asm/arch_timer.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_hyp.h>
|
||||
|
||||
#include <kvm/arm_vgic.h>
|
||||
|
@ -34,7 +35,9 @@
|
|||
|
||||
static struct timecounter *timecounter;
|
||||
static unsigned int host_vtimer_irq;
|
||||
static unsigned int host_ptimer_irq;
|
||||
static u32 host_vtimer_irq_flags;
|
||||
static u32 host_ptimer_irq_flags;
|
||||
|
||||
static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
|
||||
|
||||
|
@ -52,12 +55,34 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
|
|||
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
|
||||
struct arch_timer_context *timer_ctx);
|
||||
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
|
||||
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
|
||||
struct arch_timer_context *timer,
|
||||
enum kvm_arch_timer_regs treg,
|
||||
u64 val);
|
||||
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
|
||||
struct arch_timer_context *timer,
|
||||
enum kvm_arch_timer_regs treg);
|
||||
|
||||
u64 kvm_phys_timer_read(void)
|
||||
{
|
||||
return timecounter->cc->read(timecounter->cc);
|
||||
}
|
||||
|
||||
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
|
||||
{
|
||||
if (has_vhe()) {
|
||||
map->direct_vtimer = vcpu_vtimer(vcpu);
|
||||
map->direct_ptimer = vcpu_ptimer(vcpu);
|
||||
map->emul_ptimer = NULL;
|
||||
} else {
|
||||
map->direct_vtimer = vcpu_vtimer(vcpu);
|
||||
map->direct_ptimer = NULL;
|
||||
map->emul_ptimer = vcpu_ptimer(vcpu);
|
||||
}
|
||||
|
||||
trace_kvm_get_timer_map(vcpu->vcpu_id, map);
|
||||
}
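get_timer_map() is where the "direct physical timer assignment on VHE systems" from the cover letter becomes concrete: on VHE both the virtual and the physical timer are handed directly to the hardware, while on !VHE the physical timer keeps being emulated with an hrtimer. A small sketch mirroring that decision with simplified types (not kernel code); callers such as kvm_timer_vcpu_load() then just check the direct_ptimer/emul_ptimer slots for NULL instead of hard-coding which timer is which:

#include <stdbool.h>
#include <stdio.h>

struct timer_map_model {
	const char *direct_vtimer;
	const char *direct_ptimer;	/* NULL when the ptimer is emulated */
	const char *emul_ptimer;	/* NULL when the ptimer is in hardware */
};

static void get_map(bool vhe, struct timer_map_model *map)
{
	map->direct_vtimer = "vtimer";
	map->direct_ptimer = vhe ? "ptimer" : NULL;
	map->emul_ptimer   = vhe ? NULL : "ptimer";
}

int main(void)
{
	for (int vhe = 0; vhe <= 1; vhe++) {
		struct timer_map_model map;

		get_map(vhe, &map);
		printf("%s: direct=[%s%s%s] emulated=[%s]\n",
		       vhe ? "VHE " : "!VHE",
		       map.direct_vtimer,
		       map.direct_ptimer ? ", " : "",
		       map.direct_ptimer ? map.direct_ptimer : "",
		       map.emul_ptimer ? map.emul_ptimer : "none");
	}
	return 0;
}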
|
||||
|
||||
static inline bool userspace_irqchip(struct kvm *kvm)
|
||||
{
|
||||
return static_branch_unlikely(&userspace_irqchip_in_use) &&
|
||||
|
@ -78,20 +103,27 @@ static void soft_timer_cancel(struct hrtimer *hrt)
|
|||
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
|
||||
struct arch_timer_context *vtimer;
|
||||
struct arch_timer_context *ctx;
|
||||
struct timer_map map;
|
||||
|
||||
/*
|
||||
* We may see a timer interrupt after vcpu_put() has been called which
|
||||
* sets the CPU's vcpu pointer to NULL, because even though the timer
|
||||
* has been disabled in vtimer_save_state(), the hardware interrupt
|
||||
* has been disabled in timer_save_state(), the hardware interrupt
|
||||
* signal may not have been retired from the interrupt controller yet.
|
||||
*/
|
||||
if (!vcpu)
|
||||
return IRQ_HANDLED;
|
||||
|
||||
vtimer = vcpu_vtimer(vcpu);
|
||||
if (kvm_timer_should_fire(vtimer))
|
||||
kvm_timer_update_irq(vcpu, true, vtimer);
|
||||
get_timer_map(vcpu, &map);
|
||||
|
||||
if (irq == host_vtimer_irq)
|
||||
ctx = map.direct_vtimer;
|
||||
else
|
||||
ctx = map.direct_ptimer;
|
||||
|
||||
if (kvm_timer_should_fire(ctx))
|
||||
kvm_timer_update_irq(vcpu, true, ctx);
|
||||
|
||||
if (userspace_irqchip(vcpu->kvm) &&
|
||||
!static_branch_unlikely(&has_gic_active_state))
|
||||
|
@ -122,7 +154,9 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
|
|||
|
||||
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
|
||||
{
|
||||
return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
|
||||
WARN_ON(timer_ctx && timer_ctx->loaded);
|
||||
return timer_ctx &&
|
||||
!(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
|
||||
(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
|
||||
}
|
||||
|
||||
|
@ -132,21 +166,22 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
|
|||
*/
|
||||
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
|
||||
u64 min_delta = ULLONG_MAX;
|
||||
int i;
|
||||
|
||||
if (kvm_timer_irq_can_fire(vtimer))
|
||||
min_virt = kvm_timer_compute_delta(vtimer);
|
||||
for (i = 0; i < NR_KVM_TIMERS; i++) {
|
||||
struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
|
||||
|
||||
if (kvm_timer_irq_can_fire(ptimer))
|
||||
min_phys = kvm_timer_compute_delta(ptimer);
|
||||
WARN(ctx->loaded, "timer %d loaded\n", i);
|
||||
if (kvm_timer_irq_can_fire(ctx))
|
||||
min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
|
||||
}
|
||||
|
||||
/* If none of timers can fire, then return 0 */
|
||||
if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
|
||||
if (min_delta == ULLONG_MAX)
|
||||
return 0;
|
||||
|
||||
return min(min_virt, min_phys);
|
||||
return min_delta;
|
||||
}
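The rewritten kvm_timer_earliest_exp() folds the old per-timer special cases into one loop: take the minimum expiry delta over every timer that can actually fire, and return 0 if none can. A minimal model of that logic (illustrative types, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define NR_TIMERS	2
#define NO_EXPIRY	UINT64_MAX

struct timer_state { int can_fire; uint64_t delta_ns; };

/* Earliest expiry among timers able to fire, or 0 if none can fire. */
static uint64_t earliest_exp(const struct timer_state t[NR_TIMERS])
{
	uint64_t min_delta = NO_EXPIRY;

	for (int i = 0; i < NR_TIMERS; i++)
		if (t[i].can_fire && t[i].delta_ns < min_delta)
			min_delta = t[i].delta_ns;

	return min_delta == NO_EXPIRY ? 0 : min_delta;
}

int main(void)
{
	struct timer_state timers[NR_TIMERS] = { { 1, 5000 }, { 0, 100 } };

	printf("sleep for %llu ns\n", (unsigned long long)earliest_exp(timers));
	return 0;	/* the masked timer's shorter delta is ignored */
}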
|
||||
|
||||
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
|
||||
|
@ -173,41 +208,58 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
|
|||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
|
||||
static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
|
||||
{
|
||||
struct arch_timer_context *ptimer;
|
||||
struct arch_timer_cpu *timer;
|
||||
struct arch_timer_context *ctx;
|
||||
struct kvm_vcpu *vcpu;
|
||||
u64 ns;
|
||||
|
||||
timer = container_of(hrt, struct arch_timer_cpu, phys_timer);
|
||||
vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
|
||||
ptimer = vcpu_ptimer(vcpu);
|
||||
ctx = container_of(hrt, struct arch_timer_context, hrtimer);
|
||||
vcpu = ctx->vcpu;
|
||||
|
||||
trace_kvm_timer_hrtimer_expire(ctx);
|
||||
|
||||
/*
|
||||
* Check that the timer has really expired from the guest's
|
||||
* PoV (NTP on the host may have forced it to expire
|
||||
* early). If not ready, schedule for a later time.
|
||||
*/
|
||||
ns = kvm_timer_compute_delta(ptimer);
|
||||
ns = kvm_timer_compute_delta(ctx);
|
||||
if (unlikely(ns)) {
|
||||
hrtimer_forward_now(hrt, ns_to_ktime(ns));
|
||||
return HRTIMER_RESTART;
|
||||
}
|
||||
|
||||
kvm_timer_update_irq(vcpu, true, ptimer);
|
||||
kvm_timer_update_irq(vcpu, true, ctx);
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
|
||||
{
|
||||
enum kvm_arch_timers index;
|
||||
u64 cval, now;
|
||||
|
||||
if (timer_ctx->loaded) {
|
||||
u32 cnt_ctl;
|
||||
if (!timer_ctx)
|
||||
return false;
|
||||
|
||||
index = arch_timer_ctx_index(timer_ctx);
|
||||
|
||||
if (timer_ctx->loaded) {
|
||||
u32 cnt_ctl = 0;
|
||||
|
||||
switch (index) {
|
||||
case TIMER_VTIMER:
|
||||
cnt_ctl = read_sysreg_el0(cntv_ctl);
|
||||
break;
|
||||
case TIMER_PTIMER:
|
||||
cnt_ctl = read_sysreg_el0(cntp_ctl);
|
||||
break;
|
||||
case NR_KVM_TIMERS:
|
||||
/* GCC is braindead */
|
||||
cnt_ctl = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Only the virtual timer can be loaded so far */
|
||||
cnt_ctl = read_sysreg_el0(cntv_ctl);
|
||||
return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
|
||||
(cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
|
||||
!(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
|
||||
|
@ -224,13 +276,13 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
|
|||
|
||||
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
|
||||
struct timer_map map;
|
||||
|
||||
if (kvm_timer_should_fire(vtimer))
|
||||
return true;
|
||||
get_timer_map(vcpu, &map);
|
||||
|
||||
return kvm_timer_should_fire(ptimer);
|
||||
return kvm_timer_should_fire(map.direct_vtimer) ||
|
||||
kvm_timer_should_fire(map.direct_ptimer) ||
|
||||
kvm_timer_should_fire(map.emul_ptimer);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -269,77 +321,70 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
|
|||
}
|
||||
}
|
||||
|
||||
/* Schedule the background timer for the emulated timer. */
|
||||
static void phys_timer_emulate(struct kvm_vcpu *vcpu)
|
||||
static void timer_emulate(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
||||
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
|
||||
bool should_fire = kvm_timer_should_fire(ctx);
|
||||
|
||||
trace_kvm_timer_emulate(ctx, should_fire);
|
||||
|
||||
if (should_fire) {
|
||||
kvm_timer_update_irq(ctx->vcpu, true, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the timer can fire now, we don't need to have a soft timer
|
||||
* scheduled for the future. If the timer cannot fire at all,
|
||||
* then we also don't need a soft timer.
|
||||
*/
|
||||
if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
|
||||
soft_timer_cancel(&timer->phys_timer);
|
||||
if (!kvm_timer_irq_can_fire(ctx)) {
|
||||
soft_timer_cancel(&ctx->hrtimer);
|
||||
return;
|
||||
}
|
||||
|
||||
soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer));
|
||||
soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if there was a change in the timer state, so that we should either
|
||||
* raise or lower the line level to the GIC or schedule a background timer to
|
||||
* emulate the physical timer.
|
||||
*/
|
||||
static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
|
||||
static void timer_save_state(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
|
||||
bool level;
|
||||
|
||||
if (unlikely(!timer->enabled))
|
||||
return;
|
||||
|
||||
/*
|
||||
* The vtimer virtual interrupt is a 'mapped' interrupt, meaning part
|
||||
* of its lifecycle is offloaded to the hardware, and we therefore may
|
||||
* not have lowered the irq.level value before having to signal a new
|
||||
* interrupt, but have to signal an interrupt every time the level is
|
||||
* asserted.
|
||||
*/
|
||||
level = kvm_timer_should_fire(vtimer);
|
||||
kvm_timer_update_irq(vcpu, level, vtimer);
|
||||
|
||||
phys_timer_emulate(vcpu);
|
||||
|
||||
if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
|
||||
kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
|
||||
}
|
||||
|
||||
static void vtimer_save_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
|
||||
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
|
||||
unsigned long flags;
|
||||
|
||||
if (!timer->enabled)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
if (!vtimer->loaded)
|
||||
if (!ctx->loaded)
|
||||
goto out;
|
||||
|
||||
if (timer->enabled) {
|
||||
vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
|
||||
vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
|
||||
switch (index) {
|
||||
case TIMER_VTIMER:
|
||||
ctx->cnt_ctl = read_sysreg_el0(cntv_ctl);
|
||||
ctx->cnt_cval = read_sysreg_el0(cntv_cval);
|
||||
|
||||
/* Disable the timer */
|
||||
write_sysreg_el0(0, cntv_ctl);
|
||||
isb();
|
||||
|
||||
break;
|
||||
case TIMER_PTIMER:
|
||||
ctx->cnt_ctl = read_sysreg_el0(cntp_ctl);
|
||||
ctx->cnt_cval = read_sysreg_el0(cntp_cval);
|
||||
|
||||
/* Disable the timer */
|
||||
write_sysreg_el0(0, cntp_ctl);
|
||||
isb();
|
||||
|
||||
break;
|
||||
case NR_KVM_TIMERS:
|
||||
BUG();
|
||||
}
|
||||
|
||||
/* Disable the virtual timer */
|
||||
write_sysreg_el0(0, cntv_ctl);
|
||||
isb();
|
||||
trace_kvm_timer_save_state(ctx);
|
||||
|
||||
vtimer->loaded = false;
|
||||
ctx->loaded = false;
|
||||
out:
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
@ -349,67 +394,72 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
|
|||
* thread is removed from its waitqueue and made runnable when there's a timer
|
||||
* interrupt to handle.
|
||||
*/
|
||||
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
|
||||
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
|
||||
struct timer_map map;
|
||||
|
||||
vtimer_save_state(vcpu);
|
||||
get_timer_map(vcpu, &map);
|
||||
|
||||
/*
|
||||
* No need to schedule a background timer if any guest timer has
|
||||
* already expired, because kvm_vcpu_block will return before putting
|
||||
* the thread to sleep.
|
||||
*/
|
||||
if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If both timers are not capable of raising interrupts (disabled or
|
||||
* If no timers are capable of raising interrupts (disabled or
|
||||
* masked), then there's no more work for us to do.
|
||||
*/
|
||||
if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
|
||||
if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
|
||||
!kvm_timer_irq_can_fire(map.direct_ptimer) &&
|
||||
!kvm_timer_irq_can_fire(map.emul_ptimer))
|
||||
return;
|
||||
|
||||
/*
|
||||
* The guest timers have not yet expired, schedule a background timer.
|
||||
* At least one guest timer will expire. Schedule a background timer.
|
||||
* Set the earliest expiration time among the guest timers.
|
||||
*/
|
||||
soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
|
||||
}
|
||||
|
||||
static void vtimer_restore_state(struct kvm_vcpu *vcpu)
|
||||
static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
|
||||
|
||||
soft_timer_cancel(&timer->bg_timer);
|
||||
}
|
||||
|
||||
static void timer_restore_state(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
|
||||
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
|
||||
unsigned long flags;
|
||||
|
||||
if (!timer->enabled)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
if (vtimer->loaded)
|
||||
if (ctx->loaded)
|
||||
goto out;
|
||||
|
||||
if (timer->enabled) {
|
||||
write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
|
||||
switch (index) {
|
||||
case TIMER_VTIMER:
|
||||
write_sysreg_el0(ctx->cnt_cval, cntv_cval);
|
||||
isb();
|
||||
write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
|
||||
write_sysreg_el0(ctx->cnt_ctl, cntv_ctl);
|
||||
break;
|
||||
case TIMER_PTIMER:
|
||||
write_sysreg_el0(ctx->cnt_cval, cntp_cval);
|
||||
isb();
|
||||
write_sysreg_el0(ctx->cnt_ctl, cntp_ctl);
|
||||
break;
|
||||
case NR_KVM_TIMERS:
|
||||
BUG();
|
||||
}
|
||||
|
||||
vtimer->loaded = true;
|
||||
trace_kvm_timer_restore_state(ctx);
|
||||
|
||||
ctx->loaded = true;
|
||||
out:
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
||||
|
||||
vtimer_restore_state(vcpu);
|
||||
|
||||
soft_timer_cancel(&timer->bg_timer);
|
||||
}
|
||||
|
||||
static void set_cntvoff(u64 cntvoff)
|
||||
{
|
||||
u32 low = lower_32_bits(cntvoff);
|
||||
|
@ -425,23 +475,32 @@ static void set_cntvoff(u64 cntvoff)
|
|||
kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
|
||||
}
|
||||
|
||||
static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
|
||||
static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
|
||||
{
|
||||
int r;
|
||||
r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
|
||||
r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
|
||||
WARN_ON(r);
|
||||
}
|
||||
|
||||
static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
|
||||
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
bool phys_active;
|
||||
struct kvm_vcpu *vcpu = ctx->vcpu;
|
||||
bool phys_active = false;
|
||||
|
||||
/*
|
||||
* Update the timer output so that it is likely to match the
|
||||
* state we're about to restore. If the timer expires between
|
||||
* this point and the register restoration, we'll take the
|
||||
* interrupt anyway.
|
||||
*/
|
||||
kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
|
||||
|
||||
if (irqchip_in_kernel(vcpu->kvm))
|
||||
phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
|
||||
else
|
||||
phys_active = vtimer->irq.level;
|
||||
set_vtimer_irq_phys_active(vcpu, phys_active);
|
||||
phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
|
||||
|
||||
phys_active |= ctx->irq.level;
|
||||
|
||||
set_timer_irq_phys_active(ctx, phys_active);
|
||||
}
|
||||
|
||||
static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
|
||||
|
@ -466,28 +525,32 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
|
|||
|
||||
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
||||
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
|
||||
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
|
||||
struct timer_map map;
|
||||
|
||||
if (unlikely(!timer->enabled))
|
||||
return;
|
||||
|
||||
if (static_branch_likely(&has_gic_active_state))
|
||||
kvm_timer_vcpu_load_gic(vcpu);
|
||||
else
|
||||
get_timer_map(vcpu, &map);
|
||||
|
||||
if (static_branch_likely(&has_gic_active_state)) {
|
||||
kvm_timer_vcpu_load_gic(map.direct_vtimer);
|
||||
if (map.direct_ptimer)
|
||||
kvm_timer_vcpu_load_gic(map.direct_ptimer);
|
||||
} else {
|
||||
kvm_timer_vcpu_load_nogic(vcpu);
|
||||
}
|
||||
|
||||
set_cntvoff(vtimer->cntvoff);
|
||||
set_cntvoff(map.direct_vtimer->cntvoff);
|
||||
|
||||
vtimer_restore_state(vcpu);
|
||||
kvm_timer_unblocking(vcpu);
|
||||
|
||||
/* Set the background timer for the physical timer emulation. */
|
||||
phys_timer_emulate(vcpu);
|
||||
timer_restore_state(map.direct_vtimer);
|
||||
if (map.direct_ptimer)
|
||||
timer_restore_state(map.direct_ptimer);
|
||||
|
||||
/* If the timer fired while we weren't running, inject it now */
|
||||
if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
|
||||
kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
|
||||
if (map.emul_ptimer)
|
||||
timer_emulate(map.emul_ptimer);
|
||||
}
|
||||
|
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)

@ -509,15 +572,20 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
if (unlikely(!timer->enabled))
return;
vtimer_save_state(vcpu);
get_timer_map(vcpu, &map);
timer_save_state(map.direct_vtimer);
if (map.direct_ptimer)
timer_save_state(map.direct_ptimer);
/*
* Cancel the physical timer emulation, because the only case where we
* Cancel soft timer emulation, because the only case where we
* need it after a vcpu_put is in the context of a sleeping VCPU, and
* in that case we already factor in the deadline for the physical
* timer when scheduling the bg_timer.

@ -525,7 +593,11 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
* In any case, we re-schedule the hrtimer for the physical timer when
* coming back to the VCPU thread in kvm_timer_vcpu_load().
*/
soft_timer_cancel(&timer->phys_timer);
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
if (swait_active(kvm_arch_vcpu_wq(vcpu)))
kvm_timer_blocking(vcpu);
/*
* The kernel may decide to run userspace after calling vcpu_put, so

@ -534,8 +606,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
* counter of non-VHE case. For VHE, the virtual counter uses a fixed
* virtual offset of zero, so no need to zero CNTVOFF_EL2 register.
*/
if (!has_vhe())
set_cntvoff(0);
set_cntvoff(0);
}

/*

@ -550,7 +621,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
if (!kvm_timer_should_fire(vtimer)) {
kvm_timer_update_irq(vcpu, false, vtimer);
if (static_branch_likely(&has_gic_active_state))
set_vtimer_irq_phys_active(vcpu, false);
set_timer_irq_phys_active(vtimer, false);
else
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

@ -558,7 +629,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)

void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
if (unlikely(!timer->enabled))
return;

@ -569,9 +640,10 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
get_timer_map(vcpu, &map);
/*
* The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8

@ -579,12 +651,22 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
* resets the timer to be disabled and unmasked and is compliant with
* the ARMv7 architecture.
*/
vtimer->cnt_ctl = 0;
ptimer->cnt_ctl = 0;
kvm_timer_update_state(vcpu);
vcpu_vtimer(vcpu)->cnt_ctl = 0;
vcpu_ptimer(vcpu)->cnt_ctl = 0;
if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
if (timer->enabled) {
kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
if (irqchip_in_kernel(vcpu->kvm)) {
kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
if (map.direct_ptimer)
kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
}
}
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
return 0;
}

@ -610,56 +692,76 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
vcpu_ptimer(vcpu)->cntvoff = 0;
ptimer->cntvoff = 0;
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->bg_timer.function = kvm_bg_timer_expire;
hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->phys_timer.function = kvm_phys_timer_expire;
hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vtimer->hrtimer.function = kvm_hrtimer_expire;
ptimer->hrtimer.function = kvm_hrtimer_expire;
vtimer->irq.irq = default_vtimer_irq.irq;
ptimer->irq.irq = default_ptimer_irq.irq;
vtimer->host_timer_irq = host_vtimer_irq;
ptimer->host_timer_irq = host_ptimer_irq;
vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
vtimer->vcpu = vcpu;
ptimer->vcpu = vcpu;
}

static void kvm_timer_init_interrupt(void *info)
{
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
struct arch_timer_context *timer;
bool level;
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
vtimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
timer = vcpu_vtimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
case KVM_REG_ARM_TIMER_CNT:
timer = vcpu_vtimer(vcpu);
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
break;
case KVM_REG_ARM_TIMER_CVAL:
vtimer->cnt_cval = value;
timer = vcpu_vtimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
break;
case KVM_REG_ARM_PTIMER_CTL:
ptimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
case KVM_REG_ARM_PTIMER_CVAL:
ptimer->cnt_cval = value;
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
break;
default:
return -1;
}
kvm_timer_update_state(vcpu);
level = kvm_timer_should_fire(timer);
kvm_timer_update_irq(vcpu, level, timer);
timer_emulate(timer);
return 0;
}

@ -679,26 +781,113 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
return read_timer_ctl(vtimer);
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CTL);
case KVM_REG_ARM_TIMER_CNT:
return kvm_phys_timer_read() - vtimer->cntvoff;
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CNT);
case KVM_REG_ARM_TIMER_CVAL:
return vtimer->cnt_cval;
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CVAL);
case KVM_REG_ARM_PTIMER_CTL:
return read_timer_ctl(ptimer);
case KVM_REG_ARM_PTIMER_CVAL:
return ptimer->cnt_cval;
return kvm_arm_timer_read(vcpu,
vcpu_ptimer(vcpu), TIMER_REG_CTL);
case KVM_REG_ARM_PTIMER_CNT:
return kvm_phys_timer_read();
return kvm_arm_timer_read(vcpu,
vcpu_vtimer(vcpu), TIMER_REG_CNT);
case KVM_REG_ARM_PTIMER_CVAL:
return kvm_arm_timer_read(vcpu,
vcpu_ptimer(vcpu), TIMER_REG_CVAL);
}
return (u64)-1;
}
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
struct arch_timer_context *timer,
enum kvm_arch_timer_regs treg)
{
u64 val;
switch (treg) {
case TIMER_REG_TVAL:
val = kvm_phys_timer_read() - timer->cntvoff - timer->cnt_cval;
break;
case TIMER_REG_CTL:
val = read_timer_ctl(timer);
break;
case TIMER_REG_CVAL:
val = timer->cnt_cval;
break;
case TIMER_REG_CNT:
val = kvm_phys_timer_read() - timer->cntvoff;
break;
default:
BUG();
}
return val;
}
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg)
{
u64 val;
preempt_disable();
kvm_timer_vcpu_put(vcpu);
val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);
kvm_timer_vcpu_load(vcpu);
preempt_enable();
return val;
}
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
struct arch_timer_context *timer,
enum kvm_arch_timer_regs treg,
u64 val)
{
switch (treg) {
case TIMER_REG_TVAL:
timer->cnt_cval = val - kvm_phys_timer_read() - timer->cntvoff;
break;
case TIMER_REG_CTL:
timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
break;
case TIMER_REG_CVAL:
timer->cnt_cval = val;
break;
default:
BUG();
}
}
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg,
u64 val)
{
preempt_disable();
kvm_timer_vcpu_put(vcpu);
kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);
kvm_timer_vcpu_load(vcpu);
preempt_enable();
}
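One relationship worth keeping in mind for the register accessors above: every guest-visible count is the host physical counter minus a per-context offset, and the ptimer context simply keeps that offset at zero. A minimal, self-contained sketch of that arithmetic (hypothetical helper names, shown only to make the TIMER_REG_CNT and KVM_REG_ARM_TIMER_CNT paths easier to follow):

#include <stdint.h>

/* CNTVCT = CNTPCT - CNTVOFF; a ptimer context keeps cntvoff == 0. */
static uint64_t guest_count(uint64_t phys_count, uint64_t cntvoff)
{
        return phys_count - cntvoff;
}

/*
 * Writing KVM_REG_ARM_TIMER_CNT to 'value' means choosing the offset that
 * makes guest_count() read back 'value' right now, which is what the
 * update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value) call above does.
 */
static uint64_t cntvoff_for_count(uint64_t phys_count, uint64_t value)
{
        return phys_count - value;
}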
static int kvm_timer_starting_cpu(unsigned int cpu)
{
kvm_timer_init_interrupt(NULL);

@ -724,6 +913,8 @@ int kvm_timer_hyp_init(bool has_gic)
return -ENODEV;
}
/* First, do the virtual EL1 timer irq */
if (info->virtual_irq <= 0) {
kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
info->virtual_irq);

@ -734,15 +925,15 @@ int kvm_timer_hyp_init(bool has_gic)
host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
host_vtimer_irq);
host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
}
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
"kvm guest timer", kvm_get_running_vcpus());
"kvm guest vtimer", kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
host_vtimer_irq, err);
return err;
}

@ -760,6 +951,43 @@ int kvm_timer_hyp_init(bool has_gic)
kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
/* Now let's do the physical EL1 timer irq */
if (info->physical_irq > 0) {
host_ptimer_irq = info->physical_irq;
host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
host_ptimer_irq);
host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
}
err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
"kvm guest ptimer", kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
host_ptimer_irq, err);
return err;
}
if (has_gic) {
err = irq_set_vcpu_affinity(host_ptimer_irq,
kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
goto out_free_irq;
}
}
kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
} else if (has_vhe()) {
kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
info->physical_irq);
err = -ENODEV;
goto out_free_irq;
}
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
"kvm/arm/timer:starting", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);

@ -771,7 +999,7 @@ int kvm_timer_hyp_init(bool has_gic)
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
soft_timer_cancel(&timer->bg_timer);
}

@ -807,16 +1035,18 @@ bool kvm_arch_timer_get_input_level(int vintid)
if (vintid == vcpu_vtimer(vcpu)->irq.irq)
timer = vcpu_vtimer(vcpu);
else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
timer = vcpu_ptimer(vcpu);
else
BUG(); /* We only map the vtimer so far */
BUG();
return kvm_timer_should_fire(timer);
}
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
int ret;
if (timer->enabled)

@ -834,19 +1064,33 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
get_timer_map(vcpu, &map);
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_vtimer->host_timer_irq,
map.direct_vtimer->irq.irq,
kvm_arch_timer_get_input_level);
if (ret)
return ret;
if (map.direct_ptimer) {
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_ptimer->host_timer_irq,
map.direct_ptimer->irq.irq,
kvm_arch_timer_get_input_level);
}
if (ret)
return ret;
no_vgic:
timer->enabled = 1;
return 0;
}
/*
* On VHE system, we only need to configure trap on physical timer and counter
* accesses in EL0 and EL1 once, not for every world switch.
* On VHE system, we only need to configure the EL2 timer trap register once,
* not for every world switch.
* The host kernel runs at EL2 with HCR_EL2.TGE == 1,
* and this makes those bits have no effect for the host kernel execution.
*/

@ -857,11 +1101,11 @@ void kvm_timer_init_vhe(void)
u64 val;
/*
* Disallow physical timer access for the guest.
* Physical counter access is allowed.
* VHE systems allow the guest direct access to the EL1 physical
* timer/counter.
*/
val = read_sysreg(cnthctl_el2);
val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
write_sysreg(val, cnthctl_el2);
}
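The kvm_timer_init_vhe() hunk above flips the CNTHCTL_EL2 policy from trapping the guest's EL1 physical timer accesses to allowing them, which is what direct physical timer assignment on VHE relies on. A hedged, standalone sketch of that bit manipulation, assuming the usual CNTHCTL_EL1PCTEN/CNTHCTL_EL1PCEN meanings and the same cnthctl_shift convention used in the hunk:

/* Sketch only; the bit names mirror the hunk, the shift handling is assumed. */
#define CNTHCTL_EL1PCTEN	(1UL << 0)	/* guest access to the physical counter */
#define CNTHCTL_EL1PCEN		(1UL << 1)	/* guest access to the physical timer   */

static unsigned long cnthctl_allow_ptimer(unsigned long cnthctl,
					   unsigned int cnthctl_shift)
{
	/* Setting (rather than clearing) these bits stops the traps. */
	return cnthctl | (CNTHCTL_EL1PCEN << cnthctl_shift) |
			 (CNTHCTL_EL1PCTEN << cnthctl_shift);
}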
@ -65,7 +65,6 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present;

@ -142,7 +141,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_vgic_early_init(kvm);
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
kvm->arch.vmid.vmid_gen = 0;
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->arch.max_vcpus = vgic_present ?

@ -336,13 +335,11 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_timer_schedule(vcpu);
kvm_vgic_v4_enable_doorbell(vcpu);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_timer_unschedule(vcpu);
kvm_vgic_v4_disable_doorbell(vcpu);
}

@ -472,37 +469,31 @@ void force_vm_exit(const cpumask_t *mask)
/**
* need_new_vmid_gen - check that the VMID is still valid
* @kvm: The VM's VMID to check
* @vmid: The VMID to check
*
* return true if there is a new generation of VMIDs being used
*
* The hardware supports only 256 values with the value zero reserved for the
* host, so we check if an assigned value belongs to a previous generation,
* which which requires us to assign a new value. If we're the first to use a
* VMID for the new generation, we must flush necessary caches and TLBs on all
* CPUs.
* The hardware supports a limited set of values with the value zero reserved
* for the host, so we check if an assigned value belongs to a previous
* generation, which which requires us to assign a new value. If we're the
* first to use a VMID for the new generation, we must flush necessary caches
* and TLBs on all CPUs.
*/
static bool need_new_vmid_gen(struct kvm *kvm)
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}

/**
* update_vttbr - Update the VTTBR with a valid VMID before the guest runs
* @kvm The guest that we are about to run
*
* Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
* VM has a valid VMID, otherwise assigns a new one and flushes corresponding
* caches and TLBs.
* update_vmid - Update the vmid with a valid VMID for the current generation
* @kvm: The guest that struct vmid belongs to
* @vmid: The stage-2 VMID information struct
*/
static void update_vttbr(struct kvm *kvm)
static void update_vmid(struct kvm_vmid *vmid)
{
phys_addr_t pgd_phys;
u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
if (!need_new_vmid_gen(kvm))
if (!need_new_vmid_gen(vmid))
return;
spin_lock(&kvm_vmid_lock);

@ -512,7 +503,7 @@ static void update_vttbr(struct kvm *kvm)
* already allocated a valid vmid for this vm, then this vcpu should
* use the same vmid.
*/
if (!need_new_vmid_gen(kvm)) {
if (!need_new_vmid_gen(vmid)) {
spin_unlock(&kvm_vmid_lock);
return;
}

@ -536,18 +527,12 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
kvm->arch.vmid = kvm_next_vmid;
vmid->vmid = kvm_next_vmid;
kvm_next_vmid++;
kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
smp_wmb();
WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
spin_unlock(&kvm_vmid_lock);
}
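The VMID handling above is a generation-counter scheme: a global generation is bumped whenever the small VMID space wraps, and each VM remembers the generation its VMID was allocated under, so a single comparison tells whether the VMID can still be trusted. A compact sketch with hypothetical names, just to make the need_new_vmid_gen()/update_vmid() pairing explicit (not the kernel's actual types or locking):

#include <stdint.h>

struct vmid_info {
	uint64_t vmid_gen;	/* generation this VMID was allocated in */
	uint32_t vmid;		/* hardware VMID; 0 is reserved for the host */
};

static uint64_t kvm_vmid_gen_sketch = 1;	/* bumped whenever the VMID space wraps */

static int vmid_needs_refresh(const struct vmid_info *v)
{
	/* A VMID from an older generation may already be reused by another VM. */
	return v->vmid_gen != kvm_vmid_gen_sketch;
}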
@ -690,7 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
cond_resched();
update_vttbr(vcpu->kvm);
update_vmid(&vcpu->kvm->arch.vmid);
check_vcpu_requests(vcpu);

@ -739,7 +724,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
isb(); /* Ensure work in x_flush_hwstate is committed */

@ -765,7 +750,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_vcpu_run_vhe(vcpu);
kvm_arm_vhe_guest_exit();
} else {
ret = kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu);
ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
}
vcpu->mode = OUTSIDE_GUEST_MODE;
@ -1417,10 +1402,6 @@ static inline void hyp_cpu_pm_exit(void)
static int init_common_resources(void)
{
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
kvm_set_ipa_limit();
return 0;

@ -1561,6 +1542,7 @@ static int init_hyp_mode(void)
kvm_cpu_context_t *cpu_ctxt;
cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
kvm_init_host_cpu_context(cpu_ctxt, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) {

@ -1571,7 +1553,7 @@ static int init_hyp_mode(void)
err = hyp_map_aux_data();
if (err)
kvm_err("Cannot map host auxilary data: %d\n", err);
kvm_err("Cannot map host auxiliary data: %d\n", err);
return 0;
@ -226,7 +226,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
int i;
u32 elrsr;
elrsr = read_gicreg(ICH_ELSR_EL2);
elrsr = read_gicreg(ICH_ELRSR_EL2);
write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);
@ -908,6 +908,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
*/
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
phys_addr_t pgd_phys;
pgd_t *pgd;
if (kvm->arch.pgd != NULL) {

@ -920,7 +921,12 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
if (!pgd)
return -ENOMEM;
pgd_phys = virt_to_phys(pgd);
if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
return -EINVAL;
kvm->arch.pgd = pgd;
kvm->arch.pgd_phys = pgd_phys;
return 0;
}

@ -1008,6 +1014,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
kvm->arch.pgd_phys = 0;
}
spin_unlock(&kvm->mmu_lock);

@ -1396,14 +1403,6 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
return false;
}
static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
return kvm_vcpu_dabt_iswrite(vcpu);
}
/**
* stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry
@ -1598,14 +1597,13 @@ static void kvm_send_hwpoison_signal(unsigned long address,
static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
unsigned long hva)
{
gpa_t gpa_start, gpa_end;
gpa_t gpa_start;
hva_t uaddr_start, uaddr_end;
size_t size;
size = memslot->npages * PAGE_SIZE;
gpa_start = memslot->base_gfn << PAGE_SHIFT;
gpa_end = gpa_start + size;
uaddr_start = memslot->userspace_addr;
uaddr_end = uaddr_start + size;
@ -2,6 +2,7 @@
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H
#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
@ -262,10 +263,114 @@ TRACE_EVENT(kvm_timer_update_irq,
__entry->vcpu_id, __entry->irq, __entry->level)
);

TRACE_EVENT(kvm_get_timer_map,
TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
TP_ARGS(vcpu_id, map),
TP_STRUCT__entry(
__field( unsigned long, vcpu_id )
__field( int, direct_vtimer )
__field( int, direct_ptimer )
__field( int, emul_ptimer )
),
TP_fast_assign(
__entry->vcpu_id = vcpu_id;
__entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer);
__entry->direct_ptimer =
(map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
__entry->emul_ptimer =
(map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
),
TP_printk("VCPU: %ld, dv: %d, dp: %d, ep: %d",
__entry->vcpu_id,
__entry->direct_vtimer,
__entry->direct_ptimer,
__entry->emul_ptimer)
);
TRACE_EVENT(kvm_timer_save_state,
TP_PROTO(struct arch_timer_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field( unsigned long, ctl )
__field( unsigned long long, cval )
__field( int, timer_idx )
),
TP_fast_assign(
__entry->ctl = ctx->cnt_ctl;
__entry->cval = ctx->cnt_cval;
__entry->timer_idx = arch_timer_ctx_index(ctx);
),
TP_printk(" CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
__entry->ctl,
__entry->cval,
__entry->timer_idx)
);
TRACE_EVENT(kvm_timer_restore_state,
TP_PROTO(struct arch_timer_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field( unsigned long, ctl )
__field( unsigned long long, cval )
__field( int, timer_idx )
),
TP_fast_assign(
__entry->ctl = ctx->cnt_ctl;
__entry->cval = ctx->cnt_cval;
__entry->timer_idx = arch_timer_ctx_index(ctx);
),
TP_printk("CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
__entry->ctl,
__entry->cval,
__entry->timer_idx)
);
TRACE_EVENT(kvm_timer_hrtimer_expire,
TP_PROTO(struct arch_timer_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field( int, timer_idx )
),
TP_fast_assign(
__entry->timer_idx = arch_timer_ctx_index(ctx);
),
TP_printk("arch_timer_ctx_index: %d", __entry->timer_idx)
);
TRACE_EVENT(kvm_timer_emulate,
TP_PROTO(struct arch_timer_context *ctx, bool should_fire),
TP_ARGS(ctx, should_fire),
TP_STRUCT__entry(
__field( int, timer_idx )
__field( bool, should_fire )
),
TP_fast_assign(
__entry->timer_idx = arch_timer_ctx_index(ctx);
__entry->should_fire = should_fire;
),
TP_printk("arch_timer_ctx_index: %d (should_fire: %d)",
__entry->timer_idx, __entry->should_fire)
);

#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm
#define TRACE_INCLUDE_PATH ../../virt/kvm/arm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
@ -589,7 +589,7 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
*/
int vgic_v3_probe(const struct gic_kvm_info *info)
{
u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
int ret;
/*

@ -679,7 +679,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
kvm_call_hyp(__vgic_v3_save_aprs, vcpu);