commit a394cf6e85
Merge branch 'kvm-arm64/misc-5.9' into kvmarm-master/next-WIP

Signed-off-by: Marc Zyngier <maz@kernel.org>

@@ -1182,22 +1182,6 @@ config HARDEN_BRANCH_PREDICTOR
 
 	  If unsure, say Y.
 
-config HARDEN_EL2_VECTORS
-	bool "Harden EL2 vector mapping against system register leak" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors can
-	  be used to leak privileged information such as the vector base
-	  register, resulting in a potential defeat of the EL2 layout
-	  randomization.
-
-	  This config option will map the vectors to a fixed location,
-	  independent of the EL2 code mapping, so that revealing VBAR_EL2
-	  to an attacker does not give away any extra information. This
-	  only gets enabled on affected CPUs.
-
-	  If unsure, say Y.
-
 config ARM64_SSBD
 	bool "Speculative Store Bypass Disable" if EXPERT
 	default y
@@ -191,7 +191,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
	kern_hyp_va	\vcpu
.endm

#endif
@@ -238,14 +238,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -270,64 +270,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -337,12 +337,12 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -366,7 +366,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
@@ -637,7 +637,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 	return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#if defined(CONFIG_HARDEN_EL2_VECTORS)
+#ifdef CONFIG_RANDOMIZE_BASE
 
 static const struct midr_range ca57_a72[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -882,7 +882,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = check_branch_predictor,
 	},
-#ifdef CONFIG_HARDEN_EL2_VECTORS
+#ifdef CONFIG_RANDOMIZE_BASE
 	{
 		.desc = "EL2 vector hardening",
 		.capability = ARM64_HARDEN_EL2_VECTORS,
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
 	  virtual machines.
 
 config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS
+	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
 
 endif # KVM
 
@@ -456,7 +456,6 @@ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 
 /**
  * update_vmid - Update the vmid with a valid VMID for the current generation
- * @kvm: The guest that struct vmid belongs to
  * @vmid: The stage-2 VMID information struct
  */
 static void update_vmid(struct kvm_vmid *vmid)
@@ -89,7 +89,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -119,13 +119,13 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int ret = 0;
 
 	run->exit_reason = KVM_EXIT_DEBUG;
-	run->debug.arch.hsr = hsr;
+	run->debug.arch.hsr = esr;
 
-	switch (ESR_ELx_EC(hsr)) {
+	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
 		/* fall through */
@@ -135,8 +135,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	case ESR_ELx_EC_BRK64:
 		break;
 	default:
-		kvm_err("%s: un-handled case hsr: %#08x\n",
-			__func__, (unsigned int) hsr);
+		kvm_err("%s: un-handled case esr: %#08x\n",
+			__func__, (unsigned int) esr);
 		ret = -1;
 		break;
 	}
@@ -146,10 +146,10 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
-	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-		      hsr, esr_get_class_string(hsr));
+	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -200,10 +200,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	u8 hsr_ec = ESR_ELx_EC(hsr);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u8 esr_ec = ESR_ELx_EC(esr);
 
-	return arm_exit_handlers[hsr_ec];
+	return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -241,15 +241,15 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
 		/*
 		 * HVC/SMC already have an adjusted PC, which we need
 		 * to correct in order to return to after having
 		 * injected the SError.
 		 */
-		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
+		    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
 			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
 			*vcpu_pc(vcpu) -= adj;
 		}
@@ -307,5 +307,5 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
@@ -51,7 +51,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
 	int cond;
 
 	/* Top two bits non-zero? Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+	if (kvm_vcpu_get_esr(vcpu) >> 30)
 		return true;
 
 	/* Is condition field valid? */
@@ -199,7 +199,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
 	bool vhe, sve_guest, sve_host;
-	u8 hsr_ec;
+	u8 esr_ec;
 
 	if (!system_supports_fpsimd())
 		return false;
@@ -219,14 +219,14 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		vhe = has_vhe();
 	}
 
-	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
-	    hsr_ec != ESR_ELx_EC_SVE)
+	esr_ec = kvm_vcpu_trap_get_class(vcpu);
+	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
+	    esr_ec != ESR_ELx_EC_SVE)
 		return false;
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest)
-		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
 			return false;
 
 	/* Valid trap. Switch the context: */
@@ -284,7 +284,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
-	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 	int rt = kvm_vcpu_sys_get_rt(vcpu);
 	u64 val = vcpu_get_reg(vcpu, rt);
 
@@ -379,7 +379,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	u64 val;
 
 	if (!vcpu_has_ptrauth(vcpu) ||
-	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;
 
 	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
@@ -426,7 +426,7 @@ static int __vgic_v3_bpr_min(void)
 
 static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
 	return crm != 8;
@@ -978,7 +978,7 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	bool is_read;
 	u32 sysreg;
 
-	esr = kvm_vcpu_get_hsr(vcpu);
+	esr = kvm_vcpu_get_esr(vcpu);
 	if (vcpu_mode_is_32bit(vcpu)) {
 		if (!kvm_condition_valid(vcpu)) {
 			__kvm_skip_instr(vcpu);
@@ -2116,7 +2116,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * For RAS the host kernel may handle this abort.
 		 * There is no need to pass the error into the guest.
 		 */
-		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
 			return 1;
 
 		if (unlikely(!is_iabt)) {
@@ -2125,7 +2125,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 	}
 
-	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
@@ -2134,7 +2134,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
-			(unsigned long)kvm_vcpu_get_hsr(vcpu));
+			(unsigned long)kvm_vcpu_get_esr(vcpu));
 		return -EFAULT;
 	}
 
@@ -2220,10 +2220,10 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 				struct sys_reg_params *params)
 {
-	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
 	int cp = -1;
 
-	switch(hsr_ec) {
+	switch (esr_ec) {
 	case ESR_ELx_EC_CP15_32:
 	case ESR_ELx_EC_CP15_64:
 		cp = 15;
@@ -2252,17 +2252,17 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 			    size_t nr_global)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
-	int Rt2 = (hsr >> 10) & 0x1f;
+	int Rt2 = (esr >> 10) & 0x1f;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
-	params.CRm = (hsr >> 1) & 0xf;
-	params.is_write = ((hsr & 1) == 0);
+	params.CRm = (esr >> 1) & 0xf;
+	params.is_write = ((esr & 1) == 0);
 
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op1 = (esr >> 16) & 0xf;
 	params.Op2 = 0;
 	params.CRn = 0;
 
@@ -2304,18 +2304,18 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 			    size_t nr_global)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
-	params.CRm = (hsr >> 1) & 0xf;
+	params.CRm = (esr >> 1) & 0xf;
 	params.regval = vcpu_get_reg(vcpu, Rt);
-	params.is_write = ((hsr & 1) == 0);
-	params.CRn = (hsr >> 10) & 0xf;
+	params.is_write = ((esr & 1) == 0);
+	params.CRn = (esr >> 10) & 0xf;
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 14) & 0x7;
-	params.Op2 = (hsr >> 17) & 0x7;
+	params.Op1 = (esr >> 14) & 0x7;
+	params.Op2 = (esr >> 17) & 0x7;
 
 	if (!emulate_cp(vcpu, &params, global, nr_global)) {
 		if (!params.is_write)
@@ -2397,7 +2397,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
-	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	unsigned long esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int ret;
 
@@ -48,7 +48,7 @@ __init void kvm_compute_layout(void)
 	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
 	tag_val = hyp_va_msb;
 
-	if (tag_lsb != (vabits_actual - 1)) {
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
 		/* We have some free bits to insert a random tag. */
 		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
 	}
@@ -100,19 +100,33 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 
 /**
  * kvm_arch_set_irq_inatomic: fast-path for irqfd injection
- *
- * Currently only direct MSI injection is supported.
  */
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 			      struct kvm *kvm, int irq_source_id, int level,
 			      bool line_status)
 {
-	if (e->type == KVM_IRQ_ROUTING_MSI && vgic_has_its(kvm) && level) {
+	if (!level)
+		return -EWOULDBLOCK;
+
+	switch (e->type) {
+	case KVM_IRQ_ROUTING_MSI: {
 		struct kvm_msi msi;
 
+		if (!vgic_has_its(kvm))
+			break;
+
 		kvm_populate_msi(e, &msi);
-		if (!vgic_its_inject_cached_translation(kvm, &msi))
-			return 0;
+		return vgic_its_inject_cached_translation(kvm, &msi);
+	}
+
+	case KVM_IRQ_ROUTING_IRQCHIP:
+		/*
+		 * Injecting SPIs is always possible in atomic context
+		 * as long as the damn vgic is initialized.
+		 */
+		if (unlikely(!vgic_initialized(kvm)))
+			break;
+		return vgic_irqfd_set_irq(e, kvm, irq_source_id, 1, line_status);
 	}
 
 	return -EWOULDBLOCK;
@@ -757,9 +757,8 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 
 	db = (u64)msi->address_hi << 32 | msi->address_lo;
 	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
-
 	if (!irq)
-		return -1;
+		return -EWOULDBLOCK;
 
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
@@ -389,7 +389,7 @@ u64 vgic_sanitise_outer_cacheability(u64 field)
 	case GIC_BASER_CACHE_nC:
 		return field;
 	default:
-		return GIC_BASER_CACHE_nC;
+		return GIC_BASER_CACHE_SameAsInner;
 	}
 }
 