Merge branch 'kvm-arm64/erratum-1319367' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into for-next/core
Similarly to erratum 1165522, which affects Cortex-A76, Cortex-A57 and
Cortex-A72 suffer from errata 1319537 and 1319367 respectively, potentially
resulting in TLB corruption if the CPU speculates an AT instruction while
switching guests.

The fix is slightly more involved since we don't have VHE to help us here,
but the idea is the same: when switching a guest in, we must prevent any
speculated AT from being able to parse the page tables until S2 is up and
running. Only at this stage can we allow AT to take place.

For this, we always restore the guest sysregs first, except for its SCTLR
and TCR registers, which must be set with SCTLR.M=1 and
TCR.EPD{0,1} = {1, 1}, effectively disabling the PTW and TLB allocation.
Once S2 is set up, we restore the guest's SCTLR and TCR. Similar things
must be done on TLB invalidation...

* 'kvm-arm64/erratum-1319367' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms:
  arm64: Enable and document ARM errata 1319367 and 1319537
  arm64: KVM: Prevent speculative S1 PTW when restoring vcpu context
  arm64: KVM: Disable EL1 PTW when invalidating S2 TLBs
  arm64: KVM: Reorder system register restoration and stage-2 activation
  arm64: Add ARM64_WORKAROUND_1319367 for all A57 and A72 versions
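In outline, the non-VHE guest-entry ordering described above looks like the
sketch below. This is a condensed illustration assembled from the hunks that
follow, not the literal hyp code; the wrapper name sketch_enter_guest() is
made up for the example, while the helpers it calls are the real ones from
the diff.

	/*
	 * Illustrative sketch only: condensed from the hunks below.
	 */
	static void sketch_enter_guest(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		/*
		 * Restore the guest's EL1 sysregs, but keep SCTLR_EL1.M = 1
		 * (inherited from the host) and set TCR_EL1.EPD0/EPD1 = 1 so
		 * that a speculated AT can neither walk the page tables nor
		 * allocate TLB entries (see __sysreg_restore_el1_state()).
		 */
		__sysreg_restore_state_nvhe(ctxt);

		/* Enable stage-2 translation for the guest's VMID. */
		__activate_vm(kern_hyp_va(vcpu->kvm));
		isb();	/* S2 is now guaranteed to be live */

		/* Only now is AT safe: restore the real SCTLR, then TCR. */
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
	}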
Documentation/arm64/silicon-errata.rst
@@ -70,8 +70,12 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A57      | #1319537        | ARM64_ERRATUM_1319367       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A72      | #853709         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A72      | #1319367        | ARM64_ERRATUM_1319367       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
arch/arm64/Kconfig
@@ -538,6 +538,16 @@ config ARM64_ERRATUM_1286807
 	  invalidated has been observed by other observers. The
 	  workaround repeats the TLBI+DSB operation.

+config ARM64_ERRATUM_1319367
+	bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	default y
+	help
+	  This option adds workarounds for ARM Cortex-A57 erratum 1319537
+	  and A72 erratum 1319367.
+
+	  Cortex-A57 and A72 cores could end up with corrupted TLBs by
+	  speculating an AT instruction during a guest context switch.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_1463225
arch/arm64/include/asm/cpucaps.h
@@ -55,7 +55,8 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
 #define ARM64_WORKAROUND_1542419		47
+#define ARM64_WORKAROUND_1319367		48

-#define ARM64_NCAPS				48
+#define ARM64_NCAPS				49

 #endif /* __ASM_CPUCAPS_H */
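Every hunk below gates the workaround on
cpus_have_const_cap(ARM64_WORKAROUND_1319367), which is why the capability
needs a slot here. For context, that helper resolves to a per-capability
static branch once the capabilities are finalised, so unaffected systems pay
essentially nothing on these hot paths. Roughly (a from-memory, simplified
sketch of the 5.4-era helper in <asm/cpufeature.h>, not part of this diff):

	static inline bool cpus_have_const_cap(int num)
	{
		if (num >= ARM64_NCAPS)
			return false;
		if (static_branch_likely(&arm64_const_caps_ready))
			/* patched-in static key, zero-cost when clear */
			return static_branch_unlikely(&cpu_hwcap_keys[num]);
		else
			/* early boot: fall back to the capability bitmap */
			return cpus_have_cap(num);
	}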
arch/arm64/kernel/cpu_errata.c
@@ -668,9 +668,9 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 	return is_midr_in_range(midr, &range) && has_dic;
 }

-#ifdef CONFIG_HARDEN_EL2_VECTORS
+#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367)

-static const struct midr_range arm64_harden_el2_vectors[] = {
+static const struct midr_range ca57_a72[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 	{},
@@ -864,7 +864,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 		.desc = "EL2 vector hardening",
 		.capability = ARM64_HARDEN_EL2_VECTORS,
-		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
+		ERRATA_MIDR_RANGE_LIST(ca57_a72),
 	},
 #endif
 	{
@@ -919,6 +919,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.matches = has_neoverse_n1_erratum_1542419,
 		.cpu_enable = cpu_enable_trap_ctr_access,
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1319367
+	{
+		.desc = "ARM erratum 1319367",
+		.capability = ARM64_WORKAROUND_1319367,
+		ERRATA_MIDR_RANGE_LIST(ca57_a72),
+	},
+#endif
 	{
 	}
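ERRATA_MIDR_RANGE_LIST() hangs the NULL-terminated MIDR list off the
capability entry and wires up the generic matcher, which is all it takes to
mark every A57 and A72 revision as affected. Roughly (a from-memory sketch of
the helper macros already in cpu_errata.c, shown for context, not part of
this diff):

	#define CAP_MIDR_RANGE_LIST(list)			\
		.matches = is_affected_midr_range_list,		\
		.midr_range_list = list

	#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
		CAP_MIDR_RANGE_LIST(midr_list)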
arch/arm64/kvm/hyp/switch.c
@@ -118,6 +118,20 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 	}

 	write_sysreg(val, cptr_el2);
+
+	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
+
+		isb();
+		/*
+		 * At this stage, and thanks to the above isb(), S2 is
+		 * configured and enabled. We can now restore the guest's S1
+		 * configuration: SCTLR, and only then TCR.
+		 */
+		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
+		isb();
+		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
+	}
 }

 static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
@@ -159,6 +173,23 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
 	u64 mdcr_el2 = read_sysreg(mdcr_el2);

+	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+		u64 val;
+
+		/*
+		 * Set the TCR and SCTLR registers in the exact opposite
+		 * sequence as __activate_traps_nvhe (first prevent walks,
+		 * then force the MMU on). A generous sprinkling of isb()
+		 * ensures that things happen in this exact order.
+		 */
+		val = read_sysreg_el1(SYS_TCR);
+		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
+		isb();
+		val = read_sysreg_el1(SYS_SCTLR);
+		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
+		isb();
+	}
+
 	__deactivate_traps_common();

 	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
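The masks used here boil down to three architectural bits: TCR_EL1.EPD0/EPD1
disable stage-1 table walks via TTBR0_EL1/TTBR1_EL1 (lookups that miss simply
fault instead of filling the TLB), and SCTLR_ELx.M is the stage-1 MMU enable.
For reference, the definitions look like this (bit positions per the ARMv8
ARM; the kernel's <asm/pgtable-hwdef.h> and <asm/sysreg.h> define them along
these lines, shown here for context, not part of this diff):

	#define TCR_EPD0_SHIFT	7
	#define TCR_EPD0_MASK	(UL(1) << TCR_EPD0_SHIFT) /* no TTBR0 walks */
	#define TCR_EPD1_SHIFT	23
	#define TCR_EPD1_MASK	(UL(1) << TCR_EPD1_SHIFT) /* no TTBR1 walks */

	#define SCTLR_ELx_M	(BIT(0))	/* stage-1 MMU enable */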
@@ -670,18 +701,23 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)

 	__sysreg_save_state_nvhe(host_ctxt);

+	/*
+	 * We must restore the 32-bit state before the sysregs, thanks
+	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
+	 *
+	 * Also, and in order to be able to deal with erratum #1319537 (A57)
+	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
+	 * restored before we enable S2 translation.
+	 */
+	__sysreg32_restore_state(vcpu);
+	__sysreg_restore_state_nvhe(guest_ctxt);
+
 	__activate_vm(kern_hyp_va(vcpu->kvm));
 	__activate_traps(vcpu);

 	__hyp_vgic_restore_state(vcpu);
 	__timer_enable_traps(vcpu);

-	/*
-	 * We must restore the 32-bit state before the sysregs, thanks
-	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
-	 */
-	__sysreg32_restore_state(vcpu);
-	__sysreg_restore_state_nvhe(guest_ctxt);
 	__debug_switch_to_guest(vcpu);

 	__set_guest_arch_workaround_state(vcpu);
arch/arm64/kvm/hyp/sysreg-sr.c
@@ -117,12 +117,26 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);
-	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
+
+	if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
+		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
+	} else if (!ctxt->__hyp_running_vcpu) {
+		/*
+		 * Must only be done for guest registers, hence the context
+		 * test. We're coming from the host, so SCTLR.M is already
+		 * set. Pairs with __activate_traps_nvhe().
+		 */
+		write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
+				  TCR_EPD1_MASK | TCR_EPD0_MASK),
+				 SYS_TCR);
+		isb();
+	}
+
 	write_sysreg(ctxt->sys_regs[ACTLR_EL1],		actlr_el1);
 	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],	SYS_CPACR);
 	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],	SYS_TTBR0);
 	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],	SYS_TTBR1);
-	write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
 	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],	SYS_ESR);
 	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],	SYS_AFSR0);
 	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],	SYS_AFSR1);
@@ -135,6 +149,23 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);

+	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+	    ctxt->__hyp_running_vcpu) {
+		/*
+		 * Must only be done for host registers, hence the context
+		 * test. Pairs with __deactivate_traps_nvhe().
+		 */
+		isb();
+		/*
+		 * At this stage, and thanks to the above isb(), S2 is
+		 * deconfigured and disabled. We can now restore the host's
+		 * S1 configuration: SCTLR, and only then TCR.
+		 */
+		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
+		isb();
+		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
+	}
+
 	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
 	write_sysreg_el1(ctxt->gp_regs.elr_el1,		SYS_ELR);
 	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], SYS_SPSR);
arch/arm64/kvm/hyp/tlb.c
@@ -63,6 +63,22 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 						  struct tlb_inv_context *cxt)
 {
+	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+		u64 val;
+
+		/*
+		 * For CPUs that are affected by ARM 1319367, we need to
+		 * avoid a host Stage-1 walk while we have the guest's
+		 * VMID set in the VTTBR in order to invalidate TLBs.
+		 * We're guaranteed that the S1 MMU is enabled, so we can
+		 * simply set the EPD bits to avoid any further TLB fill.
+		 */
+		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
+		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+		write_sysreg_el1(val, SYS_TCR);
+		isb();
+	}
+
 	__load_guest_stage2(kvm);
 	isb();
 }
@@ -100,6 +116,13 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 						 struct tlb_inv_context *cxt)
 {
 	write_sysreg(0, vttbr_el2);
+
+	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+		/* Ensure write of the host VMID */
+		isb();
+		/* Restore the host's TCR_EL1 */
+		write_sysreg_el1(cxt->tcr, SYS_TCR);
+	}
 }

 static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
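These two helpers bracket every stage-2 invalidation: the guest's VMID is
only ever live between __tlb_switch_to_guest() and __tlb_switch_to_host(),
so the EPD trick above covers the whole window in which a speculated host
walk would be dangerous. A caller looks roughly like this (a simplified,
from-memory sketch of the 5.4-era __kvm_tlb_flush_vmid_ipa(), shown for
context, not part of this diff):

	void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
						 phys_addr_t ipa)
	{
		struct tlb_inv_context cxt;

		dsb(ishst);			/* publish the PTE updates */

		kvm = kern_hyp_va(kvm);
		__tlb_switch_to_guest(kvm, &cxt); /* guest VMID in, EPD set */

		ipa >>= 12;
		__tlbi(ipas2e1is, ipa);		/* zap the S2 entry by IPA */
		dsb(ish);
		__tlbi(vmalle1is);		/* and stale combined S1/S2 */
		dsb(ish);
		isb();

		__tlb_switch_to_host(kvm, &cxt); /* host VMID, TCR restored */
	}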