arm64: Rename ARM64_SSBD to ARM64_SPECTRE_V4

In a similar manner to the renaming of ARM64_HARDEN_BRANCH_PREDICTOR
to ARM64_SPECTRE_V2, rename ARM64_SSBD to ARM64_SPECTRE_V4. This isn't
_entirely_ accurate, as we also need to take into account the interaction
with SSBS, but that will be taken care of in subsequent patches.

Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
Will Deacon 2020-09-15 23:00:31 +01:00
parent 532d581583
commit 9b0955baa4
3 changed files with 3 additions and 3 deletions

View File

@@ -37,7 +37,7 @@
 #define ARM64_HAS_CACHE_IDC 27
 #define ARM64_HAS_CACHE_DIC 28
 #define ARM64_HW_DBM 29
-#define ARM64_SSBD 30
+#define ARM64_SPECTRE_V4 30
 #define ARM64_MISMATCHED_CACHE_TYPE 31
 #define ARM64_HAS_STAGE2_FWB 32
 #define ARM64_HAS_CRC32 33

View File

@@ -675,7 +675,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 	{
 		.desc = "Speculative Store Bypass Disable",
-		.capability = ARM64_SSBD,
+		.capability = ARM64_SPECTRE_V4,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
 		.cpu_enable = cpu_enable_ssbd_mitigation,

View File

@@ -481,7 +481,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
 {
-	if (!cpus_have_final_cap(ARM64_SSBD))
+	if (!cpus_have_final_cap(ARM64_SPECTRE_V4))
 		return false;
 	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);