commit 816c347f3a
Merge remote-tracking branch 'arm64/for-next/ghostbusters' into kvm-arm64/hyp-pcpu

Signed-off-by: Marc Zyngier <maz@kernel.org>
@@ -1165,32 +1165,6 @@ config UNMAP_KERNEL_AT_EL0
 	  If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-	bool "Harden the branch predictor against aliasing attacks" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors rely on
-	  being able to manipulate the branch predictor for a victim context by
-	  executing aliasing branches in the attacker context.  Such attacks
-	  can be partially mitigated against by clearing internal branch
-	  predictor state and limiting the prediction logic in some situations.
-
-	  This config option will take CPU-specific actions to harden the
-	  branch predictor against aliasing attacks and may rely on specific
-	  instruction sequences or control bits being set by the system
-	  firmware.
-
-	  If unsure, say Y.
-
-config ARM64_SSBD
-	bool "Speculative Store Bypass Disable" if EXPERT
-	default y
-	help
-	  This enables mitigation of the bypassing of previous stores
-	  by speculative loads.
-
-	  If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
 	bool "Apply r/o permissions of VM areas also to their linear aliases"
 	default y
@@ -31,13 +31,13 @@
 #define ARM64_HAS_DCPOP				21
 #define ARM64_SVE				22
 #define ARM64_UNMAP_KERNEL_AT_EL0		23
-#define ARM64_HARDEN_BRANCH_PREDICTOR		24
+#define ARM64_SPECTRE_V2			24
 #define ARM64_HAS_RAS_EXTN			25
 #define ARM64_WORKAROUND_843419			26
 #define ARM64_HAS_CACHE_IDC			27
 #define ARM64_HAS_CACHE_DIC			28
 #define ARM64_HW_DBM				29
-#define ARM64_SSBD				30
+#define ARM64_SPECTRE_V4			30
 #define ARM64_MISMATCHED_CACHE_TYPE		31
 #define ARM64_HAS_STAGE2_FWB			32
 #define ARM64_HAS_CRC32				33
@@ -698,30 +698,6 @@ static inline bool system_supports_tlb_range(void)
 		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
-#define ARM64_BP_HARDEN_UNKNOWN		-1
-#define ARM64_BP_HARDEN_WA_NEEDED	0
-#define ARM64_BP_HARDEN_NOT_REQUIRED	1
-
-int get_spectre_v2_workaround_state(void);
-
-#define ARM64_SSBD_UNKNOWN		-1
-#define ARM64_SSBD_FORCE_DISABLE	0
-#define ARM64_SSBD_KERNEL		1
-#define ARM64_SSBD_FORCE_ENABLE		2
-#define ARM64_SSBD_MITIGATED		3
-
-static inline int arm64_get_ssbd_state(void)
-{
-#ifdef CONFIG_ARM64_SSBD
-	extern int ssbd_state;
-	return ssbd_state;
-#else
-	return ARM64_SSBD_UNKNOWN;
-#endif
-}
-
-void arm64_set_ssbd_mitigation(bool state);
-
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
@@ -10,9 +10,6 @@
 #include <asm/hyp_image.h>
 #include <asm/virt.h>
 
-#define VCPU_WORKAROUND_2_FLAG_SHIFT	0
-#define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -132,11 +129,9 @@ extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-						      bool flag)
-{
-	if (flag)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-	else
-		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
@@ -631,46 +631,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-#define KVM_BP_HARDEN_UNKNOWN		-1
-#define KVM_BP_HARDEN_WA_NEEDED		0
-#define KVM_BP_HARDEN_NOT_REQUIRED	1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return KVM_BP_HARDEN_WA_NEEDED;
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return KVM_BP_HARDEN_NOT_REQUIRED;
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return KVM_BP_HARDEN_UNKNOWN;
-	}
-}
-
-#define KVM_SSBD_UNKNOWN		-1
-#define KVM_SSBD_FORCE_DISABLE		0
-#define KVM_SSBD_KERNEL			1
-#define KVM_SSBD_FORCE_ENABLE		2
-#define KVM_SSBD_MITIGATED		3
-
-static inline int kvm_arm_have_ssbd(void)
-{
-	switch (arm64_get_ssbd_state()) {
-	case ARM64_SSBD_FORCE_DISABLE:
-		return KVM_SSBD_FORCE_DISABLE;
-	case ARM64_SSBD_KERNEL:
-		return KVM_SSBD_KERNEL;
-	case ARM64_SSBD_FORCE_ENABLE:
-		return KVM_SSBD_FORCE_ENABLE;
-	case ARM64_SSBD_MITIGATED:
-		return KVM_SSBD_MITIGATED;
-	case ARM64_SSBD_UNKNOWN:
-	default:
-		return KVM_SSBD_UNKNOWN;
-	}
-}
-
 void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
@@ -9,6 +9,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -430,19 +431,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
  *
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- *   hardening sequence is placed in one of the vector slots, which is
- *   executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ *   placed in one of the vector slots, which is executed before jumping
+ *   to the real vectors.
  *
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- *   hardening sequence is mapped next to the idmap page, and executed
- *   before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ *   containing the hardening sequence is mapped next to the idmap page,
+ *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
@@ -452,19 +451,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
-#include <asm/mmu.h>
-
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
 /*  This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
 	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 	int slot = -1;
 
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
 		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}
@@ -481,68 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
 	return vect;
 }
 
-/*  This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-	/*
-	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
-	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
-	 *
-	 * !HBP + !HEL2 -> use direct vectors
-	 *  HBP + !HEL2 -> use hardened vectors in place
-	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
-	 */
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-	}
-
-	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
-		/*
-		 * Always allocate a spare vector slot, as we don't
-		 * know yet which CPUs have a BP hardening slot that
-		 * we can reuse.
-		 */
-		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-		return create_hyp_exec_mappings(vect_pa, size,
-						&__kvm_bp_vect_base);
-	}
-
-	return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_ARM64_SSBD
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-DECLARE_KVM_NVHE_PER_CPU(u64, arm64_ssbd_callback_required);
-
-static inline void hyp_init_aux_data(void)
-{
-	u64 *ptr;
-
-	/* Copy arm64_ssbd_callback_required value from kernel to hyp. */
-	ptr = this_cpu_ptr_nvhe_sym(arm64_ssbd_callback_required);
-	*ptr = __this_cpu_read(arm64_ssbd_callback_required);
-}
-#else
-static inline void hyp_init_aux_data(void) {}
-#endif
-
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 /*
@@ -45,7 +45,6 @@ struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -57,21 +56,13 @@ static inline void arm64_apply_bp_hardening(void)
 {
 	struct bp_hardening_data *d;
 
-	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
 		return;
 
 	d = arm64_get_bp_hardening_data();
 	if (d->fn)
 		d->fn();
 }
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)	{ }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
@@ -38,6 +38,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pointer_auth.h>
 #include <asm/ptrace.h>
+#include <asm/spectre.h>
 #include <asm/types.h>
 
 /*
@@ -197,40 +198,15 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
-static inline void set_ssbs_bit(struct pt_regs *regs)
-{
-	regs->pstate |= PSR_SSBS_BIT;
-}
-
-static inline void set_compat_ssbs_bit(struct pt_regs *regs)
-{
-	regs->pstate |= PSR_AA32_SSBS_BIT;
-}
-
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
 	start_thread_common(regs, pc);
 	regs->pstate = PSR_MODE_EL0t;
-
-	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		set_ssbs_bit(regs);
-
+	spectre_v4_enable_task_mitigation(current);
 	regs->sp = sp;
 }
 
-static inline bool is_ttbr0_addr(unsigned long addr)
-{
-	/* entry assembly clears tags for TTBR0 addrs */
-	return addr < TASK_SIZE;
-}
-
-static inline bool is_ttbr1_addr(unsigned long addr)
-{
-	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
-	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-}
-
 #ifdef CONFIG_COMPAT
 static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 				       unsigned long sp)
@@ -244,13 +220,23 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
-	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		set_compat_ssbs_bit(regs);
-
+	spectre_v4_enable_task_mitigation(current);
 	regs->compat_sp = sp;
 }
 #endif
 
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+	/* entry assembly clears tags for TTBR0 addrs */
+	return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for Spectre vulnerabilities.
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <will@kernel.org>
+ */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+#include <asm/cpufeature.h>
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+	SPECTRE_UNAFFECTED,
+	SPECTRE_MITIGATED,
+	SPECTRE_VULNERABLE,
+};
+
+struct task_struct;
+
+enum mitigation_state arm64_get_spectre_v2_state(void);
+bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+
+enum mitigation_state arm64_get_spectre_v4_state(void);
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
+
+#endif	/* __ASM_SPECTRE_H */
@@ -242,6 +242,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL		1
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED	2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2	KVM_REG_ARM_FW_REG(2)
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN		1
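For context, the pseudo-register described above is accessed by a VMM through the standard KVM one-reg interface. A minimal sketch of reading it, assuming an arm64 host where <linux/kvm.h> exposes the KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 define, and an already-created vCPU file descriptor (the helper name here is hypothetical, not part of this diff):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read the SMCCC_ARCH_WORKAROUND_2 firmware pseudo-register. */
	static uint64_t read_wa2_state(int vcpu_fd)
	{
		uint64_t val = 0;
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
			.addr = (uint64_t)(unsigned long)&val,
		};

		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
			return UINT64_MAX;	/* caller treats this as an error */

		/* After this series, only NOT_AVAIL or NOT_REQUIRED is returned. */
		return val;
	}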
@@ -19,7 +19,7 @@ obj-y			:= debug-monitors.o entry.o irq.o fpsimd.o		\
 			   return_address.o cpuinfo.o cpu_errata.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
-			   syscall.o
+			   syscall.o proton-pack.o
 
 targets			+= efi-entry.o
@@ -59,7 +59,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
@@ -106,365 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
-				const char *hyp_vecs_end)
-{
-	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
-	int i;
-
-	for (i = 0; i < SZ_2K; i += 0x80)
-		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
-	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-				    const char *hyp_vecs_start,
-				    const char *hyp_vecs_end)
-{
-	static DEFINE_RAW_SPINLOCK(bp_lock);
-	int cpu, slot = -1;
-
-	/*
-	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
-	 * we're a guest. Skip the hyp-vectors work.
-	 */
-	if (!hyp_vecs_start) {
-		__this_cpu_write(bp_hardening_data.fn, fn);
-		return;
-	}
-
-	raw_spin_lock(&bp_lock);
-	for_each_possible_cpu(cpu) {
-		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
-			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
-			break;
-		}
-	}
-
-	if (slot == -1) {
-		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
-	}
-
-	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-	__this_cpu_write(bp_hardening_data.fn, fn);
-	raw_spin_unlock(&bp_lock);
-}
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-				    const char *hyp_vecs_start,
-				    const char *hyp_vecs_end)
-{
-	__this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
-
-#include <linux/arm-smccc.h>
-
-static void __maybe_unused call_smc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
-	u64 tmp;
-
-	asm volatile("mov	%0, x30		\n"
-		     ".rept	16		\n"
-		     "bl	. + 4		\n"
-		     ".endr			\n"
-		     "mov	x30, %0		\n"
-		     : "=&r" (tmp));
-}
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
-	__nospectre_v2 = true;
-	return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- *  0: No workaround required
- *  1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
-	bp_hardening_cb_t cb;
-	void *smccc_start, *smccc_end;
-	struct arm_smccc_res res;
-	u32 midr = read_cpuid_id();
-
-	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-
-	switch ((int)res.a0) {
-	case 1:
-		/* Firmware says we're just fine */
-		return 0;
-	case 0:
-		break;
-	default:
-		return -1;
-	}
-
-	switch (arm_smccc_1_1_get_conduit()) {
-	case SMCCC_CONDUIT_HVC:
-		cb = call_hvc_arch_workaround_1;
-		/* This is a guest, no need to patch KVM vectors */
-		smccc_start = NULL;
-		smccc_end = NULL;
-		break;
-
-#if IS_ENABLED(CONFIG_KVM)
-	case SMCCC_CONDUIT_SMC:
-		cb = call_smc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_smc;
-		smccc_end = __smccc_workaround_1_smc +
-			__SMCCC_WORKAROUND_1_SMC_SZ;
-		break;
-#endif
-
-	default:
-		return -1;
-	}
-
-	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
-	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
-		cb = qcom_link_stack_sanitization;
-
-	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-		install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
-	return 1;
-}
-
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
-	const char	*str;
-	int		state;
-} ssbd_options[] = {
-	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
-	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
-	{ "kernel",	ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
-	int i;
-
-	if (!buf || !buf[0])
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
-		int len = strlen(ssbd_options[i].str);
-
-		if (strncmp(buf, ssbd_options[i].str, len))
-			continue;
-
-		ssbd_state = ssbd_options[i].state;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
-				       __le32 *origptr, __le32 *updptr,
-				       int nr_inst)
-{
-	u32 insn;
-
-	BUG_ON(nr_inst != 1);
-
-	switch (arm_smccc_1_1_get_conduit()) {
-	case SMCCC_CONDUIT_HVC:
-		insn = aarch64_insn_get_hvc_value();
-		break;
-	case SMCCC_CONDUIT_SMC:
-		insn = aarch64_insn_get_smc_value();
-		break;
-	default:
-		return;
-	}
-
-	*updptr = cpu_to_le32(insn);
-}
-
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
-				      __le32 *origptr, __le32 *updptr,
-				      int nr_inst)
-{
-	BUG_ON(nr_inst != 1);
-	/*
-	 * Only allow mitigation on EL1 entry/exit and guest
-	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
-	 * be flipped.
-	 */
-	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
-		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
-void arm64_set_ssbd_mitigation(bool state)
-{
-	int conduit;
-
-	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-		pr_info_once("SSBD disabled by kernel configuration\n");
-		return;
-	}
-
-	if (this_cpu_has_cap(ARM64_SSBS)) {
-		if (state)
-			asm volatile(SET_PSTATE_SSBS(0));
-		else
-			asm volatile(SET_PSTATE_SSBS(1));
-		return;
-	}
-
-	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
-				       NULL);
-
-	WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
-				int scope)
-{
-	struct arm_smccc_res res;
-	bool required = true;
-	s32 val;
-	bool this_cpu_safe = false;
-	int conduit;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-	if (cpu_mitigations_off())
-		ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
-	/* delay setting __ssb_safe until we get a firmware response */
-	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
-		this_cpu_safe = true;
-
-	if (this_cpu_has_cap(ARM64_SSBS)) {
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		required = false;
-		goto out_printmsg;
-	}
-
-	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-				       ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-
-	if (conduit == SMCCC_CONDUIT_NONE) {
-		ssbd_state = ARM64_SSBD_UNKNOWN;
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		return false;
-	}
-
-	val = (s32)res.a0;
-
-	switch (val) {
-	case SMCCC_RET_NOT_SUPPORTED:
-		ssbd_state = ARM64_SSBD_UNKNOWN;
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		return false;
-
-	/* machines with mixed mitigation requirements must not return this */
-	case SMCCC_RET_NOT_REQUIRED:
-		pr_info_once("%s mitigation not required\n", entry->desc);
-		ssbd_state = ARM64_SSBD_MITIGATED;
-		return false;
-
-	case SMCCC_RET_SUCCESS:
-		__ssb_safe = false;
-		required = true;
-		break;
-
-	case 1:	/* Mitigation not required on this CPU */
-		required = false;
-		break;
-
-	default:
-		WARN_ON(1);
-		if (!this_cpu_safe)
-			__ssb_safe = false;
-		return false;
-	}
-
-	switch (ssbd_state) {
-	case ARM64_SSBD_FORCE_DISABLE:
-		arm64_set_ssbd_mitigation(false);
-		required = false;
-		break;
-
-	case ARM64_SSBD_KERNEL:
-		if (required) {
-			__this_cpu_write(arm64_ssbd_callback_required, 1);
-			arm64_set_ssbd_mitigation(true);
-		}
-		break;
-
-	case ARM64_SSBD_FORCE_ENABLE:
-		arm64_set_ssbd_mitigation(true);
-		required = true;
-		break;
-
-	default:
-		WARN_ON(1);
-		break;
-	}
-
-out_printmsg:
-	switch (ssbd_state) {
-	case ARM64_SSBD_FORCE_DISABLE:
-		pr_info_once("%s disabled from command-line\n", entry->desc);
-		break;
-
-	case ARM64_SSBD_FORCE_ENABLE:
-		pr_info_once("%s forced from command-line\n", entry->desc);
-		break;
-	}
-
-	return required;
-}
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-	{},
-};
-
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -519,83 +160,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 	CAP_MIDR_RANGE_LIST(midr_list)
 
-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-int get_spectre_v2_workaround_state(void)
-{
-	if (__spectrev2_safe)
-		return ARM64_BP_HARDEN_NOT_REQUIRED;
-
-	if (!__hardenbp_enab)
-		return ARM64_BP_HARDEN_UNKNOWN;
-
-	return ARM64_BP_HARDEN_WA_NEEDED;
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-	{ /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
-	int need_wa;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-	/* If the CPU has CSV2 set, we're safe */
-	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
-						 ID_AA64PFR0_CSV2_SHIFT))
-		return false;
-
-	/* Alternatively, we have a list of unaffected CPUs */
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
-		return false;
-
-	/* Fallback to firmware detection */
-	need_wa = detect_harden_bp_fw();
-	if (!need_wa)
-		return false;
-
-	__spectrev2_safe = false;
-
-	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
-	/* forced off */
-	if (__nospectre_v2 || cpu_mitigations_off()) {
-		pr_info_once("spectrev2 mitigation disabled by command line option\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
-	if (need_wa < 0) {
-		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
-		__hardenbp_enab = false;
-	}
-
-	return (need_wa > 0);
-}
-
 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
@@ -887,9 +451,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 #endif
 	{
-		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		.desc = "Spectre-v2",
+		.capability = ARM64_SPECTRE_V2,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-		.matches = check_branch_predictor,
+		.matches = has_spectre_v2,
+		.cpu_enable = spectre_v2_enable_mitigation,
 	},
 #ifdef CONFIG_RANDOMIZE_BASE
 	{
@@ -899,11 +465,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 #endif
 	{
-		.desc = "Speculative Store Bypass Disable",
-		.capability = ARM64_SSBD,
+		.desc = "Spectre-v4",
+		.capability = ARM64_SPECTRE_V4,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-		.matches = has_ssbd_mitigation,
-		.midr_range_list = arm64_ssb_cpus,
+		.matches = has_spectre_v4,
+		.cpu_enable = spectre_v4_enable_mitigation,
 	},
 #ifdef CONFIG_ARM64_ERRATUM_1418040
 	{
@@ -956,40 +522,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 	}
 };
-
-ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return sprintf(buf, "Not affected\n");
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return sprintf(buf, "Vulnerable\n");
-	}
-}
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	if (__ssb_safe)
-		return sprintf(buf, "Not affected\n");
-
-	switch (ssbd_state) {
-	case ARM64_SSBD_KERNEL:
-	case ARM64_SSBD_FORCE_ENABLE:
-		if (IS_ENABLED(CONFIG_ARM64_SSBD))
-			return sprintf(buf,
-			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
-	}
-
-	return sprintf(buf, "Vulnerable\n");
-}
@@ -227,7 +227,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
 	ARM64_FTR_END,
@@ -487,7 +487,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_pfr2[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -1583,48 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
 	WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
-{
-	if (user_mode(regs))
-		return 1;
-
-	if (instr & BIT(PSTATE_Imm_shift))
-		regs->pstate |= PSR_SSBS_BIT;
-	else
-		regs->pstate &= ~PSR_SSBS_BIT;
-
-	arm64_skip_faulting_instruction(regs, 4);
-	return 0;
-}
-
-static struct undef_hook ssbs_emulation_hook = {
-	.instr_mask	= ~(1U << PSTATE_Imm_shift),
-	.instr_val	= 0xd500401f | PSTATE_SSBS,
-	.fn		= ssbs_emulation_handler,
-};
-
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
-{
-	static bool undef_hook_registered = false;
-	static DEFINE_RAW_SPINLOCK(hook_lock);
-
-	raw_spin_lock(&hook_lock);
-	if (!undef_hook_registered) {
-		register_undef_hook(&ssbs_emulation_hook);
-		undef_hook_registered = true;
-	}
-	raw_spin_unlock(&hook_lock);
-
-	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
-		arm64_set_ssbd_mitigation(false);
-	} else {
-		arm64_set_ssbd_mitigation(true);
-	}
-}
-#endif /* CONFIG_ARM64_SSBD */
-
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1976,19 +1934,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
 		.min_field_value = 1,
 	},
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypassing Safe (SSBS)",
 		.capability = ARM64_SSBS,
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR1_EL1,
 		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
-		.cpu_enable = cpu_enable_ssbs,
 	},
-#endif
 #ifdef CONFIG_ARM64_CNP
 	{
 		.desc = "Common not Private translations",
@@ -132,9 +132,8 @@ alternative_else_nop_endif
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb	arm64_enable_wa2_handling
-	b	.L__asm_ssbd_skip\@
+alternative_cb	spectre_v4_patch_fw_mitigation_enable
+	b	.L__asm_ssbd_skip\@		// Patched to NOP
 alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
@@ -142,11 +141,10 @@ alternative_cb_end
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
-alternative_cb	arm64_update_smccc_conduit
+alternative_cb	spectre_v4_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
	.endm
 
	.macro	kernel_entry, el, regsize = 64
@@ -697,11 +695,9 @@ el0_irq_naked:
	bl	trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
 1:
-#endif
	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -332,11 +332,7 @@ int swsusp_arch_suspend(void)
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
-		switch (arm64_get_ssbd_state()) {
-		case ARM64_SSBD_FORCE_ENABLE:
-		case ARM64_SSBD_KERNEL:
-			arm64_set_ssbd_mitigation(true);
-		}
+		spectre_v4_enable_mitigation(NULL);
	}
 
	local_daif_restore(flags);
@@ -62,7 +62,6 @@ __efistub__ctype = _ctype;
 */
 
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
@@ -21,6 +21,7 @@
 #include <linux/lockdep.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -421,8 +422,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
 
-		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			set_ssbs_bit(childregs);
+		spectre_v4_enable_task_mitigation(p);
 
		if (system_uses_irq_prio_masking())
			childregs->pmr_save = GIC_PRIO_IRQON;
@@ -472,8 +472,6 @@ void uao_thread_switch(struct task_struct *next)
 */
 static void ssbs_thread_switch(struct task_struct *next)
 {
-	struct pt_regs *regs = task_pt_regs(next);
-
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
|
|||
* If all CPUs implement the SSBS extension, then we just need to
|
||||
* context-switch the PSTATE field.
|
||||
*/
|
||||
if (cpu_have_feature(cpu_feature(SSBS)))
|
||||
if (cpus_have_const_cap(ARM64_SSBS))
|
||||
return;
|
||||
|
||||
/* If the mitigation is enabled, then we leave SSBS clear. */
|
||||
if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
|
||||
test_tsk_thread_flag(next, TIF_SSBD))
|
||||
return;
|
||||
|
||||
if (compat_user_mode(regs))
|
||||
set_compat_ssbs_bit(regs);
|
||||
else if (user_mode(regs))
|
||||
set_ssbs_bit(regs);
|
||||
spectre_v4_enable_task_mitigation(next);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -620,6 +610,11 @@ void arch_setup_new_exec(void)
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
 
	ptrauth_thread_init_user(current);
+
+	if (task_spec_ssb_noexec(current)) {
+		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+					 PR_SPEC_ENABLE);
+	}
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
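The task_spec_ssb_noexec() hook added above ties into the generic speculation-control prctl() interface, which is how userspace opts a task in or out of the Spectre-v4 mitigation in the first place. A minimal userspace sketch using the standard prctl() API (shown for context only; it is not part of this diff, and the fallback defines are only needed on older headers):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SPEC_STORE_BYPASS
	#define PR_SET_SPECULATION_CTRL	53
	#define PR_GET_SPECULATION_CTRL	52
	#define PR_SPEC_STORE_BYPASS	0
	#define PR_SPEC_DISABLE		(1UL << 2)
	#endif

	int main(void)
	{
		/* Ask the kernel to disable speculative store bypass for this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("prctl");

		/* Query the resulting state; the return encodes PR_SPEC_* flags. */
		printf("ssb state: %ld\n",
		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
		return 0;
	}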
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
+ * detailed at:
+ *
+ *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
+ *
+ * This code was originally written hastily under an awful lot of stress and so
+ * aspects of it are somewhat hacky. Unfortunately, changing anything in here
+ * instantly makes me feel ill. Thanks, Jann. Thann.
+ *
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ * Copyright (C) 2020 Google LLC
+ *
+ * "If there's something strange in your neighbourhood, who you gonna call?"
+ *
+ * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/spectre.h>
+#include <asm/traps.h>
+
+/*
+ * We try to ensure that the mitigation state can never change as the result of
+ * onlining a late CPU.
+ */
+static void update_mitigation_state(enum mitigation_state *oldp,
+				    enum mitigation_state new)
+{
+	enum mitigation_state state;
+
+	do {
+		state = READ_ONCE(*oldp);
+		if (new <= state)
+			break;
+
+		/* Userspace almost certainly can't deal with this. */
+		if (WARN_ON(system_capabilities_finalized()))
+			break;
+	} while (cmpxchg_relaxed(oldp, state, new) != state);
+}
+
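update_mitigation_state() relies on the enum ordering declared in <asm/spectre.h>: because SPECTRE_UNAFFECTED < SPECTRE_MITIGATED < SPECTRE_VULNERABLE, the cmpxchg loop only ever moves the state towards a worse value, so a late-onlined vulnerable CPU can downgrade the reported state but nothing can upgrade it. A standalone sketch of the same monotonic-update pattern using C11 atomics rather than the kernel's cmpxchg_relaxed() (names here are illustrative, not from the patch):

	#include <stdatomic.h>

	enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

	/* Raise *oldp to 'new' if 'new' is strictly worse; never lower it. */
	static void update_state(_Atomic int *oldp, enum mitigation_state new)
	{
		int state = atomic_load_explicit(oldp, memory_order_relaxed);

		while (new > state &&
		       !atomic_compare_exchange_weak_explicit(oldp, &state, new,
							      memory_order_relaxed,
							      memory_order_relaxed))
			;	/* 'state' is reloaded by the failed CAS; retry */
	}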
+/*
+ * Spectre v1.
+ *
+ * The kernel can't protect userspace for this one: it's each person for
+ * themselves. Advertise what we're doing and be done with it.
+ */
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+/*
+ * Spectre v2.
+ *
+ * This one sucks. A CPU is either:
+ *
+ * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in software by firmware.
+ * - Mitigated in software by a CPU-specific dance in the kernel.
+ * - Vulnerable.
+ *
+ * It's not unlikely for different CPUs in a big.LITTLE system to fall into
+ * different camps.
+ */
+static enum mitigation_state spectre_v2_state;
+
+static bool __read_mostly __nospectre_v2;
+static int __init parse_spectre_v2_param(char *str)
+{
+	__nospectre_v2 = true;
+	return 0;
+}
+early_param("nospectre_v2", parse_spectre_v2_param);
+
+static bool spectre_v2_mitigations_off(void)
+{
+	bool ret = __nospectre_v2 || cpu_mitigations_off();
+
+	if (ret)
+		pr_info_once("spectre-v2 mitigation disabled by command line option\n");
+
+	return ret;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	switch (spectre_v2_state) {
+	case SPECTRE_UNAFFECTED:
+		return sprintf(buf, "Not affected\n");
+	case SPECTRE_MITIGATED:
+		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+	case SPECTRE_VULNERABLE:
+		fallthrough;
+	default:
+		return sprintf(buf, "Vulnerable\n");
+	}
+}
+
+static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
+{
+	u64 pfr0;
+	static const struct midr_range spectre_v2_safe_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+		{ /* sentinel */ }
+	};
+
+	/* If the CPU has CSV2 set, we're safe */
+	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+		return SPECTRE_UNAFFECTED;
+
+	/* Alternatively, we have a list of unaffected CPUs */
+	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+		return SPECTRE_UNAFFECTED;
+
+	return SPECTRE_VULNERABLE;
+}
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	(1)
+
+static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+	int ret;
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+	ret = res.a0;
+	switch (ret) {
+	case SMCCC_RET_SUCCESS:
+		return SPECTRE_MITIGATED;
+	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+		return SPECTRE_UNAFFECTED;
+	default:
+		fallthrough;
+	case SMCCC_RET_NOT_SUPPORTED:
+		return SPECTRE_VULNERABLE;
+	}
+}
+
+bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
+		return false;
+
+	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
+		return false;
+
+	return true;
+}
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+enum mitigation_state arm64_get_spectre_v2_state(void)
+{
+	return spectre_v2_state;
+}
+
+#ifdef CONFIG_KVM
+#include <asm/cacheflush.h>
+#include <asm/kvm_asm.h>
+
+atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+				const char *hyp_vecs_end)
+{
+	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
+	int i;
+
+	for (i = 0; i < SZ_2K; i += 0x80)
+		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+	static DEFINE_RAW_SPINLOCK(bp_lock);
+	int cpu, slot = -1;
+	const char *hyp_vecs_start = __smccc_workaround_1_smc;
+	const char *hyp_vecs_end = __smccc_workaround_1_smc +
+				   __SMCCC_WORKAROUND_1_SMC_SZ;
+
+	/*
+	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+	 * we're a guest. Skip the hyp-vectors work.
+	 */
+	if (!is_hyp_mode_available()) {
+		__this_cpu_write(bp_hardening_data.fn, fn);
+		return;
+	}
+
+	raw_spin_lock(&bp_lock);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+			break;
+		}
+	}
+
+	if (slot == -1) {
+		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+	}
+
+	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+	__this_cpu_write(bp_hardening_data.fn, fn);
+	raw_spin_unlock(&bp_lock);
+}
+#else
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+	__this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif	/* CONFIG_KVM */
+
+static void call_smc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void qcom_link_stack_sanitisation(void)
+{
+	u64 tmp;
+
+	asm volatile("mov	%0, x30		\n"
+		     ".rept	16		\n"
+		     "bl	. + 4		\n"
+		     ".endr			\n"
+		     "mov	x30, %0		\n"
+		     : "=&r" (tmp));
+}
+
+static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
+{
+	bp_hardening_cb_t cb;
+	enum mitigation_state state;
+
+	state = spectre_v2_get_cpu_fw_mitigation_state();
+	if (state != SPECTRE_MITIGATED)
+		return state;
+
+	if (spectre_v2_mitigations_off())
+		return SPECTRE_VULNERABLE;
+
+	switch (arm_smccc_1_1_get_conduit()) {
+	case SMCCC_CONDUIT_HVC:
+		cb = call_hvc_arch_workaround_1;
+		break;
+
+	case SMCCC_CONDUIT_SMC:
+		cb = call_smc_arch_workaround_1;
+		break;
+
+	default:
+		return SPECTRE_VULNERABLE;
+	}
+
+	install_bp_hardening_cb(cb);
+	return SPECTRE_MITIGATED;
+}
+
+static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
+{
+	u32 midr;
+
+	if (spectre_v2_mitigations_off())
+		return SPECTRE_VULNERABLE;
+
+	midr = read_cpuid_id();
+	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
+	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
+		return SPECTRE_VULNERABLE;
+
+	install_bp_hardening_cb(qcom_link_stack_sanitisation);
+	return SPECTRE_MITIGATED;
+}
+
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+	enum mitigation_state state;
+
+	WARN_ON(preemptible());
+
+	state = spectre_v2_get_cpu_hw_mitigation_state();
+	if (state == SPECTRE_VULNERABLE)
+		state = spectre_v2_enable_fw_mitigation();
+	if (state == SPECTRE_VULNERABLE)
+		state = spectre_v2_enable_sw_mitigation();
+
+	update_mitigation_state(&spectre_v2_state, state);
+}
+
+/*
+ * Spectre v4.
+ *
+ * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
+ * either:
+ *
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in hardware via PSTATE.SSBS.
+ * - Mitigated in software by firmware (sometimes referred to as SSBD).
+ *
+ * Wait, that doesn't sound so bad, does it? Keep reading...
+ *
+ * A major source of headaches is that the software mitigation is enabled both
+ * on a per-task basis, but can also be forced on for the kernel, necessitating
+ * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
+ * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
+ * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
+ * so you can have systems that have both firmware and SSBS mitigations. This
+ * means we actually have to reject late onlining of CPUs with mitigations if
+ * all of the currently onlined CPUs are safelisted, as the mitigation tends to
+ * be opt-in for userspace. Yes, really, the cure is worse than the disease.
+ *
+ * The only good part is that if the firmware mitigation is present, then it is
+ * present for all CPUs, meaning we don't have to worry about late onlining of a
+ * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
+ *
+ * Give me a VAX-11/780 any day of the week...
+ */
+static enum mitigation_state spectre_v4_state;
+
+/* This is the per-cpu state tracking whether we need to talk to firmware */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+enum spectre_v4_policy {
+	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
+	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
+	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
+};
+
+static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
+
+static const struct spectre_v4_param {
+	const char		*str;
+	enum spectre_v4_policy	policy;
+} spectre_v4_params[] = {
+	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
+	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
+	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
+};
+static int __init parse_spectre_v4_param(char *str)
+{
+	int i;
+
+	if (!str || !str[0])
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
+		const struct spectre_v4_param *param = &spectre_v4_params[i];
+
+		if (strncmp(str, param->str, strlen(param->str)))
+			continue;
+
+		__spectre_v4_policy = param->policy;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+early_param("ssbd", parse_spectre_v4_param);
+
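For reference, the retained ssbd= kernel command-line parameter maps onto the new policy enum as follows (taken directly from the table above):

	ssbd=force-on    # SPECTRE_V4_POLICY_MITIGATION_ENABLED
	ssbd=force-off   # SPECTRE_V4_POLICY_MITIGATION_DISABLED
	ssbd=kernel      # SPECTRE_V4_POLICY_MITIGATION_DYNAMIC (entry/exit toggling)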
/*
|
||||
* Because this was all written in a rush by people working in different silos,
|
||||
* we've ended up with multiple command line options to control the same thing.
|
||||
* Wrap these up in some helpers, which prefer disabling the mitigation if faced
|
||||
* with contradictory parameters. The mitigation is always either "off",
|
||||
* "dynamic" or "on".
|
||||
*/
|
||||
static bool spectre_v4_mitigations_off(void)
|
||||
{
|
||||
bool ret = cpu_mitigations_off() ||
|
||||
__spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
|
||||
|
||||
if (ret)
|
||||
pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
|
||||
static bool spectre_v4_mitigations_dynamic(void)
|
||||
{
|
||||
return !spectre_v4_mitigations_off() &&
|
||||
__spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
|
||||
}
|
||||
|
||||
static bool spectre_v4_mitigations_on(void)
|
||||
{
|
||||
return !spectre_v4_mitigations_off() &&
|
||||
__spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
|
||||
}
|
||||
|
||||
ssize_t cpu_show_spec_store_bypass(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
switch (spectre_v4_state) {
|
||||
case SPECTRE_UNAFFECTED:
|
||||
return sprintf(buf, "Not affected\n");
|
||||
case SPECTRE_MITIGATED:
|
||||
return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
|
||||
case SPECTRE_VULNERABLE:
|
||||
fallthrough;
|
||||
default:
|
||||
return sprintf(buf, "Vulnerable\n");
|
||||
}
|
||||
}
|
||||
|
||||
enum mitigation_state arm64_get_spectre_v4_state(void)
{
        return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
        static const struct midr_range spectre_v4_safe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
                MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
                MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
                MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
                { /* sentinel */ },
        };

        if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
                return SPECTRE_UNAFFECTED;

        /* CPU features are detected first */
        if (this_cpu_has_cap(ARM64_SSBS))
                return SPECTRE_MITIGATED;

        return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
        int ret;
        struct arm_smccc_res res;

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_2, &res);

        ret = res.a0;
        switch (ret) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                fallthrough;
        case SMCCC_RET_NOT_REQUIRED:
                return SPECTRE_UNAFFECTED;
        default:
                fallthrough;
        case SMCCC_RET_NOT_SUPPORTED:
                return SPECTRE_VULNERABLE;
        }
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
        enum mitigation_state state;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        state = spectre_v4_get_cpu_hw_mitigation_state();
        if (state == SPECTRE_VULNERABLE)
                state = spectre_v4_get_cpu_fw_mitigation_state();

        return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
        if (user_mode(regs))
                return 1;

        if (instr & BIT(PSTATE_Imm_shift))
                regs->pstate |= PSR_SSBS_BIT;
        else
                regs->pstate &= ~PSR_SSBS_BIT;

        arm64_skip_faulting_instruction(regs, 4);
        return 0;
}

static struct undef_hook ssbs_emulation_hook = {
        .instr_mask     = ~(1U << PSTATE_Imm_shift),
        .instr_val      = 0xd500401f | PSTATE_SSBS,
        .fn             = ssbs_emulation_handler,
};

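/*
 * Illustrative sketch (hypothetical helper, not in the patch): the hook
 * above matches both immediate forms of "MSR SSBS, #imm" because the mask
 * clears CRm<0> (bit PSTATE_Imm_shift), the only bit that differs between
 * the #0 and #1 encodings. The equivalent predicate would be:
 */
static bool __maybe_unused is_msr_ssbs_imm(u32 instr)
{
        return (instr & ~(1U << PSTATE_Imm_shift)) ==
               (0xd500401f | PSTATE_SSBS);
}
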
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
        static bool undef_hook_registered = false;
        static DEFINE_RAW_SPINLOCK(hook_lock);
        enum mitigation_state state;

        /*
         * If the system is mitigated but this CPU doesn't have SSBS, then
         * we must be on the safelist and there's nothing more to do.
         */
        state = spectre_v4_get_cpu_hw_mitigation_state();
        if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
                return state;

        raw_spin_lock(&hook_lock);
        if (!undef_hook_registered) {
                register_undef_hook(&ssbs_emulation_hook);
                undef_hook_registered = true;
        }
        raw_spin_unlock(&hook_lock);

        if (spectre_v4_mitigations_off()) {
                sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
                asm volatile(SET_PSTATE_SSBS(1));
                return SPECTRE_VULNERABLE;
        }

        /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
        asm volatile(SET_PSTATE_SSBS(0));
        return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
                                                  __le32 *origptr,
                                                  __le32 *updptr, int nr_inst)
{
        BUG_ON(nr_inst != 1); /* Branch -> NOP */

        if (spectre_v4_mitigations_off())
                return;

        if (cpus_have_final_cap(ARM64_SSBS))
                return;

        if (spectre_v4_mitigations_dynamic())
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
                                                   __le32 *origptr,
                                                   __le32 *updptr, int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case SMCCC_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

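/*
 * For context (sketched from the kernel entry assembly; the exact call
 * site lives outside this hunk and its shape is assumed here): the two
 * callbacks above rewrite a site of the form
 *
 *      alternative_cb spectre_v4_patch_fw_mitigation_enable
 *      b       skip                            // patched to NOP when dynamic
 *      alternative_cb_end
 *      ...per-CPU/per-task checks...
 *      alternative_cb spectre_v4_patch_fw_mitigation_conduit
 *      nop                                     // patched to SMC/HVC #0
 *      alternative_cb_end
 */
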
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
        enum mitigation_state state;

        state = spectre_v4_get_cpu_fw_mitigation_state();
        if (state != SPECTRE_MITIGATED)
                return state;

        if (spectre_v4_mitigations_off()) {
                arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
                return SPECTRE_VULNERABLE;
        }

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

        if (spectre_v4_mitigations_dynamic())
                __this_cpu_write(arm64_ssbd_callback_required, 1);

        return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
        enum mitigation_state state;

        WARN_ON(preemptible());

        state = spectre_v4_enable_hw_mitigation();
        if (state == SPECTRE_VULNERABLE)
                state = spectre_v4_enable_fw_mitigation();

        update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
        u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

        if (state)
                regs->pstate |= bit;
        else
                regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
        struct pt_regs *regs = task_pt_regs(tsk);
        bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

        if (spectre_v4_mitigations_off())
                ssbs = true;
        else if (spectre_v4_mitigations_dynamic() && !kthread)
                ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

        __update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
        task_clear_spec_ssb_noexec(task);
        task_set_spec_ssb_disable(task);
        set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
        task_clear_spec_ssb_noexec(task);
        task_clear_spec_ssb_disable(task);
        clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* Enable speculation: disable mitigation */
                /*
                 * Force disabled speculation prevents it from being
                 * re-enabled.
                 */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;

                /*
                 * If the mitigation is forced on, then speculation is forced
                 * off and we again prevent it from being re-enabled.
                 */
                if (spectre_v4_mitigations_on())
                        return -EPERM;

                ssbd_prctl_disable_mitigation(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                /* Force disable speculation: force enable mitigation */
                /*
                 * If the mitigation is forced off, then speculation is forced
                 * on and we prevent it from being disabled.
                 */
                if (spectre_v4_mitigations_off())
                        return -EPERM;

                task_set_spec_ssb_force_disable(task);
                fallthrough;
        case PR_SPEC_DISABLE:
                /* Disable speculation: enable mitigation */
                /* Same as PR_SPEC_FORCE_DISABLE */
                if (spectre_v4_mitigations_off())
                        return -EPERM;

                ssbd_prctl_enable_mitigation(task);
                break;
        case PR_SPEC_DISABLE_NOEXEC:
                /* Disable speculation until execve(): enable mitigation */
                /*
                 * If the mitigation state is forced one way or the other, then
                 * we must fail now before we try to toggle it on execve().
                 */
                if (task_spec_ssb_force_disable(task) ||
                    spectre_v4_mitigations_off() ||
                    spectre_v4_mitigations_on()) {
                        return -EPERM;
                }

                ssbd_prctl_enable_mitigation(task);
                task_set_spec_ssb_noexec(task);
                break;
        default:
                return -ERANGE;
        }

        spectre_v4_enable_task_mitigation(task);
        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssbd_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

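/*
 * Userspace view of the interface above (illustrative sketch, not part of
 * this patch; error handling is minimal and the constants come from
 * <sys/prctl.h>):
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* Opt this task in to the Spectre-v4 mitigation ("disable" SSB). */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        /* Query the per-task state reported by ssbd_prctl_get() below. */
        printf("state: 0x%x\n",
               (unsigned int)prctl(PR_GET_SPECULATION_CTRL,
                                   PR_SPEC_STORE_BYPASS, 0, 0, 0));
        return 0;
}
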
static int ssbd_prctl_get(struct task_struct *task)
{
        switch (spectre_v4_state) {
        case SPECTRE_UNAFFECTED:
                return PR_SPEC_NOT_AFFECTED;
        case SPECTRE_MITIGATED:
                if (spectre_v4_mitigations_on())
                        return PR_SPEC_NOT_AFFECTED;

                if (spectre_v4_mitigations_dynamic())
                        break;

                /* Mitigations are disabled, so we're vulnerable. */
                fallthrough;
        case SPECTRE_VULNERABLE:
                fallthrough;
        default:
                return PR_SPEC_ENABLE;
        }

        /* Check the mitigation state for this task */
        if (task_spec_ssb_force_disable(task))
                return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

        if (task_spec_ssb_noexec(task))
                return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

        if (task_spec_ssb_disable(task))
                return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

        return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssbd_prctl_get(task);
        default:
                return -ENODEV;
        }
}

@ -1,129 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 */

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>

static void ssbd_ssbs_enable(struct task_struct *task)
{
        u64 val = is_compat_thread(task_thread_info(task)) ?
                  PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

        task_pt_regs(task)->pstate |= val;
}

static void ssbd_ssbs_disable(struct task_struct *task)
{
        u64 val = is_compat_thread(task_thread_info(task)) ?
                  PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

        task_pt_regs(task)->pstate &= ~val;
}

/*
 * prctl interface for SSBD
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        int state = arm64_get_ssbd_state();

        /* Unsupported */
        if (state == ARM64_SSBD_UNKNOWN)
                return -ENODEV;

        /* Treat the unaffected/mitigated state separately */
        if (state == ARM64_SSBD_MITIGATED) {
                switch (ctrl) {
                case PR_SPEC_ENABLE:
                        return -EPERM;
                case PR_SPEC_DISABLE:
                case PR_SPEC_FORCE_DISABLE:
                        return 0;
                }
        }

        /*
         * Things are a bit backward here: the arm64 internal API
         * *enables the mitigation* when the userspace API *disables
         * speculation*. So much fun.
         */
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (state == ARM64_SSBD_FORCE_ENABLE ||
                    task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                clear_tsk_thread_flag(task, TIF_SSBD);
                ssbd_ssbs_enable(task);
                break;
        case PR_SPEC_DISABLE:
                if (state == ARM64_SSBD_FORCE_DISABLE)
                        return -EPERM;
                task_set_spec_ssb_disable(task);
                set_tsk_thread_flag(task, TIF_SSBD);
                ssbd_ssbs_disable(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                if (state == ARM64_SSBD_FORCE_DISABLE)
                        return -EPERM;
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                set_tsk_thread_flag(task, TIF_SSBD);
                ssbd_ssbs_disable(task);
                break;
        default:
                return -ERANGE;
        }

        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssbd_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

static int ssbd_prctl_get(struct task_struct *task)
{
        switch (arm64_get_ssbd_state()) {
        case ARM64_SSBD_UNKNOWN:
                return -ENODEV;
        case ARM64_SSBD_FORCE_ENABLE:
                return PR_SPEC_DISABLE;
        case ARM64_SSBD_KERNEL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        case ARM64_SSBD_FORCE_DISABLE:
                return PR_SPEC_ENABLE;
        default:
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssbd_prctl_get(task);
        default:
                return -ENODEV;
        }
}
@ -72,8 +72,7 @@ void notrace __cpu_suspend_exit(void)
         * have turned the mitigation on. If the user has forcefully
         * disabled it, make sure their wishes are obeyed.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
                arm64_set_ssbd_mitigation(false);
        spectre_v4_enable_mitigation(NULL);
}

/*
@ -57,9 +57,6 @@ config KVM_ARM_PMU
          Adds support for a virtual Performance Monitoring Unit (PMU) in
          virtual machines.

config KVM_INDIRECT_VECTORS
        def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE

endif # KVM

endif # VIRTUALIZATION
@ -1269,6 +1269,40 @@ static unsigned long nvhe_percpu_order(void)
        return size ? get_order(size) : 0;
}

static int kvm_map_vectors(void)
{
        /*
         * SV2  = ARM64_SPECTRE_V2
         * HEL2 = ARM64_HARDEN_EL2_VECTORS
         *
         * !SV2 + !HEL2 -> use direct vectors
         *  SV2 + !HEL2 -> use hardened vectors in place
         * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
         *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
         */
        if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
                __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
                __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
        }

        if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
                phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
                unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

                /*
                 * Always allocate a spare vector slot, as we don't
                 * know yet which CPUs have a BP hardening slot that
                 * we can reuse.
                 */
                __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
                BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
                return create_hyp_exec_mappings(vect_pa, size,
                                                &__kvm_bp_vect_base);
        }

        return 0;
}

static void cpu_init_hyp_mode(void)
{
        phys_addr_t pgd_ptr;
@ -1305,12 +1339,9 @@ static void cpu_init_hyp_mode(void)
         * at EL2.
         */
        if (this_cpu_has_cap(ARM64_SSBS) &&
            arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
            arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
                kvm_call_hyp_nvhe(__kvm_enable_ssbs);
        }

        /* Copy information whether SSBD callback is required to hyp. */
        hyp_init_aux_data();
}

static void cpu_hyp_reset(void)
@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
                     -DDISABLE_BRANCH_PROFILING \
                     $(DISABLE_STACKLEAK_PLUGIN)

obj-$(CONFIG_KVM) += vhe/ nvhe/
obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
@ -116,35 +116,6 @@ el1_hvc_guest:
                          ARM_SMCCC_ARCH_WORKAROUND_2)
        cbnz    w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
alternative_cb  arm64_enable_wa2_handling
        b       wa2_end
alternative_cb_end
        get_vcpu_ptr    x2, x0
        ldr     x0, [x2, #VCPU_WORKAROUND_FLAGS]

        // Sanitize the argument and update the guest flags
        ldr     x1, [sp, #8]                    // Guest's x1
        clz     w1, w1                          // Murphy's device:
        lsr     w1, w1, #5                      // w1 = !!w1 without using
        eor     w1, w1, #1                      // the flags...
        bfi     x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
        str     x0, [x2, #VCPU_WORKAROUND_FLAGS]

        /* Check that we actually need to perform the call */
        ldr_this_cpu x0, arm64_ssbd_callback_required, x2
        cbz     x0, wa2_end

        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
        smc     #0

        /* Don't leak data from the SMC call */
        mov     x3, xzr
wa2_end:
        mov     x2, xzr
        mov     x1, xzr
#endif

wa_epilogue:
        mov     x0, xzr
        add     sp, sp, #16
@ -288,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
        valid_vect      el1_error               // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
        .align 7
1:      esb
@ -338,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
1:      .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
        .org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
#endif
@ -479,39 +479,6 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        return false;
}

static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
        if (!cpus_have_final_cap(ARM64_SSBD))
                return false;

        return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
        /*
         * The host runs with the workaround always present. If the
         * guest wants it disabled, so be it...
         */
        if (__needs_ssbd_off(vcpu) &&
            __this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
        /*
         * If the guest has disabled the workaround, bring it back on.
         */
        if (__needs_ssbd_off(vcpu) &&
            __this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

static inline void __kvm_unexpected_el2_exception(void)
{
        unsigned long addr, fixup;
@ -208,8 +208,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)

        __debug_switch_to_guest(vcpu);

        __set_guest_arch_workaround_state(vcpu);

        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);
@ -217,8 +215,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));

        __set_host_arch_workaround_state(vcpu);

        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_disable_traps(vcpu);
@ -134,8 +134,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);

        __set_guest_arch_workaround_state(vcpu);

        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);
@ -143,8 +141,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));

        __set_host_arch_workaround_state(vcpu);

        sysreg_save_guest_state_vhe(guest_ctxt);

        __deactivate_traps(vcpu);
@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        switch (kvm_arm_harden_branch_predictor()) {
                        case KVM_BP_HARDEN_UNKNOWN:
                        switch (arm64_get_spectre_v2_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case KVM_BP_HARDEN_WA_NEEDED:
                        case SPECTRE_MITIGATED:
                                val = SMCCC_RET_SUCCESS;
                                break;
                        case KVM_BP_HARDEN_NOT_REQUIRED:
                        case SPECTRE_UNAFFECTED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
                        switch (kvm_arm_have_ssbd()) {
                        case KVM_SSBD_FORCE_DISABLE:
                        case KVM_SSBD_UNKNOWN:
                        switch (arm64_get_spectre_v4_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case KVM_SSBD_KERNEL:
                                val = SMCCC_RET_SUCCESS;
                                break;
                        case KVM_SSBD_FORCE_ENABLE:
                        case KVM_SSBD_MITIGATED:
                        case SPECTRE_MITIGATED:
                                /*
                                 * SSBS everywhere: Indicate no firmware
                                 * support, as the SSBS support will be
                                 * indicated to the guest and the default is
                                 * safe.
                                 *
                                 * Otherwise, expose a permanent mitigation
                                 * to the guest, and hide SSBS so that the
                                 * guest stays protected.
                                 */
                                if (cpus_have_final_cap(ARM64_SSBS))
                                        break;
                                fallthrough;
                        case SPECTRE_UNAFFECTED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
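/*
 * Guest-side counterpart (sketch, assuming an SMCCC v1.1 guest; not part
 * of this hunk): the guest discovers this handler's answer via
 *
 *      arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 *                           ARM_SMCCC_ARCH_WORKAROUND_2, &res);
 *
 * and interprets res.a0 as SMCCC_RET_SUCCESS (dynamic mitigation
 * available), SMCCC_RET_NOT_REQUIRED (nothing to do) or
 * SMCCC_RET_NOT_SUPPORTED (treat as vulnerable), mirroring
 * spectre_v4_get_cpu_fw_mitigation_state() earlier in this series.
 */
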
@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
{
        switch (regid) {
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
                switch (kvm_arm_harden_branch_predictor()) {
                case KVM_BP_HARDEN_UNKNOWN:
                switch (arm64_get_spectre_v2_state()) {
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
                case KVM_BP_HARDEN_WA_NEEDED:
                case SPECTRE_MITIGATED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
                case KVM_BP_HARDEN_NOT_REQUIRED:
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                switch (kvm_arm_have_ssbd()) {
                case KVM_SSBD_FORCE_DISABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                case KVM_SSBD_KERNEL:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
                case KVM_SSBD_FORCE_ENABLE:
                case KVM_SSBD_MITIGATED:
                switch (arm64_get_spectre_v4_state()) {
                case SPECTRE_MITIGATED:
                        /*
                         * As for the hypercall discovery, we pretend we
                         * don't have any FW mitigation if SSBS is there at
                         * all times.
                         */
                        if (cpus_have_final_cap(ARM64_SSBS))
                                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        fallthrough;
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                case KVM_SSBD_UNKNOWN:
                default:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                }
        }

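/*
 * Layout note (an informal summary using the field names above, not new
 * ABI): the WORKAROUND_2 firmware pseudo-register packs a level in the
 * bits covered by KVM_REG_FEATURE_LEVEL_MASK, with the separate
 * ..._WORKAROUND_2_ENABLED flag bit on top; the flag is only meaningful
 * when the level is ..._WORKAROUND_2_AVAIL, which kvm_arm_set_fw_reg()
 * below enforces.
 */
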
@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                val = kvm_psci_version(vcpu, vcpu->kvm);
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;

                if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
                    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
                        val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
                break;
        default:
                return -ENOENT;
@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
                        return -EINVAL;

                wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;

                if (get_kernel_wa_level(reg->id) < wa_level)
                        return -EINVAL;

                /* The enabled bit must not be set unless the level is AVAIL. */
                if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
                    wa_level != val)
                if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
                    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
                        return -EINVAL;

                /* Are we finished or do we need to check the enable bit ? */
                if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
                        return 0;

                /*
                 * If this kernel supports the workaround to be switched on
                 * or off, make sure it matches the requested setting.
                 * Map all the possible incoming states to the only two we
                 * really want to deal with.
                 */
                switch (wa_level) {
                switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
                        wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        break;
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
                        kvm_arm_set_vcpu_workaround_2_flag(vcpu,
                                val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
                        break;
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
                        kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
                        wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                        break;
                default:
                        return -EINVAL;
                }

                /*
                 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
                 * other way around.
                 */
                if (get_kernel_wa_level(reg->id) < wa_level)
                        return -EINVAL;

                return 0;
        default:
                return -ENOENT;
@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                vcpu->arch.reset_state.reset = false;
        }

        /* Default workaround setup is enabled (if supported) */
        if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

        /* Reset timer */
        ret = kvm_timer_vcpu_reset(vcpu);
out:
@ -1131,6 +1131,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                if (!vcpu_has_sve(vcpu))
                        val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
                val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
                if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
                    arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
                        val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
        } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
                val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
                         (0xfUL << ID_AA64ISAR1_API_SHIFT) |