x86/litf: Introduce vmx status variable
Store the effective mitigation of VMX in a status variable and use it to report the VMX state in the l1tf sysfs file.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de
commit 72c6d2db64
parent 215af5499d
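Illustration only, not part of the patch: once the status variable is wired into the sysfs reporting below, the effective VMX state can be read from userspace through the l1tf vulnerabilities file. A minimal sketch, assuming the standard sysfs path /sys/devices/system/cpu/vulnerabilities/l1tf:

#include <stdio.h>

int main(void)
{
	char line[256];
	/* The file content is produced by the l1tf_show_state() helper added below. */
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		/* e.g. "Mitigation: PTE Inversion; VMX: SMT vulnerable, L1D conditional cache flushes" */
		fputs(line, stdout);
	fclose(f);
	return 0;
}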
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -573,4 +573,13 @@ enum vm_instruction_error_number {
 	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
 };
 
+enum vmx_l1d_flush_state {
+	VMENTER_L1D_FLUSH_AUTO,
+	VMENTER_L1D_FLUSH_NEVER,
+	VMENTER_L1D_FLUSH_COND,
+	VMENTER_L1D_FLUSH_ALWAYS,
+};
+
+extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+
 #endif
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -22,6 +22,7 @@
 #include <asm/processor-flags.h>
 #include <asm/fpu/internal.h>
 #include <asm/msr.h>
+#include <asm/vmx.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
@@ -657,6 +658,12 @@ void x86_spec_ctrl_setup_ap(void)
 
 #undef pr_fmt
 #define pr_fmt(fmt)	"L1TF: " fmt
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
+EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+#endif
+
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
@@ -686,6 +693,32 @@ static void __init l1tf_select_mitigation(void)
 
 #ifdef CONFIG_SYSFS
 
+#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static const char *l1tf_vmx_states[] = {
+	[VMENTER_L1D_FLUSH_AUTO]	= "auto",
+	[VMENTER_L1D_FLUSH_NEVER]	= "vulnerable",
+	[VMENTER_L1D_FLUSH_COND]	= "conditional cache flushes",
+	[VMENTER_L1D_FLUSH_ALWAYS]	= "cache flushes",
+};
+
+static ssize_t l1tf_show_state(char *buf)
+{
+	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+
+	return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
+		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled",
+		       l1tf_vmx_states[l1tf_vmx_mitigation]);
+}
+#else
+static ssize_t l1tf_show_state(char *buf)
+{
+	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+}
+#endif
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -713,9 +746,8 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 
 	case X86_BUG_L1TF:
 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
-			return sprintf(buf, "Mitigation: Page Table Inversion\n");
+			return l1tf_show_state(buf);
 		break;
-
 	default:
 		break;
 	}
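For reference, the strings the new l1tf_show_state() can emit, derived directly from L1TF_DEFAULT_MSG, the l1tf_vmx_states[] table and the SMT check above (the wording follows the patch; the combinations shown are illustrative):

	Mitigation: PTE Inversion
	Mitigation: PTE Inversion; VMX: SMT vulnerable, L1D vulnerable
	Mitigation: PTE Inversion; VMX: SMT disabled, L1D conditional cache flushes
	Mitigation: PTE Inversion; VMX: SMT vulnerable, L1D cache flushes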
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -193,19 +193,13 @@ extern const ulong vmx_return;
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 
-/* These MUST be in sync with vmentry_l1d_param order. */
-enum vmx_l1d_flush_state {
-	VMENTER_L1D_FLUSH_NEVER,
-	VMENTER_L1D_FLUSH_COND,
-	VMENTER_L1D_FLUSH_ALWAYS,
-};
-
 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
 
 static const struct {
 	const char *option;
 	enum vmx_l1d_flush_state cmd;
 } vmentry_l1d_param[] = {
+	{"auto",	VMENTER_L1D_FLUSH_AUTO},
 	{"never",	VMENTER_L1D_FLUSH_NEVER},
 	{"cond",	VMENTER_L1D_FLUSH_COND},
 	{"always",	VMENTER_L1D_FLUSH_ALWAYS},
@@ -13235,8 +13229,12 @@ static int __init vmx_setup_l1d_flush(void)
 {
 	struct page *page;
 
+	if (!boot_cpu_has_bug(X86_BUG_L1TF))
+		return 0;
+
+	l1tf_vmx_mitigation = vmentry_l1d_flush;
+
 	if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
-	    !boot_cpu_has_bug(X86_BUG_L1TF) ||
 	    vmx_l1d_use_msr_save_list())
 		return 0;
 
@@ -13251,12 +13249,14 @@ static int __init vmx_setup_l1d_flush(void)
 	return 0;
 }
 
-static void vmx_free_l1d_flush_pages(void)
+static void vmx_cleanup_l1d_flush(void)
 {
 	if (vmx_l1d_flush_pages) {
 		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
 		vmx_l1d_flush_pages = NULL;
 	}
+	/* Restore state so sysfs ignores VMX */
+	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 }
 
 static int __init vmx_init(void)
@@ -13299,7 +13299,7 @@ static int __init vmx_init(void)
 	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 		     __alignof__(struct vcpu_vmx), THIS_MODULE);
 	if (r) {
-		vmx_free_l1d_flush_pages();
+		vmx_cleanup_l1d_flush();
 		return r;
 	}
 
@@ -13343,7 +13343,7 @@ static void __exit vmx_exit(void)
 		static_branch_disable(&enable_evmcs);
 	}
 #endif
-	vmx_free_l1d_flush_pages();
+	vmx_cleanup_l1d_flush();
 }
 
 module_init(vmx_init)