x86/KVM/VMX: Add L1D flush logic
Add the logic for flushing L1D on VMENTER. The flush depends on the static
key being enabled and the new l1tf_flush_l1d flag being set.

The flag is set:
 - Always, if the flush module parameter is 'always'

 - Conditionally at:
   - Entry to vcpu_run(), i.e. after executing user space

   - From the sched_in notifier, i.e. when switching to a vCPU thread.

   - From vmexit handlers which are considered unsafe, i.e. where
     sensitive data can be brought into L1D:

     - The emulator, which could be a good target for other speculative
       execution-based threats,

     - The MMU, which can bring host page tables in the L1 cache.

     - External interrupts

     - Nested operations that require the MMU (see above). That is
       vmptrld, vmptrst, vmclear, vmwrite, vmread.

     - When handling invept, invvpid

  [ tglx: Split out from combo patch and reduced to a single flag ]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 3fa045be4c
commit c595ceee45
@@ -713,6 +713,9 @@ struct kvm_vcpu_arch {
 
         /* be preempted when it's in kernel-mode(cpl=0) */
         bool preempted_in_kernel;
+
+        /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
+        bool l1tf_flush_l1d;
 };
 
 struct kvm_lpage_info {
@@ -881,6 +884,7 @@ struct kvm_vcpu_stat {
         u64 signal_exits;
         u64 irq_window_exits;
         u64 nmi_window_exits;
+        u64 l1d_flush;
         u64 halt_exits;
         u64 halt_successful_poll;
         u64 halt_attempted_poll;
@@ -3840,6 +3840,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 {
         int r = 1;
 
+        vcpu->arch.l1tf_flush_l1d = true;
         switch (vcpu->arch.apf.host_apf_reason) {
         default:
                 trace_kvm_page_fault(fault_address, error_code);
@@ -9576,9 +9576,20 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 #define L1D_CACHE_ORDER 4
 static void *vmx_l1d_flush_pages;
 
-static void __maybe_unused vmx_l1d_flush(void)
+static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 {
         int size = PAGE_SIZE << L1D_CACHE_ORDER;
+        bool always;
+
+        /*
+         * If the mitigation mode is 'flush always', keep the flush bit
+         * set, otherwise clear it. It gets set again either from
+         * vcpu_run() or from one of the unsafe VMEXIT handlers.
+         */
+        always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+        vcpu->arch.l1tf_flush_l1d = always;
+
+        vcpu->stat.l1d_flush++;
 
         if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
                 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
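The hunk is cut off after the wrmsrl(). In the tree this applies to, the rest of vmx_l1d_flush() (introduced by the parent commit, not changed here) falls back to a software flush when the CPU lacks the IA32_FLUSH_CMD MSR: it walks the 64 KiB vmx_l1d_flush_pages buffer once per page to populate the TLB, then once per 64-byte cache line to displace the current L1D contents. A rough C-level approximation of that fallback, for illustration only (the real code is hand-written inline assembly with a serializing CPUID between the loops and an LFENCE at the end):

/* Illustrative C rendering of the software fallback path; the function
 * name is invented for this sketch. */
static void l1d_flush_sw_sketch(const unsigned char *flush_pages, int size)
{
        volatile unsigned char sink;
        int i;

        for (i = 0; i < size; i += 4096)  /* touch each page: populate the TLB */
                sink = flush_pages[i];
        for (i = 0; i < size; i += 64)    /* read every cache line: fill the L1D */
                sink = flush_pages[i];
        (void)sink;
}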
@@ -9847,6 +9858,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
                         [ss]"i"(__KERNEL_DS),
                         [cs]"i"(__KERNEL_CS)
                         );
+                vcpu->arch.l1tf_flush_l1d = true;
         }
 }
 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
@@ -10104,6 +10116,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
                 (unsigned long)&current_evmcs->host_rsp : 0;
 
+        if (static_branch_unlikely(&vmx_l1d_should_flush)) {
+                if (vcpu->arch.l1tf_flush_l1d)
+                        vmx_l1d_flush(vcpu);
+        }
+
         asm(
                 /* Store host registers */
                 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
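The vmx_l1d_should_flush static key tested here is the global on/off switch; its declaration and the code that enables it are not part of this patch but come from the earlier module-parameter and setup patches in this series. Purely for orientation, a hedged sketch of how such a key is typically declared and enabled (the helper name below is hypothetical, and the allocation of vmx_l1d_flush_pages is elided):

#include <linux/jump_label.h>

/* Global gate for the per-vCPU flag test on the VMENTER path. */
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);

/* Hypothetical setup helper: enable the key unless flushing is disabled. */
static int vmx_l1d_flush_setup_sketch(void)
{
        if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
                return 0;
        /* ... allocate vmx_l1d_flush_pages here when the MSR is unavailable ... */
        static_branch_enable(&vmx_l1d_should_flush);
        return 0;
}

With the key disabled, static_branch_unlikely() compiles to a patched-out branch, so guests on unaffected hosts pay essentially nothing on the entry path.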
@@ -11917,6 +11934,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                 return ret;
         }
 
+        /* Hide L1D cache contents from the nested guest. */
+        vmx->vcpu.arch.l1tf_flush_l1d = true;
+
         /*
          * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
          * by event injection, halt vcpu.
@@ -195,6 +195,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "irq_injections", VCPU_STAT(irq_injections) },
         { "nmi_injections", VCPU_STAT(nmi_injections) },
         { "req_event", VCPU_STAT(req_event) },
+        { "l1d_flush", VCPU_STAT(l1d_flush) },
         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
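With the debugfs entry above in place, the aggregate flush count becomes visible at run time as the l1d_flush file in KVM's debugfs statistics directory (typically /sys/kernel/debug/kvm/l1d_flush, assuming debugfs is mounted in the usual place), which is a quick way to check whether the conditional mode is actually triggering flushes.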
@@ -4874,6 +4875,9 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
                                 unsigned int bytes, struct x86_exception *exception)
 {
+        /* kvm_write_guest_virt_system can pull in tons of pages. */
+        vcpu->arch.l1tf_flush_l1d = true;
+
         return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
                                            PFERR_WRITE_MASK, exception);
 }
@@ -6050,6 +6054,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
         bool writeback = true;
         bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
 
+        vcpu->arch.l1tf_flush_l1d = true;
+
         /*
          * Clear write_fault_to_shadow_pgtable here to ensure it is
          * never reused.
@@ -7579,6 +7585,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
         struct kvm *kvm = vcpu->kvm;
 
         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+        vcpu->arch.l1tf_flush_l1d = true;
 
         for (;;) {
                 if (kvm_vcpu_running(vcpu)) {
@@ -8698,6 +8705,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
+        vcpu->arch.l1tf_flush_l1d = true;
         kvm_x86_ops->sched_in(vcpu, cpu);
 }
 