KVM fixes for v4.10-rc3
MIPS (both for stable):
 - fix host kernel crashes when receiving a signal with 64-bit userspace
 - flush instruction cache on all vcpus after generating entry code

x86:
 - fix NULL dereference in MMU caused by SMM transitions (for stable)
 - correct guest instruction pointer after emulating some VMX errors
 - minor cleanup
-----BEGIN PGP SIGNATURE-----

iQEcBAABCAAGBQJYb/N7AAoJEED/6hsPKofoa4QH/0/jwHr64lFeiOzMxqZfTF0y
wufcTqw3zGq5iPaNlEwn+6AkKnTq2IPws92FludfPHPb7BrLUPqrXxRlSRN+XPVw
pHVcV9u0q4yghMi7/6Flu3JASnpD6PrPZ7ezugZwgXFrR7pewd/+sTq6xBUnI9rZ
nNEYsfh8dYiBicxSGXlmZcHLuJJHKshjsv9F6ngyBGXAAf/F+nLiJReUzPO0m2+P
gmXi5zhVu6z05zlaCW1KAmJ1QV1UJla1vZnzrnK3twRK/05l7YX+xCbHIo1wB03R
2YhKDnSrnG3Zt+KpXfRhADXazNgM5ASvORdvI6RvjLNVxlnOveQtAcfRyvZezT4=
=LXLf
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:

 "MIPS:
   - fix host kernel crashes when receiving a signal with 64-bit
     userspace
   - flush instruction cache on all vcpus after generating entry code
     (both for stable)

  x86:
   - fix NULL dereference in MMU caused by SMM transitions (for stable)
   - correct guest instruction pointer after emulating some VMX errors
   - minor cleanup"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: VMX: remove duplicated declaration
  KVM: MIPS: Flush KVM entry code from icache globally
  KVM: MIPS: Don't clobber CP0_Status.UX
  KVM: x86: reset MMU on KVM_SET_VCPU_EVENTS
  KVM: nVMX: fix instruction skipping during emulated vm-entry
commit 08289086b0
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -521,6 +521,9 @@ void *kvm_mips_build_exit(void *addr)
 	uasm_i_and(&p, V0, V0, AT);
 	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
 	uasm_i_or(&p, V0, V0, AT);
+#ifdef CONFIG_64BIT
+	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
+#endif
 	uasm_i_mtc0(&p, V0, C0_STATUS);
 	uasm_i_ehb(&p);
 
@@ -643,7 +646,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 	/* Setup status register for running guest in UM */
 	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
-	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
+	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
 	uasm_i_and(&p, V1, V1, AT);
 	uasm_i_mtc0(&p, V1, C0_STATUS);
 	uasm_i_ehb(&p);
 
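[Annotation, not part of the patch] Both hunks above come from "KVM: MIPS: Don't clobber CP0_Status.UX". On 64-bit kernels the generated exit handler rebuilds CP0_Status for kernel mode and previously left UX/SX clear; if the host then had to deliver a signal to a 64-bit (n64) user process, that process had lost its 64-bit address space and the kernel crashed. The fix sets SX|UX again on the exit path and widens the return-to-guest mask so the guest, which runs in user mode, still cannot reach the 64-bit segments. A minimal userspace sketch of the bit arithmetic, using the architectural ST0_* bit positions (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define ST0_IE   (1u << 0)   /* interrupt enable */
#define ST0_EXL  (1u << 1)   /* exception level */
#define KSU_USER (2u << 3)   /* KSU field: user mode */
#define ST0_UX   (1u << 5)   /* 64-bit user segments */
#define ST0_SX   (1u << 6)   /* 64-bit supervisor segments */
#define ST0_MX   (1u << 24)  /* MDMX/DSP enable */
#define ST0_CU0  (1u << 28)  /* coprocessor 0 usable */

int main(void)
{
	/* Exit path (kvm_mips_build_exit): Status for kernel mode.
	 * The new uasm_i_ori() restores SX|UX under CONFIG_64BIT. */
	uint32_t v0 = ST0_CU0;
	v0 |= ST0_SX | ST0_UX;

	/* Guest-entry path (kvm_mips_build_ret_to_guest): the widened
	 * mask now strips SX/UX again before entering the guest. */
	uint32_t v1 = v0 | ST0_EXL | KSU_USER | ST0_IE;
	v1 &= ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX);

	printf("exit Status=%#x, guest Status=%#x\n",
	       (unsigned)v0, (unsigned)v1);
	return 0;
}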
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -360,8 +360,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
 
 	/* Invalidate the icache for these ranges */
-	local_flush_icache_range((unsigned long)gebase,
-				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+	flush_icache_range((unsigned long)gebase,
+			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
 	/*
 	 * Allocate comm page for guest kernel, a TLB will be reserved for
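[Annotation, not part of the patch] This hunk is "KVM: MIPS: Flush KVM entry code from icache globally". The entry/exit handlers are generated into the gebase area when the vCPU is created, but local_flush_icache_range() only invalidates the icache of the CPU doing the generating; if the vCPU thread is later scheduled on another core, that core can fetch stale instructions. flush_icache_range() propagates the flush to all CPUs. A userspace sketch of the same invariant (buffer contents are placeholders; the builtin is the compiler's portable cache-sync primitive, not the kernel API):

#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Stand-in for the per-vCPU gebase area KVM emits entry code into. */
	size_t size = 4096;
	unsigned char *gebase = mmap(NULL, size,
				     PROT_READ | PROT_WRITE | PROT_EXEC,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (gebase == MAP_FAILED)
		return 1;

	memset(gebase, 0, size);	/* "generate" code (placeholder bytes) */

	/* Before any thread -- possibly running on another CPU -- jumps
	 * into this buffer, the icache must be coherent with the writes. */
	__builtin___clear_cache((char *)gebase, (char *)gebase + size);

	munmap(gebase, size);
	return 0;
}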
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -132,12 +132,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 
 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
 
-#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
-	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
-	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
-	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
-	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
-
 /*
  * Hyper-V requires all of these, so mark them as supported even though
  * they are just treated the same as all-context.
@@ -10473,12 +10467,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	    !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-		goto out;
+		return 1;
 	}
 	if (vmcs12->vmcs_link_pointer != -1ull) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
-		goto out;
+		return 1;
 	}
 
 	/*
@@ -10498,7 +10492,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			goto out;
+			return 1;
 		}
 	}
 
@@ -10516,7 +10510,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			goto out;
+			return 1;
 		}
 	}
 
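[Annotation, not part of the patch] The three "return 1" hunks belong to "KVM: nVMX: fix instruction skipping during emulated vm-entry". A VM-entry failure during or after loading guest state behaves architecturally like a VM-exit: RIP is loaded from the vmcs12 host-state area, which is what nested_vmx_entry_failure() emulates. The old "goto out" then fell through to a path that also skipped the VMLAUNCH/VMRESUME instruction, bumping the freshly loaded host RIP past an instruction it never executed; returning 1 resumes L1 with RIP untouched. A toy model of the two control flows (hypothetical addresses and stub functions, not kernel code):

#include <stdio.h>

static unsigned long rip;

/* Emulated "VM-entry failure during or after loading guest state":
 * like a VM-exit, RIP comes from the vmcs12 host-state area. */
static void emulate_entry_failure(void)
{
	rip = 0xffffffff81000000UL;	/* placeholder host-area RIP */
}

void skip_emulated_instruction(void)
{
	rip += 3;	/* advance past the VMLAUNCH/VMRESUME encoding */
}

int main(void)
{
	rip = 0x401000;			/* L1 executing VMLAUNCH here */
	emulate_entry_failure();

	/* Old path ("goto out") would also do this, corrupting RIP: */
	/* skip_emulated_instruction(); */

	/* Fixed path ("return 1"): leave the host-loaded RIP alone. */
	printf("resume L1 at %#lx\n", rip);
	return 0;
}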
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3070,6 +3070,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
+static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
@@ -3106,10 +3108,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+		u32 hflags = vcpu->arch.hflags;
 		if (events->smi.smm)
-			vcpu->arch.hflags |= HF_SMM_MASK;
+			hflags |= HF_SMM_MASK;
 		else
-			vcpu->arch.hflags &= ~HF_SMM_MASK;
+			hflags &= ~HF_SMM_MASK;
+		kvm_set_hflags(vcpu, hflags);
+
 		vcpu->arch.smi_pending = events->smi.pending;
 		if (events->smi.smm_inside_nmi)
 			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
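[Annotation, not part of the patch] The last two hunks are "KVM: x86: reset MMU on KVM_SET_VCPU_EVENTS". Entering or leaving SMM switches the vCPU to a different memory-slot address space, so cached MMU state is invalid across the transition. Writing vcpu->arch.hflags directly skipped that invalidation and led to the NULL dereference mentioned in the summary; routing the update through kvm_set_hflags() lets KVM notice HF_SMM_MASK transitions and reset the MMU. A toy model of the pattern (stub bodies; names echo KVM's but the logic is illustrative):

#include <stdio.h>

#define HF_SMM_MASK (1u << 6)

struct vcpu { unsigned hflags; };

static void mmu_reset(struct vcpu *v)
{
	(void)v;
	printf("MMU reset\n");	/* stands in for kvm_mmu_reset_context() */
}

/* Setter notices SMM transitions and performs the required side effect,
 * instead of callers poking the flags field directly. */
static void set_hflags(struct vcpu *v, unsigned new_flags)
{
	if ((v->hflags ^ new_flags) & HF_SMM_MASK)
		mmu_reset(v);
	v->hflags = new_flags;
}

int main(void)
{
	struct vcpu v = { .hflags = 0 };

	set_hflags(&v, v.hflags | HF_SMM_MASK);		/* enter SMM: reset */
	set_hflags(&v, v.hflags | HF_SMM_MASK);		/* no transition */
	set_hflags(&v, v.hflags & ~HF_SMM_MASK);	/* leave SMM: reset */
	return 0;
}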