Merge branch 'kvm-master' into kvm-next
Grab MPX bugfix, and fix conflicts against Rik's adaptive FPU deactivation patch.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit a9b4fb7e79
arch/powerpc/kvm/book3s_hv.c

@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu, *vnext;
 	int i;
 	int srcu_idx;
 
@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if ((threads_per_core > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+		list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+					 arch.run_list) {
 			vcpu->arch.ret = -EBUSY;
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
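Annotation (not part of the diff): the loop body unlinks the current entry — kvmppc_remove_runnable() takes it off vc->runnable_threads — so the plain iterator would chase a next pointer read from an already-unlinked node. A minimal sketch of the hazard and the fix, with hypothetical item/drop_all names:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

static void drop_all(struct list_head *head)
{
	struct item *it, *tmp;

	/*
	 * list_for_each_entry_safe() stashes the next entry in tmp before
	 * the body runs, so list_del() on the current entry cannot derail
	 * the walk; plain list_for_each_entry() would dereference
	 * it->node.next after the node was unlinked.
	 */
	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		kfree(it);
	}
}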
arch/x86/include/asm/kvm_host.h

@@ -401,6 +401,7 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	struct fpu guest_fpu;
+	bool eager_fpu;
 	u64 xcr0;
 	u64 guest_supported_xcr0;
 	u32 guest_xstate_size;
@@ -747,6 +748,7 @@ struct kvm_x86_ops {
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+	void (*fpu_activate)(struct kvm_vcpu *vcpu);
 	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
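Annotation: kvm_x86_ops is KVM's per-vendor dispatch table; declaring .fpu_activate here lets common x86 code request FPU activation without caring whether the SVM or VMX backend is loaded (the two wirings appear in the svm.c and vmx.c hunks below). A reduced sketch of the pattern, all toy_* names hypothetical:

struct toy_vcpu { int id; };

static void svm_like_fpu_activate(struct toy_vcpu *vcpu)
{
	/* vendor-specific work, e.g. clearing CR0.TS */
}

struct toy_ops {
	void (*fpu_activate)(struct toy_vcpu *vcpu);
};

static struct toy_ops svm_like_ops = {
	.fpu_activate = svm_like_fpu_activate,
};

/* Selected once at module init, then used by vendor-agnostic code. */
static struct toy_ops *toy_x86_ops = &svm_like_ops;

static void toy_common_path(struct toy_vcpu *vcpu)
{
	toy_x86_ops->fpu_activate(vcpu);
}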
arch/x86/kvm/cpuid.c

@@ -16,6 +16,8 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
+#include <asm/i387.h> /* For use_eager_fpu. Ugh! */
+#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */
 #include <asm/user.h>
 #include <asm/xsave.h>
 #include "cpuid.h"
@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
+	vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+
 	/*
 	 * The existing code assumes virtual address is 48-bit in the canonical
 	 * address checks; exit if it is ever changed.
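Annotation: this added line is the core of the MPX fix. The flag is recomputed whenever guest CPUID changes, and it is true either when the host already restores FPU state eagerly or when MPX is exposed to the guest, whose bound-register state lives in the XSAVE area and cannot survive lazy deactivation. Restated as a standalone helper (illustrative; wants_eager_fpu is a hypothetical name):

static bool wants_eager_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * Either the host globally runs eager FPU restore, or this guest
	 * may use MPX and must have its FPU state kept loaded.
	 */
	return use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
}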
arch/x86/kvm/cpuid.h

@@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
+
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
 #endif
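Annotation: guest_cpuid_has_mpx() deliberately mirrors guest_cpuid_has_rtm() just above it; both features are advertised in CPUID.(EAX=7,ECX=0):EBX, with MPX at bit 14. For illustration only, a rough host-side analogue of the same check using GCC's <cpuid.h>:

#include <cpuid.h>
#include <stdbool.h>

static bool host_has_mpx(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return false;
	return ebx & (1u << 14);	/* X86_FEATURE_MPX: leaf 7, EBX bit 14 */
}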
arch/x86/kvm/svm.c

@@ -4383,6 +4383,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
+	.fpu_activate = svm_fpu_activate,
 	.fpu_deactivate = svm_fpu_deactivate,
 
 	.tlb_flush = svm_flush_tlb,
arch/x86/kvm/vmx.c

@@ -10333,6 +10333,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
+	.fpu_activate = vmx_fpu_activate,
 	.fpu_deactivate = vmx_fpu_deactivate,
 
 	.tlb_flush = vmx_flush_tlb,
arch/x86/kvm/x86.c

@@ -6282,6 +6282,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 		return;
 
 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (is_error_page(page))
+		return;
 	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
 
 	/*
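Annotation: gfn_to_page() signals failure by returning a distinguished error page rather than NULL, so without the new check a failed lookup would flow straight into page_to_phys() and program a bogus APIC access address. This is the same test-the-sentinel discipline as the kernel's ERR_PTR convention, sketched here with a hypothetical lookup callback:

#include <linux/err.h>

struct thing;

static int consume(struct thing *(*lookup)(void))
{
	struct thing *t = lookup();

	/*
	 * Analogous to is_error_page(): the failure is encoded in the
	 * returned pointer itself and must be tested before use.
	 */
	if (IS_ERR(t))
		return PTR_ERR(t);

	/* safe to use t from here on */
	return 0;
}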
@@ -7155,7 +7157,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 	 * Every 255 times fpu_counter rolls over to 0; a guest that uses
 	 * the FPU in bursts will revert to loading it on demand.
 	 */
-	if (!use_eager_fpu()) {
+	if (!vcpu->arch.eager_fpu) {
 		if (++vcpu->fpu_counter < 5)
 			kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
 	}
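Annotation: switching the test from the host-global use_eager_fpu() to the per-vCPU flag means an MPX-capable guest never enters the lazy-deactivation path at all, even on a host that otherwise runs lazy FPU. The heuristic itself, restated as a toy predicate (illustrative names only):

#include <stdbool.h>
#include <stdint.h>

/*
 * fpu_counter is a u8 in the real struct, so it wraps every 256
 * increments and lazy mode is periodically retried, matching the
 * "rolls over to 0" comment in the hunk above.
 */
static bool should_request_fpu_deactivate(uint8_t *fpu_counter, bool eager_fpu)
{
	if (eager_fpu)
		return false;		/* keep guest FPU state loaded */

	return ++(*fpu_counter) < 5;	/* bursty guests revert to on-demand */
}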
@@ -7174,11 +7176,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 						unsigned int id)
 {
+	struct kvm_vcpu *vcpu;
+
 	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
 		printk_once(KERN_WARNING
 		"kvm: SMP vm created on host with unstable TSC; "
 		"guest TSC will not be reliable\n");
-	return kvm_x86_ops->vcpu_create(kvm, id);
+
+	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+	/*
+	 * Activate fpu unconditionally in case the guest needs eager FPU. It will be
+	 * deactivated soon if it doesn't.
+	 */
+	kvm_x86_ops->fpu_activate(vcpu);
+	return vcpu;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
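Annotation: the resulting lifecycle is create-with-FPU-active, then demote lazily if the guest turns out not to need it: a guest that needs eager FPU (MPX) is never caught with the FPU deactivated, while ordinary guests are deactivated again a few kvm_put_guest_fpu() calls later via the counter shown above. A toy model of that flow (all names hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct toy_vcpu {
	bool fpu_active;
	bool eager_fpu;
	uint8_t fpu_counter;
};

static void toy_vcpu_create(struct toy_vcpu *v, bool eager)
{
	v->eager_fpu = eager;
	v->fpu_active = true;	/* activated unconditionally at create */
	v->fpu_counter = 0;
}

static void toy_put_guest_fpu(struct toy_vcpu *v)
{
	if (!v->eager_fpu && ++v->fpu_counter < 5)
		v->fpu_active = false;	/* lazy guests drop it again */
}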