KVM: x86 shared msr infrastructure
The various syscall-related MSRs are fairly expensive to switch.  Currently
we switch them on every vcpu preemption, which is far too often:

- if we're switching to a kernel thread (idle task, threaded interrupt,
  kernel-mode virtio server (vhost-net), for example) and back, then
  there's no need to switch those MSRs since kernel threads won't
  be exiting to userspace.

- if we're switching to another guest running an identical OS, most likely
  those MSRs will have the same value, so there's little point in reloading
  them.

- if we're running the same OS on the guest and host, the MSRs will have
  identical values and reloading is unnecessary.

This patch uses the new user return notifiers to implement last-minute
switching, and checks the MSR values to avoid unnecessary reloading.

Signed-off-by: Avi Kivity <avi@redhat.com>
parent 44ea2b1758
commit 18863bdd60
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -809,4 +809,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 
+void kvm_define_shared_msr(unsigned index, u32 msr);
+void kvm_set_shared_msr(unsigned index, u64 val);
+
 #endif /* _ASM_X86_KVM_HOST_H */
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_EVENTFD
 	select KVM_APIC_ARCHITECTURE
+	select USER_RETURN_NOTIFIER
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -37,6 +37,7 @@
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
+#include <linux/user-return-notifier.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
@@ -87,6 +88,25 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 int ignore_msrs = 0;
 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+#define KVM_NR_SHARED_MSRS 16
+
+struct kvm_shared_msrs_global {
+	int nr;
+	struct kvm_shared_msr {
+		u32 msr;
+		u64 value;
+	} msrs[KVM_NR_SHARED_MSRS];
+};
+
+struct kvm_shared_msrs {
+	struct user_return_notifier urn;
+	bool registered;
+	u64 current_value[KVM_NR_SHARED_MSRS];
+};
+
+static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
+static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
 	{ "pf_guest", VCPU_STAT(pf_guest) },
@@ -123,6 +143,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
+static void kvm_on_user_return(struct user_return_notifier *urn)
+{
+	unsigned slot;
+	struct kvm_shared_msr *global;
+	struct kvm_shared_msrs *locals
+		= container_of(urn, struct kvm_shared_msrs, urn);
+
+	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
+		global = &shared_msrs_global.msrs[slot];
+		if (global->value != locals->current_value[slot]) {
+			wrmsrl(global->msr, global->value);
+			locals->current_value[slot] = global->value;
+		}
+	}
+	locals->registered = false;
+	user_return_notifier_unregister(urn);
+}
+
+void kvm_define_shared_msr(unsigned slot, u32 msr)
+{
+	int cpu;
+	u64 value;
+
+	if (slot >= shared_msrs_global.nr)
+		shared_msrs_global.nr = slot + 1;
+	shared_msrs_global.msrs[slot].msr = msr;
+	rdmsrl_safe(msr, &value);
+	shared_msrs_global.msrs[slot].value = value;
+	for_each_online_cpu(cpu)
+		per_cpu(shared_msrs, cpu).current_value[slot] = value;
+}
+EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
+
+static void kvm_shared_msr_cpu_online(void)
+{
+	unsigned i;
+	struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
+
+	for (i = 0; i < shared_msrs_global.nr; ++i)
+		locals->current_value[i] = shared_msrs_global.msrs[i].value;
+}
+
+void kvm_set_shared_msr(unsigned slot, u64 value)
+{
+	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+
+	if (value == smsr->current_value[slot])
+		return;
+	smsr->current_value[slot] = value;
+	wrmsrl(shared_msrs_global.msrs[slot].msr, value);
+	if (!smsr->registered) {
+		smsr->urn.on_user_return = kvm_on_user_return;
+		user_return_notifier_register(&smsr->urn);
+		smsr->registered = true;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
+
 unsigned long segment_base(u16 selector)
 {
 	struct descriptor_table gdt;
@@ -4815,6 +4893,9 @@ int kvm_arch_hardware_enable(void *garbage)
 		int cpu = raw_smp_processor_id();
 		per_cpu(cpu_tsc_khz, cpu) = 0;
 	}
+
+	kvm_shared_msr_cpu_online();
+
 	return kvm_x86_ops->hardware_enable(garbage);
 }
 
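For context, a minimal usage sketch follows. It is not part of this patch: the intended consumer is a vendor backend such as vmx.c, and the MSR list, helper names, and slot assignments below are illustrative assumptions only. The sketch shows the call pattern the new API is designed for: define each shared MSR once per slot at init time, then write guest values lazily on guest entry; kvm_set_shared_msr() skips redundant wrmsr's and registers a user return notifier so the host values are restored only when the CPU actually returns to userspace, not on every preemption.

/* Illustrative only -- hypothetical consumer of the new shared MSR API. */
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <asm/msr-index.h>

/* Hypothetical slot assignments for the syscall-related MSRs. */
static const u32 example_shared_msrs[] = {
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_STAR,
};

/* Module init: register each MSR with the shared MSR core (slot = index). */
static void example_define_shared_msrs(void)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(example_shared_msrs); ++i)
		kvm_define_shared_msr(i, example_shared_msrs[i]);
}

/*
 * Guest entry: load guest values.  Writes that match the currently loaded
 * value are skipped, and host values come back via kvm_on_user_return().
 */
static void example_load_guest_msrs(const u64 *guest_values)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(example_shared_msrs); ++i)
		kvm_set_shared_msr(i, guest_values[i]);
}

Because both kvm_set_shared_msr() and kvm_on_user_return() compare against the cached per-cpu value before issuing a wrmsr, the common cases described in the commit message (kernel threads, identical guest OS, same OS on guest and host) end up doing no MSR writes at all.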