x86, kvm: cache the base of the KVM cpuid leaves
It is unnecessary to go through hypervisor_cpuid_base every time
a leaf is found (which will be every time a feature is requested
after the next patch).
Fixes: 1085ba7f55
Cc: stable@vger.kernel.org
Cc: mtosatti@redhat.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 1c300a4077
parent 5f66b62095
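The change lazily caches the result of the CPUID signature scan: the probe still goes through hypervisor_cpuid_base(), but only on the first call, after which the base leaf is memoized in a function-local static. Below is a minimal, standalone sketch of that pattern; probe_cpuid_base() is a hypothetical stand-in for the kernel's __kvm_cpuid_base(), not the actual kernel code.

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical stand-in for the expensive probe (__kvm_cpuid_base() in the
 * patch); pretend the KVM signature was found at leaf 0x40000000. */
static uint32_t probe_cpuid_base(void)
{
        puts("scanning CPUID leaves for the KVM signature...");
        return 0x40000000;
}

/* Lazily cached wrapper, same shape as the patched kvm_cpuid_base():
 * -1 marks "not probed yet"; the probe runs at most once and every later
 * call just returns the memoized value. */
static uint32_t cached_cpuid_base(void)
{
        static int base = -1;   /* int is fine: KVM bases are 0x400000xx */

        if (base == -1)
                base = probe_cpuid_base();

        return (uint32_t)base;
}

int main(void)
{
        printf("first call:  0x%" PRIx32 "\n", cached_cpuid_base());   /* probes */
        printf("second call: 0x%" PRIx32 "\n", cached_cpuid_base());   /* cached */
        return 0;
}

Using a function-local static keeps the caching an implementation detail of the accessor, so no separate init hook is needed and whichever caller comes first pays the probe cost exactly once.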
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,28 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 	return ret;
 }
 
-static inline uint32_t kvm_cpuid_base(void)
-{
-	if (boot_cpu_data.cpuid_level < 0)
-		return 0;	/* So we don't blow up on old processors */
-
-	if (cpu_has_hypervisor)
-		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
-
-	return 0;
-}
-
-static inline bool kvm_para_available(void)
-{
-	return kvm_cpuid_base() != 0;
-}
-
 static inline unsigned int kvm_arch_para_features(void)
 {
 	return cpuid_eax(KVM_CPUID_FEATURES);
 }
 
 #ifdef CONFIG_KVM_GUEST
+bool kvm_para_available(void);
 void __init kvm_guest_init(void);
 void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
@@ -126,6 +111,11 @@ static inline void kvm_spinlock_init(void)
 #define kvm_async_pf_task_wait(T) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
+static inline bool kvm_para_available(void)
+{
+	return 0;
+}
+
 static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -500,6 +500,33 @@ void __init kvm_guest_init(void)
 #endif
 }
 
+static noinline uint32_t __kvm_cpuid_base(void)
+{
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
+	if (cpu_has_hypervisor)
+		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
+
+	return 0;
+}
+
+static inline uint32_t kvm_cpuid_base(void)
+{
+	static int kvm_cpuid_base = -1;
+
+	if (kvm_cpuid_base == -1)
+		kvm_cpuid_base = __kvm_cpuid_base();
+
+	return kvm_cpuid_base;
+}
+
+bool kvm_para_available(void)
+{
+	return kvm_cpuid_base() != 0;
+}
+EXPORT_SYMBOL_GPL(kvm_para_available);
+
 static uint32_t __init kvm_detect(void)
 {
 	return kvm_cpuid_base();