/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
 * VMX functions:
 */

static inline int cpu_has_vmx(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}
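
/*
 * Illustrative caller (not part of the original header): code probing for
 * VT-x support before doing anything VMX-related might gate on this helper
 * roughly as follows.
 *
 *	if (!cpu_has_vmx()) {
 *		pr_info("VMX not reported by CPUID\n");
 *		return -ENODEV;
 *	}
 */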

/** Disable VMX on the current CPU
 *
 * vmxoff causes an undefined-opcode exception if vmxon was not run
 * on the CPU previously. Only call this function if you know VMX
 * is enabled.
 */
static inline void cpu_vmxoff(void)
{
	asm volatile ("vmxoff");
	cr4_clear_bits(X86_CR4_VMXE); /* leave CR4.VMXE clear for the next user */
}

static inline int cpu_vmx_enabled(void)
{
	return __read_cr4() & X86_CR4_VMXE;
}

/** Disable VMX if it is enabled on the current CPU
 *
 * You shouldn't call this if cpu_has_vmx() returns 0.
 */
static inline void __cpu_emergency_vmxoff(void)
{
	if (cpu_vmx_enabled())
		cpu_vmxoff();
}

/** Disable VMX if it is supported and enabled on the current CPU
 */
static inline void cpu_emergency_vmxoff(void)
{
	if (cpu_has_vmx())
		__cpu_emergency_vmxoff();
}
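
/*
 * Usage sketch (illustrative, not taken from this header): the emergency
 * helpers are intended for shutdown-style paths that must hand the CPU over
 * in a known state. A hypothetical panic/kexec routine could simply do:
 *
 *	cpu_emergency_vmxoff();		// leave VMX operation if active
 *	cpu_emergency_svm_disable();	// likewise for SVM, see below
 *
 * Both calls are safe no-ops when the respective extension is not present.
 */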

/*
 * SVM functions:
 */

/** Check if the CPU has SVM support
 *
 * You can use the 'msg' arg to get a message describing the problem,
 * if the function returns zero. Simply pass NULL if you are not interested
 * in the messages; gcc should take care of not generating code for
 * the messages in this case.
 */
static inline int cpu_has_svm(const char **msg)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
		if (msg)
			*msg = "not amd or hygon";
		return 0;
	}

	if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
		if (msg)
			*msg = "can't execute cpuid_8000000a";
		return 0;
	}

	if (!boot_cpu_has(X86_FEATURE_SVM)) {
		if (msg)
			*msg = "svm not available";
		return 0;
	}
	return 1;
}
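
/*
 * Illustrative caller (not from the original header), showing the 'msg'
 * convention described above:
 *
 *	const char *reason;
 *
 *	if (!cpu_has_svm(&reason)) {
 *		pr_info("SVM not usable: %s\n", reason);
 *		return -ENODEV;
 *	}
 */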

/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

	/* Drop the host save area before turning SVM off */
	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	/* Clearing EFER.SVME disables SVM on this CPU */
	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
}

/** Makes sure SVM is disabled, if it is supported on the CPU
 */
static inline void cpu_emergency_svm_disable(void)
{
	if (cpu_has_svm(NULL))
		cpu_svm_disable();
}

#endif /* _ASM_X86_VIRTEX_H */