mirror of https://gitee.com/openkylin/linux.git
KVM: VMX: Move the hardware {un}setup functions to the bottom
...so that future patches can reference e.g. @kvm_vmx_exit_handlers without having to simultaneously move a big chunk of code. Speaking from experience, resolving merge conflicts is an absolute nightmare without pre-moving the code. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
5158917c7b
commit
a3203381ca
|
@ -6067,190 +6067,6 @@ static void vmx_enable_tdp(void)
|
|||
kvm_enable_tdp();
|
||||
}
|
||||
|
||||
static __exit void nested_vmx_hardware_unsetup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (enable_shadow_vmcs) {
|
||||
for (i = 0; i < VMX_BITMAP_NR; i++)
|
||||
free_page((unsigned long)vmx_bitmap[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static __init int nested_vmx_hardware_setup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (enable_shadow_vmcs) {
|
||||
for (i = 0; i < VMX_BITMAP_NR; i++) {
|
||||
vmx_bitmap[i] = (unsigned long *)
|
||||
__get_free_page(GFP_KERNEL);
|
||||
if (!vmx_bitmap[i]) {
|
||||
nested_vmx_hardware_unsetup();
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
init_vmcs_shadow_fields();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * hardware_setup - one-time VMX configuration at module load.
 *
 * Snapshots host MSR state, validates the VMCS configuration, then
 * progressively disables optional features (module flags and
 * kvm_x86_ops callbacks) the hardware cannot support.  Ordering
 * matters: later checks consume flags cleared earlier (e.g. enable_ept
 * feeds enable_ept_ad_bits, enable_unrestricted_guest and enable_pml).
 *
 * Return: 0 on success, -EIO for an unusable VMCS configuration, or a
 * negative error from nested_vmx_hardware_setup()/alloc_kvm_area().
 */
static __init int hardware_setup(void)
{
	unsigned long host_bndcfgs;
	int r, i;

	rdmsrl_safe(MSR_EFER, &host_efer);

	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
		kvm_define_shared_msr(i, vmx_msr_index[i]);

	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	/* KVM does not preserve the host's BNDCFGS; warn if it is live. */
	if (boot_cpu_has(X86_FEATURE_MPX)) {
		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
	}

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		rdmsrl(MSR_IA32_XSS, host_xss);

	/* VPID needs INVVPID with at least one of single/global context. */
	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
		enable_vpid = 0;

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels() ||
	    !cpu_has_vmx_ept_mt_wb() ||
	    !cpu_has_vmx_invept_global())
		enable_ept = 0;

	/* A/D bits and unrestricted guest both require EPT (checked above). */
	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
		enable_ept_ad_bits = 0;

	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_virtual_nmis())
		enable_vnmi = 0;

	/*
	 * set_apic_access_page_addr() is used to reload apic access
	 * page upon invalidation. No need to do anything if not
	 * using the APIC_ACCESS_ADDR VMCS field.
	 */
	if (!flexpriority_enabled)
		kvm_x86_ops->set_apic_access_page_addr = NULL;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

#if IS_ENABLED(CONFIG_HYPERV)
	/* Prefer Hyper-V's guest-mapping flush enlightenment for EPT. */
	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
	    && enable_ept)
		kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
#endif

	/* Without PAUSE-loop exiting, neutralize all PLE tunables. */
	if (!cpu_has_vmx_ple()) {
		ple_gap = 0;
		ple_window = 0;
		ple_window_grow = 0;
		ple_window_max = 0;
		ple_window_shrink = 0;
	}

	if (!cpu_has_vmx_apicv()) {
		enable_apicv = 0;
		kvm_x86_ops->sync_pir_to_irr = NULL;
	}

	if (cpu_has_vmx_tsc_scaling()) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 48;
	}

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	if (enable_ept)
		vmx_enable_tdp();
	else
		kvm_disable_tdp();

	/* Nested-state ioctls are meaningless when nesting is disabled. */
	if (!nested) {
		kvm_x86_ops->get_nested_state = NULL;
		kvm_x86_ops->set_nested_state = NULL;
	}

	/*
	 * Only enable PML when hardware supports PML feature, and both EPT
	 * and EPT A/D bit features are enabled -- PML depends on them to work.
	 */
	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
		enable_pml = 0;

	if (!enable_pml) {
		kvm_x86_ops->slot_enable_log_dirty = NULL;
		kvm_x86_ops->slot_disable_log_dirty = NULL;
		kvm_x86_ops->flush_log_dirty = NULL;
		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
	}

	if (!cpu_has_vmx_preemption_timer())
		kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;

	if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
		u64 vmx_msr;

		/* IA32_VMX_MISC reports the preemption timer's rate bits. */
		rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
		cpu_preemption_timer_multi =
			vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
	} else {
		kvm_x86_ops->set_hv_timer = NULL;
		kvm_x86_ops->cancel_hv_timer = NULL;
	}

	/* Shadow VMCS only matters for (and requires) nested operation. */
	if (!cpu_has_vmx_shadow_vmcs() || !nested)
		enable_shadow_vmcs = 0;

	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
	nested_vmx_setup_ctls_msrs(&vmcs_config.nested, vmx_capability.ept,
				   enable_apicv);

	kvm_mce_cap_supported |= MCG_LMCE_P;

	if (nested) {
		r = nested_vmx_hardware_setup();
		if (r)
			return r;
	}

	/* Unwind the nested setup if the VMCS area allocation fails. */
	r = alloc_kvm_area();
	if (r)
		nested_vmx_hardware_unsetup();
	return r;
}
|
||||
|
||||
/*
 * Module-unload counterpart of hardware_setup(): tear down the nested
 * (shadow-VMCS) state if nesting was enabled, then free the VMCS areas
 * allocated by alloc_kvm_area().
 */
static __exit void hardware_unsetup(void)
{
	if (nested)
		nested_vmx_hardware_unsetup();

	free_kvm_area();
}
|
||||
|
||||
/*
|
||||
* Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
|
||||
* exiting, so only get here on cpu with PAUSE-Loop-Exiting.
|
||||
|
@ -13152,6 +12968,190 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __exit void nested_vmx_hardware_unsetup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (enable_shadow_vmcs) {
|
||||
for (i = 0; i < VMX_BITMAP_NR; i++)
|
||||
free_page((unsigned long)vmx_bitmap[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static __init int nested_vmx_hardware_setup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (enable_shadow_vmcs) {
|
||||
for (i = 0; i < VMX_BITMAP_NR; i++) {
|
||||
vmx_bitmap[i] = (unsigned long *)
|
||||
__get_free_page(GFP_KERNEL);
|
||||
if (!vmx_bitmap[i]) {
|
||||
nested_vmx_hardware_unsetup();
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
init_vmcs_shadow_fields();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * hardware_setup - one-time VMX configuration at module load.
 *
 * Snapshots host MSR state, validates the VMCS configuration, then
 * progressively disables optional features (module flags and
 * kvm_x86_ops callbacks) the hardware cannot support.  Ordering
 * matters: later checks consume flags cleared earlier (e.g. enable_ept
 * feeds enable_ept_ad_bits, enable_unrestricted_guest and enable_pml).
 *
 * Return: 0 on success, -EIO for an unusable VMCS configuration, or a
 * negative error from nested_vmx_hardware_setup()/alloc_kvm_area().
 */
static __init int hardware_setup(void)
{
	unsigned long host_bndcfgs;
	int r, i;

	rdmsrl_safe(MSR_EFER, &host_efer);

	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
		kvm_define_shared_msr(i, vmx_msr_index[i]);

	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	/* KVM does not preserve the host's BNDCFGS; warn if it is live. */
	if (boot_cpu_has(X86_FEATURE_MPX)) {
		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
	}

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		rdmsrl(MSR_IA32_XSS, host_xss);

	/* VPID needs INVVPID with at least one of single/global context. */
	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
		enable_vpid = 0;

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels() ||
	    !cpu_has_vmx_ept_mt_wb() ||
	    !cpu_has_vmx_invept_global())
		enable_ept = 0;

	/* A/D bits and unrestricted guest both require EPT (checked above). */
	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
		enable_ept_ad_bits = 0;

	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_virtual_nmis())
		enable_vnmi = 0;

	/*
	 * set_apic_access_page_addr() is used to reload apic access
	 * page upon invalidation. No need to do anything if not
	 * using the APIC_ACCESS_ADDR VMCS field.
	 */
	if (!flexpriority_enabled)
		kvm_x86_ops->set_apic_access_page_addr = NULL;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

#if IS_ENABLED(CONFIG_HYPERV)
	/* Prefer Hyper-V's guest-mapping flush enlightenment for EPT. */
	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
	    && enable_ept)
		kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
#endif

	/* Without PAUSE-loop exiting, neutralize all PLE tunables. */
	if (!cpu_has_vmx_ple()) {
		ple_gap = 0;
		ple_window = 0;
		ple_window_grow = 0;
		ple_window_max = 0;
		ple_window_shrink = 0;
	}

	if (!cpu_has_vmx_apicv()) {
		enable_apicv = 0;
		kvm_x86_ops->sync_pir_to_irr = NULL;
	}

	if (cpu_has_vmx_tsc_scaling()) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 48;
	}

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	if (enable_ept)
		vmx_enable_tdp();
	else
		kvm_disable_tdp();

	/* Nested-state ioctls are meaningless when nesting is disabled. */
	if (!nested) {
		kvm_x86_ops->get_nested_state = NULL;
		kvm_x86_ops->set_nested_state = NULL;
	}

	/*
	 * Only enable PML when hardware supports PML feature, and both EPT
	 * and EPT A/D bit features are enabled -- PML depends on them to work.
	 */
	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
		enable_pml = 0;

	if (!enable_pml) {
		kvm_x86_ops->slot_enable_log_dirty = NULL;
		kvm_x86_ops->slot_disable_log_dirty = NULL;
		kvm_x86_ops->flush_log_dirty = NULL;
		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
	}

	if (!cpu_has_vmx_preemption_timer())
		kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;

	if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
		u64 vmx_msr;

		/* IA32_VMX_MISC reports the preemption timer's rate bits. */
		rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
		cpu_preemption_timer_multi =
			vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
	} else {
		kvm_x86_ops->set_hv_timer = NULL;
		kvm_x86_ops->cancel_hv_timer = NULL;
	}

	/* Shadow VMCS only matters for (and requires) nested operation. */
	if (!cpu_has_vmx_shadow_vmcs() || !nested)
		enable_shadow_vmcs = 0;

	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
	nested_vmx_setup_ctls_msrs(&vmcs_config.nested, vmx_capability.ept,
				   enable_apicv);

	kvm_mce_cap_supported |= MCG_LMCE_P;

	if (nested) {
		r = nested_vmx_hardware_setup();
		if (r)
			return r;
	}

	/* Unwind the nested setup if the VMCS area allocation fails. */
	r = alloc_kvm_area();
	if (r)
		nested_vmx_hardware_unsetup();
	return r;
}
|
||||
|
||||
/*
 * Module-unload counterpart of hardware_setup(): tear down the nested
 * (shadow-VMCS) state if nesting was enabled, then free the VMCS areas
 * allocated by alloc_kvm_area().
 */
static __exit void hardware_unsetup(void)
{
	if (nested)
		nested_vmx_hardware_unsetup();

	free_kvm_area();
}
|
||||
|
||||
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
|
||||
.cpu_has_kvm_support = cpu_has_kvm_support,
|
||||
.disabled_by_bios = vmx_disabled_by_bios,
|
||||
|
|
Loading…
Reference in New Issue