KVM: nVMX: Move the checks for VM-Exit Control Fields to a separate helper function

.. to improve readability and maintainability, and to align the code with
the layout of the checks in the "VM Entries" chapter of Intel SDM vol. 3C.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by Krish Sadhukhan on 2018-12-12 13:30:09 -05:00; committed by Paolo Bonzini
parent f9b245e182
commit 61446ba75e
1 changed file with 33 additions and 10 deletions


@@ -714,20 +714,28 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
-						struct vmcs12 *vmcs12)
+static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
+						      struct vmcs12 *vmcs12)
 {
 	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
 					vmcs12->vm_exit_msr_load_addr) ||
 	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
-					vmcs12->vm_exit_msr_store_addr) ||
-	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
-					vmcs12->vm_entry_msr_load_addr))
+					vmcs12->vm_exit_msr_store_addr))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+						struct vmcs12 *vmcs12)
+{
+	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+					vmcs12->vm_entry_msr_load_addr))
 		return -EINVAL;
 
 	return 0;
 }
 
 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
 					 struct vmcs12 *vmcs12)
 {
@@ -2485,6 +2493,23 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+/*
+ * Checks related to VM-Exit Control Fields
+ */
+static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
+					 struct vmcs12 *vmcs12)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!vmx_control_verify(vmcs12->vm_exit_controls,
+				vmx->nested.msrs.exit_ctls_low,
+				vmx->nested.msrs.exit_ctls_high) ||
+	    nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 					    struct vmcs12 *vmcs12)
 {
@@ -2495,7 +2520,8 @@ static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
-	if (nested_check_vm_execution_controls(vcpu, vmcs12))
+	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
+	    nested_check_vm_exit_controls(vcpu, vmcs12))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
 	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
@@ -2506,10 +2532,7 @@ static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
 		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
-	if (!vmx_control_verify(vmcs12->vm_exit_controls,
-				vmx->nested.msrs.exit_ctls_low,
-				vmx->nested.msrs.exit_ctls_high) ||
-	    !vmx_control_verify(vmcs12->vm_entry_controls,
+	if (!vmx_control_verify(vmcs12->vm_entry_controls,
 				vmx->nested.msrs.entry_ctls_low,
 				vmx->nested.msrs.entry_ctls_high))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
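
Note on the control check itself: vmx_control_verify() validates a 32-bit VMCS control field against the allowed-0 ("low") and allowed-1 ("high") settings reported by the VMX capability MSRs, and that is the check the new nested_check_vm_exit_controls() helper applies to vmcs12->vm_exit_controls before delegating to the MSR-switch checks. Below is a minimal, self-contained sketch of that allowed-0/allowed-1 logic; the function name, main() harness, and mask values are illustrative only and are not taken from the kernel source.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the allowed-0/allowed-1 check: every must-be-1 bit ("low")
 * has to be set in the control field, and no bit outside the may-be-1
 * mask ("high") may be set.
 */
static bool control_field_valid(uint32_t control, uint32_t low, uint32_t high)
{
	return ((control & high) == control) && ((control & low) == low);
}

int main(void)
{
	/* Example masks only; real values come from the VMX capability MSRs
	 * (e.g. MSR_IA32_VMX_EXIT_CTLS). */
	uint32_t exit_ctls_low  = 0x00036dff;
	uint32_t exit_ctls_high = 0x003fffff;

	uint32_t ok  = exit_ctls_low | (1u << 15);	/* extra bit within allowed-1 */
	uint32_t bad = exit_ctls_low | (1u << 30);	/* bit outside allowed-1 */

	printf("ok:  %s\n", control_field_valid(ok, exit_ctls_low, exit_ctls_high) ? "valid" : "invalid");
	printf("bad: %s\n", control_field_valid(bad, exit_ctls_low, exit_ctls_high) ? "valid" : "invalid");
	return 0;
}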