KVM: nVMX: Remove param indirection from nested_vmx_check_msr_switch()
Passing the enum and doing an indirect lookup is silly when we can simply pass the field directly. Remove the "fast path" code in nested_vmx_check_msr_switch_controls() as it's now nothing more than a redundant check.

Remove the debug message rather than continue passing the enum for the address field. Having debug messages for the MSRs themselves is useful as MSR legality is a huge space, whereas messing up a physical address means the VMM is fundamentally broken.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 461b4ba4c7
commit f9b245e182
arch/x86/kvm/vmx
@@ -700,45 +700,31 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 }
 
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
-                                       unsigned long count_field,
-                                       unsigned long addr_field)
+                                       u32 count, u64 addr)
 {
-        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
         int maxphyaddr;
-        u64 count, addr;
 
-        if (vmcs12_read_any(vmcs12, count_field, &count) ||
-            vmcs12_read_any(vmcs12, addr_field, &addr)) {
-                WARN_ON(1);
-                return -EINVAL;
-        }
         if (count == 0)
                 return 0;
         maxphyaddr = cpuid_maxphyaddr(vcpu);
         if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
-            (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
-                pr_debug_ratelimited(
-                        "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
-                        addr_field, maxphyaddr, count, addr);
+            (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
                 return -EINVAL;
-        }
+
         return 0;
 }
 
 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
 {
-        if (vmcs12->vm_exit_msr_load_count == 0 &&
-            vmcs12->vm_exit_msr_store_count == 0 &&
-            vmcs12->vm_entry_msr_load_count == 0)
-                return 0; /* Fast path */
-        if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
-                                        VM_EXIT_MSR_LOAD_ADDR) ||
-            nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
-                                        VM_EXIT_MSR_STORE_ADDR) ||
-            nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
-                                        VM_ENTRY_MSR_LOAD_ADDR))
+        if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
+                                        vmcs12->vm_exit_msr_load_addr) ||
+            nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
+                                        vmcs12->vm_exit_msr_store_addr) ||
+            nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+                                        vmcs12->vm_entry_msr_load_addr))
                 return -EINVAL;
+
         return 0;
 }
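For readers following along outside the kernel tree, below is a minimal standalone sketch of the validation the reworked nested_vmx_check_msr_switch() performs. The helper name, the hard-coded GUEST_MAXPHYADDR, and the msr_entry struct are illustrative stand-ins (the real code queries cpuid_maxphyaddr() and uses struct vmx_msr_entry); only the count/alignment/maxphyaddr logic is taken from the diff above.

        #include <stdbool.h>
        #include <stdint.h>

        /* Assumed guest physical-address width; the real code gets this per-vCPU. */
        #define GUEST_MAXPHYADDR 46

        /* 16-byte MSR-entry layout, standing in for struct vmx_msr_entry. */
        struct msr_entry {
                uint32_t index;
                uint32_t reserved;
                uint64_t value;
        };

        /*
         * Same checks as the reworked nested_vmx_check_msr_switch(): a count of
         * zero is always legal; otherwise the area must be 16-byte aligned and
         * every byte of it must lie below the guest's maximum physical address.
         */
        static bool msr_switch_area_valid(uint32_t count, uint64_t addr)
        {
                if (count == 0)
                        return true;
                if (addr & 0xf)         /* equivalent to !IS_ALIGNED(addr, 16) */
                        return false;
                if (addr >> GUEST_MAXPHYADDR)
                        return false;
                return !((addr + (uint64_t)count * sizeof(struct msr_entry) - 1) >>
                         GUEST_MAXPHYADDR);
        }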