mirror of https://gitee.com/openkylin/linux.git
KVM: move EXIT_FASTPATH_REENTER_GUEST to common code
Now that KVM is using static calls, calling vmx_vcpu_run and vmx_sync_pir_to_irr no longer incurs the cost of a retpoline. Therefore there is no longer any need to handle EXIT_FASTPATH_REENTER_GUEST in vendor code.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fb18d053b7
commit d89d04ab60
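For readers unfamiliar with the mechanism the message relies on: a static call site is text-patched into a direct call to its current target, so invoking a vendor hook such as kvm_x86_run through it avoids the indirect branch (and therefore the retpoline) that a plain function pointer would need. Below is a minimal sketch of the generic static_call API from <linux/static_call.h>; the demo_* names and the stub vendor implementation are illustrative assumptions, not KVM's actual kvm_x86_ops wiring.

#include <linux/kvm_host.h>	/* struct kvm_vcpu, fastpath_t, EXIT_FASTPATH_* */
#include <linux/static_call.h>

/* Stand-in for a vendor implementation such as vmx_vcpu_run (illustrative only). */
static fastpath_t demo_vendor_vcpu_run(struct kvm_vcpu *vcpu)
{
	return EXIT_FASTPATH_NONE;
}

/* Define the static call with a default target. */
DEFINE_STATIC_CALL(demo_run, demo_vendor_vcpu_run);

/*
 * Once the vendor module is chosen, re-point the call; the call site in
 * demo_enter_guest() is patched into a direct call to the new target.
 */
static void demo_select_vendor(fastpath_t (*run)(struct kvm_vcpu *vcpu))
{
	static_call_update(demo_run, run);
}

static fastpath_t demo_enter_guest(struct kvm_vcpu *vcpu)
{
	/*
	 * Direct call, no retpoline: cheap enough that common code can now
	 * invoke the vendor ->run() repeatedly from a loop.
	 */
	return static_call(demo_run)(vcpu);
}

In the hunks below, this is exactly the call that vcpu_enter_guest() now wraps in a for (;;) loop, handling EXIT_FASTPATH_REENTER_GUEST in common code instead of in vmx_vcpu_run().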
arch/x86/kvm/vmx/vmx.c
@@ -6711,11 +6711,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	fastpath_t exit_fastpath;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
 
-reenter_guest:
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6865,22 +6863,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu))
 		return EXIT_FASTPATH_NONE;
 
-	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-	if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
-		if (!kvm_vcpu_exit_request(vcpu)) {
-			/*
-			 * FIXME: this goto should be a loop in vcpu_enter_guest,
-			 * but it would incur the cost of a retpoline for now.
-			 * Revisit once static calls are available.
-			 */
-			if (vcpu->arch.apicv_active)
-				vmx_sync_pir_to_irr(vcpu);
-			goto reenter_guest;
-		}
-		exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
-	}
-
-	return exit_fastpath;
+	return vmx_exit_handlers_fastpath(vcpu);
 }
 
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
arch/x86/kvm/x86.c
@@ -1796,12 +1796,11 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
 	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
 		xfer_to_guest_mode_work_pending();
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
 
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
@@ -9044,7 +9043,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
-	exit_fastpath = static_call(kvm_x86_run)(vcpu);
+	for (;;) {
+		exit_fastpath = static_call(kvm_x86_run)(vcpu);
+		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+			break;
+
+		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+			break;
+		}
+
+		if (vcpu->arch.apicv_active)
+			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+	}
 
 	/*
 	 * Do this here before restoring debug registers on the host. And
@ -395,7 +395,6 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
|
||||||
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
|
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
|
||||||
int kvm_spec_ctrl_test_value(u64 value);
|
int kvm_spec_ctrl_test_value(u64 value);
|
||||||
bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
|
bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
|
||||||
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
|
|
||||||
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
|
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
|
||||||
struct x86_exception *e);
|
struct x86_exception *e);
|
||||||
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
|
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
|
||||||
|
|