target/i386: skip KVM_GET/SET_NESTED_STATE if VMX disabled, or for SVM

Do not allocate env->nested_state unless we later need to migrate the
nested virtualization state.

With this change, nested_state_needed() will return false if the
VMX flag is not included in the virtual machine.  KVM_GET/SET_NESTED_STATE
is also disabled for SVM which is safer (we know that at least the NPT
root and paging mode have to be saved/loaded), and thus the corresponding
subsection can go away as well.

Inspired by a patch from Liran Alon.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Paolo Bonzini 2019-07-11 15:41:48 +02:00
parent 79a197ab18
commit 1e44f3ab71
2 changed files with 9 additions and 28 deletions

View File

@@ -1711,15 +1711,15 @@ int kvm_arch_init_vcpu(CPUState *cs)
     max_nested_state_len = kvm_max_nested_state_length();
     if (max_nested_state_len > 0) {
         assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
-        env->nested_state = g_malloc0(max_nested_state_len);
-        env->nested_state->size = max_nested_state_len;
 
-        if (IS_INTEL_CPU(env)) {
-            struct kvm_vmx_nested_state_hdr *vmx_hdr =
-                &env->nested_state->hdr.vmx;
+        if (cpu_has_vmx(env)) {
+            struct kvm_vmx_nested_state_hdr *vmx_hdr;
 
+            env->nested_state = g_malloc0(max_nested_state_len);
+            env->nested_state->size = max_nested_state_len;
             env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
+            vmx_hdr = &env->nested_state->hdr.vmx;
             vmx_hdr->vmxon_pa = -1ull;
             vmx_hdr->vmcs12_pa = -1ull;
         }
@@ -3515,7 +3515,7 @@ static int kvm_put_nested_state(X86CPU *cpu)
     CPUX86State *env = &cpu->env;
     int max_nested_state_len = kvm_max_nested_state_length();
 
-    if (max_nested_state_len <= 0) {
+    if (!env->nested_state) {
         return 0;
     }
@@ -3529,7 +3529,7 @@ static int kvm_get_nested_state(X86CPU *cpu)
     int max_nested_state_len = kvm_max_nested_state_length();
     int ret;
 
-    if (max_nested_state_len <= 0) {
+    if (!env->nested_state) {
         return 0;
     }

View File

@@ -1035,31 +1035,13 @@ static const VMStateDescription vmstate_vmx_nested_state = {
     }
 };
 
-static bool svm_nested_state_needed(void *opaque)
-{
-    struct kvm_nested_state *nested_state = opaque;
-
-    return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM);
-}
-
-static const VMStateDescription vmstate_svm_nested_state = {
-    .name = "cpu/kvm_nested_state/svm",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = svm_nested_state_needed,
-    .fields = (VMStateField[]) {
-        VMSTATE_END_OF_LIST()
-    }
-};
-
 static bool nested_state_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
     CPUX86State *env = &cpu->env;
 
     return (env->nested_state &&
-            (vmx_nested_state_needed(env->nested_state) ||
-             svm_nested_state_needed(env->nested_state)));
+            vmx_nested_state_needed(env->nested_state));
 }
 
 static int nested_state_post_load(void *opaque, int version_id)
@@ -1121,7 +1103,6 @@ static const VMStateDescription vmstate_kvm_nested_state = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_vmx_nested_state,
-        &vmstate_svm_nested_state,
         NULL
     }
 };