KVM: x86/xen: Take srcu lock when accessing kvm_memslots()
kvm_memslots() will be called by kvm_write_guest_offset_cached(), so the srcu
lock must be held. Pull the srcu lock acquisition out of
kvm_steal_time_set_preempted() again, this time into kvm_arch_vcpu_put(), so
that the Xen runstate path is covered as well.
Fixes: 30b5c851af ("KVM: x86/xen: Add support for vCPU runstate information")
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1619166200-9215-1-git-send-email-wanpengli@tencent.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
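
Background sketch (not part of the patch; all identifiers below are invented
for illustration): the SRCU read-side pattern the fix moves into
kvm_arch_vcpu_put(). In KVM, kvm->srcu protects the memslots array, so any
path that can reach kvm_memslots() -- here via the gfn-cache generation check
inside kvm_write_guest_offset_cached() -- must run between srcu_read_lock()
and srcu_read_unlock(). A minimal kernel module showing the same pattern on a
stand-in pointer might look like this:

/* Hypothetical demo module, not kernel/KVM code. */
#include <linux/module.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Stand-in for struct kvm_memslots. */
struct demo_slots {
	int generation;
};

static struct demo_slots __rcu *demo_slots_ptr;
DEFINE_SRCU(demo_srcu);			/* stand-in for kvm->srcu */

/* Reader: the pattern kvm_arch_vcpu_put() now wraps around both callees. */
static void demo_reader(void)
{
	struct demo_slots *s;
	int idx;

	idx = srcu_read_lock(&demo_srcu);
	s = srcu_dereference(demo_slots_ptr, &demo_srcu);
	if (s)
		pr_info("demo: generation %d\n", s->generation);
	srcu_read_unlock(&demo_srcu, idx);
}

static int __init demo_init(void)
{
	struct demo_slots *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	s->generation = 1;
	rcu_assign_pointer(demo_slots_ptr, s);	/* publish, like installing new memslots */
	demo_reader();
	return 0;
}

static void __exit demo_exit(void)
{
	struct demo_slots *old = rcu_dereference_protected(demo_slots_ptr, 1);

	rcu_assign_pointer(demo_slots_ptr, NULL);
	synchronize_srcu(&demo_srcu);		/* wait for all srcu readers before freeing */
	kfree(old);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The writer side (memslot updates in KVM, demo_exit() here) publishes the new
pointer and then waits in synchronize_srcu() until every read-side critical
section that might still see the old one has finished, which is exactly why
reaching kvm_memslots() outside srcu_read_lock() in the Xen path was unsafe.
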
parent bf05bf16c7
commit 9c1a07442c
@@ -4025,7 +4025,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
 	struct kvm_host_map map;
 	struct kvm_steal_time *st;
-	int idx;
 
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
@@ -4033,15 +4032,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.st.preempted)
 		return;
 
-	/*
-	 * Take the srcu lock as memslots will be accessed to check the gfn
-	 * cache generation against the memslots generation.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
 			&vcpu->arch.st.cache, true))
-		goto out;
+		return;
 
 	st = map.hva +
 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4049,20 +4042,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
-
-out:
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	int idx;
+
 	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
 		vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
+	/*
+	 * Take the srcu lock as memslots will be accessed to check the gfn
+	 * cache generation against the memslots generation.
+	 */
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (kvm_xen_msr_enabled(vcpu->kvm))
 		kvm_xen_runstate_set_preempted(vcpu);
 	else
 		kvm_steal_time_set_preempted(vcpu);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	static_call(kvm_x86_vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();