KVM: x86/mmu: Consolidate kvm_mmu_zap_all() and kvm_mmu_zap_mmio_sptes()
...via a new helper, __kvm_mmu_zap_all(). An alternative to passing a 'bool mmio_only' would be to pass a callback function to filter the shadow page, i.e. to make __kvm_mmu_zap_all() generic and reusable, but zapping all shadow pages is a last resort, i.e. making the helper less extensible is a feature of sorts. And the explicit MMIO parameter makes it easy to preserve the WARN_ON_ONCE() if a restart is triggered when zapping MMIO sptes.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 24efe61f69
commit 8ab3c471ee
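For illustration only, a minimal sketch of the rejected callback-based alternative the message above alludes to; the mmu_zap_filter_t typedef, zap_mmio_filter() and __kvm_mmu_zap_filtered() are hypothetical names, not part of this patch. Filtering through a callback would make the helper generic and reusable, but the MMIO-only WARN_ON_ONCE() would no longer fall out of a simple flag check:

/* Hypothetical sketch -- NOT what this patch does. */
typedef bool (*mmu_zap_filter_t)(struct kvm_mmu_page *sp);

/* MMIO-only filter the generation-wraparound path would pass. */
static bool zap_mmio_filter(struct kvm_mmu_page *sp)
{
	return sp->mmio_cached;
}

/* Generic variant; a NULL filter means "zap everything". */
static void __kvm_mmu_zap_filtered(struct kvm *kvm, mmu_zap_filter_t filter)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (filter && !filter(sp))
			continue;
		if (sp->role.invalid && sp->root_count)
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
			goto restart;
		if (cond_resched_lock(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

Keeping the explicit 'bool mmio_only' instead closes the helper off to new users and lets WARN_ON_ONCE(mmio_only) preserve the old kvm_mmu_zap_mmio_sptes() behavior verbatim.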
@@ -5840,7 +5840,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
-void kvm_mmu_zap_all(struct kvm *kvm)
+static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
@@ -5849,30 +5849,12 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+		if (mmio_only && !sp->mmio_cached)
+			continue;
 		if (sp->role.invalid && sp->root_count)
 			continue;
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign) ||
-		    cond_resched_lock(&kvm->mmu_lock))
-			goto restart;
-	}
-
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	spin_unlock(&kvm->mmu_lock);
-}
-
-static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
-{
-	struct kvm_mmu_page *sp, *node;
-	LIST_HEAD(invalid_list);
-	int ign;
-
-	spin_lock(&kvm->mmu_lock);
-restart:
-	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (!sp->mmio_cached)
-			continue;
 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
-			WARN_ON_ONCE(1);
+			WARN_ON_ONCE(mmio_only);
 			goto restart;
 		}
 		if (cond_resched_lock(&kvm->mmu_lock))
@@ -5883,6 +5865,11 @@ static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 }
 
+void kvm_mmu_zap_all(struct kvm *kvm)
+{
+	return __kvm_mmu_zap_all(kvm, false);
+}
+
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
@@ -5904,7 +5891,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	 */
 	if (unlikely(gen == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-		kvm_mmu_zap_mmio_sptes(kvm);
+		__kvm_mmu_zap_all(kvm, true);
 	}
 }
 
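For readability, the consolidated helper and its trivial wrapper as they read once the hunks above are applied (reconstructed from this diff, not copied from the tree):

static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		/* Skip non-MMIO pages when only MMIO sptes are being zapped. */
		if (mmio_only && !sp->mmio_cached)
			continue;
		if (sp->role.invalid && sp->root_count)
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
			WARN_ON_ONCE(mmio_only);
			goto restart;
		}
		if (cond_resched_lock(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	return __kvm_mmu_zap_all(kvm, false);
}

On MMIO generation wraparound, kvm_mmu_invalidate_mmio_sptes() now calls __kvm_mmu_zap_all(kvm, true), so a restart on that path still trips the WARN_ON_ONCE().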