Revert "KVM: Simplify kvm->tlbs_dirty handling"

This reverts commit 5befdc385d.

Since a later patch will allow the TLB to be flushed outside of mmu_lock,
tlbs_dirty must go back to being a counter that is cleared with cmpxchg(),
so that updates racing with the flush are not lost the way a plain bool
blindly reset to false would lose them.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Xiao Guangrong 2014-04-17 17:06:12 +08:00 committed by Marcelo Tosatti
parent 42bf549f3c
commit a086f6a1eb
3 changed files with 8 additions and 8 deletions
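To make the motivation concrete, here is an illustrative fragment (plain C, not kernel code; mark_dirty(), flush_deferred() and do_remote_flush() are made-up names) of the bool scheme being reverted. It is only safe while every update of tlbs_dirty happens under mmu_lock, because the flusher clears the flag unconditionally; the long counter plus cmpxchg() restored below keeps a racing request alive instead.

#include <stdbool.h>

static bool tlbs_dirty;

static void do_remote_flush(void)
{
        /* stands in for sending the TLB-flush IPIs */
}

/* e.g. sync_page(), once it is allowed to run without mmu_lock */
static void mark_dirty(void)
{
        tlbs_dirty = true;      /* "flush before the old page is reused" */
}

/* e.g. kvm_flush_remote_tlbs() in the bool scheme */
static void flush_deferred(void)
{
        do_remote_flush();
        /*
         * A mark_dirty() racing in right here is wiped out by the store
         * below, so its deferred flush never happens.  With a counter,
         * cmpxchg(&tlbs_dirty, snapshot, 0) fails in that case and the
         * request survives until the next flush.
         */
        tlbs_dirty = false;
}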


@@ -913,8 +913,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
  * used by guest then tlbs are not flushed, so guest is allowed to access the
  * freed pages.
- * We set tlbs_dirty to let the notifier know this change and delay the flush
- * until such a case actually happens.
+ * And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -943,7 +942,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                         return -EINVAL;
 
                 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-                        vcpu->kvm->tlbs_dirty = true;
+                        vcpu->kvm->tlbs_dirty++;
                         continue;
                 }
 
@@ -958,7 +957,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                 if (gfn != sp->gfns[i]) {
                         drop_spte(vcpu->kvm, &sp->spt[i]);
-                        vcpu->kvm->tlbs_dirty = true;
+                        vcpu->kvm->tlbs_dirty++;
                         continue;
                 }
 
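For context, a simplified sketch of the notifier-side consumer that the comment above relies on, assuming kernel KVM context (paraphrased and abridged, not verbatim upstream code; SRCU and mmu_notifier_count bookkeeping omitted): whenever tlbs_dirty is non-zero, the invalidation path must flush before the host pages are freed, even if its own unmapping found nothing to flush.

static void invalidate_range_start_sketch(struct kvm *kvm,
                                          unsigned long start,
                                          unsigned long end)
{
        int need_tlb_flush;

        spin_lock(&kvm->mmu_lock);
        need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
        /* Also honour the flushes that sync_page() deferred. */
        if (need_tlb_flush || kvm->tlbs_dirty)
                kvm_flush_remote_tlbs(kvm);
        spin_unlock(&kvm->mmu_lock);
}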


@@ -411,9 +411,7 @@ struct kvm {
         unsigned long mmu_notifier_seq;
         long mmu_notifier_count;
 #endif
-        /* Protected by mmu_lock */
-        bool tlbs_dirty;
-
+        long tlbs_dirty;
         struct list_head devices;
 };
 


@@ -186,9 +186,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+        long dirty_count = kvm->tlbs_dirty;
+
+        smp_mb();
         if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                 ++kvm->stat.remote_tlb_flush;
-        kvm->tlbs_dirty = false;
+        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 
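For reference, a minimal user-space model of the pattern this hunk restores, using C11 atomics in place of the kernel's cmpxchg() (tlbs_dirty, flush_remote_tlbs() and fake_remote_flush() below are toy names, not the kernel symbols): the flusher snapshots the counter, flushes, and clears the counter only if nothing was deferred in between, so a racing deferral keeps it non-zero and still gets a flush later.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long tlbs_dirty;          /* toy stand-in for kvm->tlbs_dirty */
static long remote_flushes;             /* toy stand-in for the flush stat  */

static void fake_remote_flush(void)
{
        remote_flushes++;               /* pretend the IPIs went out */
}

static void flush_remote_tlbs(void)
{
        long dirty_count = atomic_load(&tlbs_dirty);

        fake_remote_flush();
        /* Clear only if no new flush was deferred since the snapshot. */
        atomic_compare_exchange_strong(&tlbs_dirty, &dirty_count, 0);
}

int main(void)
{
        atomic_fetch_add(&tlbs_dirty, 1);       /* a deferred flush           */
        flush_remote_tlbs();                    /* no race: counter goes to 0 */

        atomic_fetch_add(&tlbs_dirty, 1);
        long snap = atomic_load(&tlbs_dirty);   /* flusher takes its snapshot */
        atomic_fetch_add(&tlbs_dirty, 1);       /* a deferral races in        */
        fake_remote_flush();
        atomic_compare_exchange_strong(&tlbs_dirty, &snap, 0); /* fails: 2 != 1 */

        /* Prints tlbs_dirty=2, flushes=2: the racing request is preserved. */
        printf("tlbs_dirty=%ld, flushes=%ld\n",
               atomic_load(&tlbs_dirty), remote_flushes);
        return 0;
}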