KVM: async_pf: Provide additional direct page notification

By setting a Kconfig option, the architecture can control when guest
notifications are presented by the apf backend.
The default is the batch mechanism, which works as before: the vcpu
thread pulls in this information.
New is the direct mechanism, which instead pushes the information to
the guest.
This way s390 can use an already existing architecture interface.

The vcpu thread should still call check_completion to clean up leftovers.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Dominik Dingel, 2013-06-06 15:32:37 +02:00; committed by Christian Borntraeger
commit e0ead41a6d, parent 24eb3a824c
4 changed files with 24 additions and 4 deletions
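
Before the per-file diffs, a rough sketch of where the two mechanisms fire. This is simplified, illustrative code, not the literal patch: the bodies are elided down to the notification call sites, and the helpers referenced are the ones introduced below in virt/kvm/async_pf.c.

/* Simplified sketch; only the notification call sites are of interest. */

/* Worker-thread context: runs once the page has been faulted in. */
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct kvm_vcpu *vcpu = apf->vcpu;

	/* ... get_user_pages() resolves the fault ... */

	/*
	 * Direct mechanism: with CONFIG_KVM_ASYNC_PF_SYNC this pushes
	 * "page present" to the guest immediately, via the existing
	 * architecture interface. Without the option it is a no-op.
	 */
	kvm_async_page_present_sync(vcpu, apf);

	/* ... queue the item on vcpu->async_pf.done ... */
}

/* vcpu-thread context: the batch mechanism pulls completions here. */
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	/*
	 * Default batch mechanism: completed items are presented to
	 * the guest from the vcpu thread. With the direct mechanism
	 * this remains necessary only to clean up leftovers.
	 */
}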

arch/x86/kvm/mmu.c

@@ -3328,7 +3328,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	arch.direct_map = vcpu->arch.mmu.direct_map;
 	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
-	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
+	return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
 }
 
 static bool can_do_async_pf(struct kvm_vcpu *vcpu)

include/linux/kvm_host.h

@@ -192,7 +192,7 @@ struct kvm_async_pf {
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 		       struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif

virt/kvm/Kconfig

@@ -22,6 +22,10 @@ config KVM_MMIO
 config KVM_ASYNC_PF
 	bool
 
+# Toggle to switch between direct notification and batch job
+config KVM_ASYNC_PF_SYNC
+	bool
+
 config HAVE_KVM_MSI
 	bool
 
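
KVM_ASYNC_PF_SYNC is deliberately not user-visible: an architecture opts into the direct mechanism by selecting the symbol from its own KVM Kconfig entry. A hypothetical sketch of such wiring follows; the s390 opt-in itself is a separate patch, and the entry shown here is illustrative:

# arch/<arch>/kvm/Kconfig (illustrative sketch, not part of this patch)
config KVM
	def_tristate y
	prompt "Kernel-based Virtual Machine (KVM) support"
	depends on HAVE_KVM
	select KVM_ASYNC_PF
	select KVM_ASYNC_PF_SYNC	# use direct notification instead of batch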

virt/kvm/async_pf.c

@@ -28,6 +28,21 @@
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
+static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
+					       struct kvm_async_pf *work)
+{
+#ifdef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
+						struct kvm_async_pf *work)
+{
+#ifndef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
@@ -69,6 +84,7 @@ static void async_pf_execute(struct work_struct *work)
 	down_read(&mm->mmap_sem);
 	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
+	kvm_async_page_present_sync(vcpu, apf);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
@@ -138,7 +154,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 		       struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
@@ -159,7 +175,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
-	work->addr = gfn_to_hva(vcpu->kvm, gfn);
+	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
 	atomic_inc(&work->mm->mm_count);
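
Note on the signature change visible in the last two hunks: the gfn-to-hva translation moves out of the common code and into the callers, so an architecture that does not track guest memory by gfn on this path can hand in a host virtual address it already holds. A hedged sketch of a caller under the new prototype; the function name and error handling here are illustrative, not from this patch:

/* Illustrative caller; setup_apf_for_gfn is hypothetical. */
static int setup_apf_for_gfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	struct kvm_arch_async_pf arch = {};	/* arch-specific token etc. */
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);

	if (kvm_is_error_hva(hva))
		return 0;	/* no valid mapping, don't start async work */

	return kvm_setup_async_pf(vcpu, gva, hva, &arch);
}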