KVM: PPC: Book3S HV: Create kvmppc_unmap_hpte_helper()
The kvm_unmap_rmapp() function, called from certain MMU notifiers, is
used to force all guest mappings of a particular host page to be set
ABSENT, and removed from the reverse mappings.

For HPT resizing, we will have some cases where we want to set just a
single guest HPTE ABSENT and remove its reverse mappings. To prepare
for this, we split out the logic from kvm_unmap_rmapp() to evict a
single HPTE, moving it to a new helper function.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
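(For orientation, not part of the patch: the new helper's contract is that both the HPTE lock bit and the rmap chain lock are already held when it is called. Below is a minimal sketch of how a future HPT-resize path might evict a single HPTE with it, reusing only the locking helpers visible in the diff further down; evict_one_hpte() is a hypothetical name, and contention handling is simplified to a bail-out where the real loop in kvm_unmap_rmapp() retries.)

/* Hypothetical caller sketch, not part of this patch */
static void evict_one_hpte(struct kvm *kvm, unsigned long i,
			   unsigned long *rmapp, unsigned long gfn)
{
	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));

	lock_rmap(rmapp);
	if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
		/* simplified: a real caller would back off and retry */
		unlock_rmap(rmapp);
		return;
	}
	/* both locks held: set the HPTE ABSENT and unlink its rmap entry */
	kvmppc_unmap_hpte(kvm, i, rmapp, gfn);
	unlock_rmap(rmapp);
	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
}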
commit 639e459768
parent f98a8bf9ee
arch/powerpc/kvm/book3s_64_mmu_hv.c

@@ -742,37 +742,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
-			   unsigned long gfn)
+/* Must be called with both HPTE and rmap locked */
+static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
+			      unsigned long *rmapp, unsigned long gfn)
 {
+	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
 	struct revmap_entry *rev = kvm->arch.hpt.rev;
-	unsigned long h, i, j;
-	__be64 *hptep;
+	unsigned long j, h;
 	unsigned long ptel, psize, rcbits;
-	unsigned long *rmapp;
-
-	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
-	for (;;) {
-		lock_rmap(rmapp);
-		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
-			unlock_rmap(rmapp);
-			break;
-		}
-
-		/*
-		 * To avoid an ABBA deadlock with the HPTE lock bit,
-		 * we can't spin on the HPTE lock while holding the
-		 * rmap chain lock.
-		 */
-		i = *rmapp & KVMPPC_RMAP_INDEX;
-		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
-		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
-			/* unlock rmap before spinning on the HPTE lock */
-			unlock_rmap(rmapp);
-			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
-				cpu_relax();
-			continue;
-		}
+
 	j = rev[i].forw;
 	if (j == i) {
 		/* chain is now empty */
@@ -804,6 +782,39 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			note_hpte_modification(kvm, &rev[i]);
 		}
 	}
+}
+
+static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			   unsigned long gfn)
+{
+	unsigned long i;
+	__be64 *hptep;
+	unsigned long *rmapp;
+
+	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+	for (;;) {
+		lock_rmap(rmapp);
+		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
+			unlock_rmap(rmapp);
+			break;
+		}
+
+		/*
+		 * To avoid an ABBA deadlock with the HPTE lock bit,
+		 * we can't spin on the HPTE lock while holding the
+		 * rmap chain lock.
+		 */
+		i = *rmapp & KVMPPC_RMAP_INDEX;
+		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
+		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+			/* unlock rmap before spinning on the HPTE lock */
+			unlock_rmap(rmapp);
+			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
+				cpu_relax();
+			continue;
+		}
+
+		kvmppc_unmap_hpte(kvm, i, rmapp, gfn);
 		unlock_rmap(rmapp);
 		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	}
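(Aside on the locking pattern, not from the patch: the comment in the loop above describes the standard way to avoid an ABBA deadlock when two lock orders coexist: try-lock the second lock, and on failure drop the first lock before waiting. Below is a minimal user-space C sketch of the same retry discipline under invented names, with lock_a standing in for the rmap chain lock and lock_b for the HPTE lock bit; compile with `cc abba_retry.c -lpthread`.)

/* abba_retry.c: illustrative only, not kernel code */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

static void update_under_both_locks(int value)
{
	for (;;) {
		pthread_mutex_lock(&lock_a);
		if (pthread_mutex_trylock(&lock_b) != 0) {
			/*
			 * Can't get B: drop A before waiting, then retry,
			 * so a thread taking B then A can never deadlock us.
			 */
			pthread_mutex_unlock(&lock_a);
			sched_yield();
			continue;
		}
		shared_state = value;	/* both locks held */
		pthread_mutex_unlock(&lock_b);
		pthread_mutex_unlock(&lock_a);
		return;
	}
}

int main(void)
{
	update_under_both_locks(42);
	printf("shared_state = %d\n", shared_state);
	return 0;
}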