KVM: PPC: Book3S HV: Add helpers for lock/unlock hpte
This adds helper routines for locking and unlocking HPTEs, and uses them in the rest of the code. We don't change any locking rules in this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 31037ecad2
commit a4bd6eb07c
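The helpers wrap the HPTE lock-bit convention already used throughout this code: HPTE_V_HVLOCK in the first doubleword of an entry is taken with try_lock_hpte() and released by rewriting that doubleword with the bit cleared. As a rough, self-contained illustration of that protocol (a sketch only: the names, the lock-bit position, and the C11 atomics below are stand-ins, not the kernel's HPTE layout or PowerPC barriers), a user-space model could look like this:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_HVLOCK (1ULL << 63)		/* stand-in for HPTE_V_HVLOCK */

/* Model of try_lock_hpte(): atomically set the lock bit, report success. */
static int model_try_lock(_Atomic uint64_t *hpte)
{
	uint64_t old = atomic_fetch_or_explicit(hpte, MODEL_HVLOCK,
						memory_order_acquire);
	return (old & MODEL_HVLOCK) == 0;
}

/* Model of unlock_hpte(): publish a new value with the lock bit cleared. */
static void model_unlock(_Atomic uint64_t *hpte, uint64_t v)
{
	atomic_store_explicit(hpte, v & ~MODEL_HVLOCK, memory_order_release);
}

int main(void)
{
	_Atomic uint64_t hpte = 0x10;	/* arbitrary unlocked contents */

	while (!model_try_lock(&hpte))
		;			/* spin, like cpu_relax() in the patch */
	/* ... inspect or rewrite the entry while the lock bit is held ... */
	model_unlock(&hpte, 0x11);
	printf("hpte = 0x%llx\n", (unsigned long long)atomic_load(&hpte));
	return 0;
}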
@@ -85,6 +85,20 @@ static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
 	return old == 0;
 }
 
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+	hpte[0] = cpu_to_be64(hpte_v);
+}
+
+/* Without barrier */
+static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	hpte[0] = cpu_to_be64(hpte_v);
+}
+
 static inline int __hpte_actual_psize(unsigned int lp, int psize)
 {
 	int i, shift;
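The two new flavours differ only in ordering: unlock_hpte() issues PPC_RELEASE_BARRIER before clearing HPTE_V_HVLOCK, while __unlock_hpte() stores the cleared value directly and leaves ordering to the caller (the eieio()/ptesync sequences at the call sites below). A minimal sketch of that distinction, again with C11 fences standing in for the PowerPC barriers (illustrative names, not kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_HVLOCK (1ULL << 63)		/* stand-in for HPTE_V_HVLOCK */

/* Like unlock_hpte(): release fence, then store with the lock bit cleared. */
static void model_unlock_barrier(_Atomic uint64_t *hpte, uint64_t v)
{
	atomic_thread_fence(memory_order_release);	/* ~ PPC_RELEASE_BARRIER */
	atomic_store_explicit(hpte, v & ~MODEL_HVLOCK, memory_order_relaxed);
}

/* Like __unlock_hpte(): plain store, no fence; the caller is assumed to
 * have ordered its own updates already (eieio()/ptesync in the patch). */
static void model_unlock_plain(_Atomic uint64_t *hpte, uint64_t v)
{
	atomic_store_explicit(hpte, v & ~MODEL_HVLOCK, memory_order_relaxed);
}

int main(void)
{
	_Atomic uint64_t hpte = MODEL_HVLOCK | 0x22;	/* currently "locked" */

	model_unlock_barrier(&hpte, 0x22);	/* barrier flavour */
	model_unlock_plain(&hpte, 0x23);	/* no-barrier flavour, same word */
	printf("hpte = 0x%llx\n", (unsigned long long)atomic_load(&hpte));
	return 0;
}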
@@ -338,9 +338,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
 
-	/* Unlock the HPTE */
-	asm volatile("lwsync" : : : "memory");
-	hptep[0] = cpu_to_be64(v);
+	unlock_hpte(hptep, v);
 	preempt_enable();
 
 	gpte->eaddr = eaddr;

@@ -469,8 +467,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	hpte[1] = be64_to_cpu(hptep[1]);
 	hpte[2] = r = rev->guest_rpte;
-	asm volatile("lwsync" : : : "memory");
-	hptep[0] = cpu_to_be64(hpte[0]);
+	unlock_hpte(hptep, hpte[0]);
 	preempt_enable();
 
 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||

@@ -621,7 +618,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	hptep[1] = cpu_to_be64(r);
 	eieio();
-	hptep[0] = cpu_to_be64(hpte[0]);
+	__unlock_hpte(hptep, hpte[0]);
 	asm volatile("ptesync" : : : "memory");
 	preempt_enable();
 	if (page && hpte_is_writable(r))

@@ -642,7 +639,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return ret;
 
  out_unlock:
-	hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	preempt_enable();
 	goto out_put;
 }

@@ -771,7 +768,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 		}
 		unlock_rmap(rmapp);
-		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	}
 	return 0;
 }

@@ -857,7 +854,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 			ret = 1;
 		}
-		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);

@@ -974,8 +971,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 
 		/* Now check and modify the HPTE */
 		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
-			/* unlock and continue */
-			hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 			continue;
 		}
 

@@ -996,9 +992,9 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 				npages_dirty = n;
 			eieio();
 		}
-		v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
+		v &= ~HPTE_V_ABSENT;
 		v |= HPTE_V_VALID;
-		hptep[0] = cpu_to_be64(v);
+		__unlock_hpte(hptep, v);
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);

@@ -1218,8 +1214,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
 			r &= ~HPTE_GR_MODIFIED;
 			revp->guest_rpte = r;
 		}
-		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-		hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		unlock_hpte(hptp, be64_to_cpu(hptp[0]));
 		preempt_enable();
 		if (!(valid == want_valid && (first_pass || dirty)))
 			ok = 0;
@@ -150,12 +150,6 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
 	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
 }
 
-static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
-{
-	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-	hpte[0] = cpu_to_be64(hpte_v);
-}
-
 long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		       long pte_index, unsigned long pteh, unsigned long ptel,
 		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)

@@ -271,10 +265,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			u64 pte;
 			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 				cpu_relax();
-			pte = be64_to_cpu(*hpte);
+			pte = be64_to_cpu(hpte[0]);
 			if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
 				break;
-			*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+			__unlock_hpte(hpte, pte);
 			hpte += 2;
 		}
 		if (i == 8)

@@ -290,9 +284,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 		while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 			cpu_relax();
-		pte = be64_to_cpu(*hpte);
+		pte = be64_to_cpu(hpte[0]);
 		if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
-			*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+			__unlock_hpte(hpte, pte);
 			return H_PTEG_FULL;
 		}
 	}

@@ -331,7 +325,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
 	eieio();
-	hpte[0] = cpu_to_be64(pteh);
+	__unlock_hpte(hpte, pteh);
 	asm volatile("ptesync" : : : "memory");
 
 	*pte_idx_ret = pte_index;

@@ -412,7 +406,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
 	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
-		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hpte, pte);
 		return H_NOT_FOUND;
 	}
 

@@ -548,7 +542,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 			args[j] |= rcbits << (56 - 5);
-			hp[0] = 0;
+			__unlock_hpte(hp, 0);
 		}
 	}
 

@@ -574,7 +568,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	pte = be64_to_cpu(hpte[0]);
 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
-		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hpte, pte);
 		return H_NOT_FOUND;
 	}
 

@@ -755,8 +749,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			/* Return with the HPTE still locked */
 			return (hash << 3) + (i >> 1);
 
-		/* Unlock and move on */
-		hpte[i] = cpu_to_be64(v);
+		__unlock_hpte(&hpte[i], v);
 	}
 
 	if (val & HPTE_V_SECONDARY)