mirror of https://gitee.com/openkylin/linux.git
powerpc/kvm/book3s: Avoid using rmap to protect parallel page table update.
We now depend on kvm->mmu_lock

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-17-aneesh.kumar@linux.ibm.com
parent 7769a3394b
commit 3ff8df1430
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -74,8 +74,8 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(kvmppc_find_table);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
-		unsigned long *ua, unsigned long **prmap)
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
+		unsigned long tce, unsigned long *ua)
 {
 	unsigned long gfn = tce >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot;
@@ -87,9 +87,6 @@ static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
 	*ua = __gfn_to_hva_memslot(memslot, gfn) |
 		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
 
-	if (prmap)
-		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-
 	return 0;
 }
 
@@ -116,7 +113,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
 	if (iommu_tce_check_gpa(stt->page_shift, gpa))
 		return H_PARAMETER;
 
-	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
+	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
 		return H_TOO_HARD;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -411,7 +408,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;
 
 	dir = iommu_tce_direction(tce);
-	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
 		return H_PARAMETER;
 
 	entry = ioba >> stt->page_shift;
@@ -488,7 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	struct kvmppc_spapr_tce_table *stt;
 	long i, ret = H_SUCCESS;
 	unsigned long tces, entry, ua = 0;
-	unsigned long *rmap = NULL;
 	unsigned long mmu_seq;
 	bool prereg = false;
 	struct kvmppc_spapr_tce_iommu_table *stit;
@@ -530,7 +526,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 */
 		struct mm_iommu_table_group_mem_t *mem;
 
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
 			return H_TOO_HARD;
 
 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -546,23 +542,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 * We do not require memory to be preregistered in this case
 		 * so lock rmap and do __find_linux_pte_or_hugepte().
 		 */
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
 			return H_TOO_HARD;
 
-		rmap = (void *) vmalloc_to_phys(rmap);
-		if (WARN_ON_ONCE_RM(!rmap))
-			return H_TOO_HARD;
-
-		/*
-		 * Synchronize with the MMU notifier callbacks in
-		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
-		 * While we have the rmap lock, code running on other CPUs
-		 * cannot finish unmapping the host real page that backs
-		 * this guest real page, so we are OK to access the host
-		 * real page.
-		 */
-		lock_rmap(rmap);
-
 		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
 		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
 			ret = H_TOO_HARD;
@@ -582,7 +564,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
 		ua = 0;
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
 			ret = H_PARAMETER;
 			goto invalidate_exit;
 		}
@@ -607,10 +589,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		iommu_tce_kill_rm(stit->tbl, entry, npages);
 
 unlock_exit:
-	if (rmap)
-		unlock_rmap(rmap);
-
-	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+	if (!prereg)
+		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 	return ret;
 }
 
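For context, a minimal sketch of the synchronization pattern the real-mode handler now relies on instead of the per-page rmap lock: snapshot kvm->mmu_notifier_seq, take kvm->mmu_lock through its raw arch spinlock (real-mode code cannot go through the normal spin_lock() path), and detect a racing MMU notifier invalidation with mmu_notifier_retry(), the helper's name in kernels of this vintage. The function below is illustrative only; demo_rm_ua_to_hpa() is a hypothetical stand-in for the patch's kvmppc_rm_ua_to_hpa() call site, not kernel API.

#include <linux/kvm_host.h>
#include <asm/hvcall.h>

/* Hypothetical helper; mirrors the locking pattern, not the patch itself. */
static long demo_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long ua,
			      unsigned long *phpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	long ret = H_TOO_HARD;

	/* Snapshot the notifier sequence before walking the page table. */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Real mode cannot take the ordinary spinlock slow path, so the
	 * patch locks the arch spinlock underlying kvm->mmu_lock directly.
	 */
	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);

	/*
	 * While mmu_lock is held, a concurrent MMU notifier invalidation
	 * cannot complete, so a translation that passes the retry check
	 * stays valid until we unlock.
	 */
	if (!mmu_notifier_retry(kvm, mmu_seq)) {
		/* Walk the page table and fill *phpa here. */
		*phpa = 0;
		ret = H_SUCCESS;
	}

	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
	return ret;
}

Holding kvm->mmu_lock gives the same guarantee the rmap lock used to provide (the host real page cannot finish being unmapped while we use it), but without the vmalloc_to_phys() lookup and lock_rmap()/unlock_rmap() pair on every H_PUT_TCE_INDIRECT, which is why the prmap plumbing can be dropped from kvmppc_rm_tce_to_ua() entirely.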