ANDROID: KVM: arm64: Don't update IOMMUs unnecessarily

When handling host stage-2 faults the hypervisor currently updates the
CPU _and_ IOMMUs page-tables. However, since we currently proactively
map accessible PA ranges into IOMMUs, updating them during stage-2
faults is unnecessary -- it only needs to be done during ownership
transitions. Optimize this by skipping the IOMMU updates from the host
memory abort path, which also reduces contention on the host stage-2
lock during boot and saves up to 1.1 sec of boot time on Pixel 6.

Bug: 232879742
Change-Id: I71f439311fe9573005efcc9529a2be53f21993a4
Signed-off-by: Quentin Perret <qperret@google.com>
This commit is contained in:
Quentin Perret 2022-06-07 12:00:07 +00:00
parent 39e9847ef0
commit b9029542be
3 changed files with 11 additions and 8 deletions

View File

@@ -78,7 +78,8 @@ int __pkvm_remove_ioguard_page(struct kvm_vcpu *vcpu, u64 ipa);
bool __pkvm_check_ioguard_page(struct kvm_vcpu *vcpu);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot,
bool update_iommu);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, pkvm_id owner_id);
int host_stage2_unmap_dev_locked(phys_addr_t start, u64 size);
int kvm_host_prepare_stage2(void *pgt_pool_base);

View File

@@ -465,7 +465,8 @@ static bool range_is_memory(u64 start, u64 end)
}
static inline int __host_stage2_idmap(u64 start, u64 end,
enum kvm_pgtable_prot prot)
enum kvm_pgtable_prot prot,
bool update_iommu)
{
int ret;
@@ -474,7 +475,8 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
if (ret)
return ret;
pkvm_iommu_host_stage2_idmap(start, end, prot);
if (update_iommu)
pkvm_iommu_host_stage2_idmap(start, end, prot);
return 0;
}
@@ -536,9 +538,9 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
}
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
enum kvm_pgtable_prot prot)
enum kvm_pgtable_prot prot, bool update_iommu)
{
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot, update_iommu);
}
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(32, 1)
@@ -612,7 +614,7 @@ static int host_stage2_idmap(u64 addr)
if (ret)
return ret;
return host_stage2_idmap_locked(range.start, range.end - range.start, prot);
return host_stage2_idmap_locked(range.start, range.end - range.start, prot, false);
}
static bool is_dabt(u64 esr)
@@ -833,7 +835,7 @@ static int __host_set_page_state_range(u64 addr, u64 size,
{
enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
return host_stage2_idmap_locked(addr, size, prot);
return host_stage2_idmap_locked(addr, size, prot, true);
}
static int host_request_owned_transition(u64 *completer_addr,

View File

@@ -222,7 +222,7 @@ static int fix_host_ownership_walker(u64 addr, u64 end, u32 level,
return -EINVAL;
}
return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
return host_stage2_idmap_locked(phys, PAGE_SIZE, prot, false);
}
static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,