From b9029542bed3ea7de4de14b9ede94aadabdff01b Mon Sep 17 00:00:00 2001
From: Quentin Perret
Date: Tue, 7 Jun 2022 12:00:07 +0000
Subject: [PATCH] ANDROID: KVM: arm64: Don't update IOMMUs unnecessarily

When handling host stage-2 faults the hypervisor currently updates the
CPU _and_ IOMMU page-tables. However, since we proactively map
accessible PA ranges into IOMMUs, updating them during stage-2 faults
is unnecessary -- it only needs to be done during ownership
transitions.

Optimize this by skipping the IOMMU updates in the host memory abort
path, which also reduces contention on the host stage-2 lock during
boot and saves up to 1.1 sec of boot time on Pixel 6.

Bug: 232879742
Change-Id: I71f439311fe9573005efcc9529a2be53f21993a4
Signed-off-by: Quentin Perret
---
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  3 ++-
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 14 ++++++++------
 arch/arm64/kvm/hyp/nvhe/setup.c               |  2 +-
 3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index e796ff5e74fb..f9ce8841f8a1 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -78,7 +78,8 @@ int __pkvm_remove_ioguard_page(struct kvm_vcpu *vcpu, u64 ipa);
 bool __pkvm_check_ioguard_page(struct kvm_vcpu *vcpu);
 
 bool addr_is_memory(phys_addr_t phys);
-int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
+int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot,
+			     bool update_iommu);
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, pkvm_id owner_id);
 int host_stage2_unmap_dev_locked(phys_addr_t start, u64 size);
 int kvm_host_prepare_stage2(void *pgt_pool_base);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 76703b2b6f3c..6ca172ac3445 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -465,7 +465,8 @@ static bool range_is_memory(u64 start, u64 end)
 }
 
 static inline int __host_stage2_idmap(u64 start, u64 end,
-				      enum kvm_pgtable_prot prot)
+				      enum kvm_pgtable_prot prot,
+				      bool update_iommu)
 {
 	int ret;
 
@@ -474,7 +475,8 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
 	if (ret)
 		return ret;
 
-	pkvm_iommu_host_stage2_idmap(start, end, prot);
+	if (update_iommu)
+		pkvm_iommu_host_stage2_idmap(start, end, prot);
 
 	return 0;
 }
@@ -536,9 +538,9 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 }
 
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
-			     enum kvm_pgtable_prot prot)
+			     enum kvm_pgtable_prot prot, bool update_iommu)
 {
-	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
+	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot, update_iommu);
 }
 
 #define KVM_INVALID_PTE_OWNER_MASK	GENMASK(32, 1)
@@ -612,7 +614,7 @@ static int host_stage2_idmap(u64 addr)
 	if (ret)
 		return ret;
 
-	return host_stage2_idmap_locked(range.start, range.end - range.start, prot);
+	return host_stage2_idmap_locked(range.start, range.end - range.start, prot, false);
 }
 
 static bool is_dabt(u64 esr)
@@ -833,7 +835,7 @@ static int __host_set_page_state_range(u64 addr, u64 size,
 {
 	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
 
-	return host_stage2_idmap_locked(addr, size, prot);
+	return host_stage2_idmap_locked(addr, size, prot, true);
 }
 
 static int host_request_owned_transition(u64 *completer_addr,
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 1a6f3eba5035..a3d8d4198550 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -222,7 +222,7 @@ static int fix_host_ownership_walker(u64 addr, u64 end, u32 level,
 		return -EINVAL;
 	}
 
-	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
+	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot, false);
 }
 
 static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,
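
Reviewer note, not part of the patch: below is a minimal, standalone C
sketch of the update_iommu pattern introduced above. cpu_stage2_map()
and iommu_stage2_map() are invented stand-ins for the hypervisor's real
kvm_pgtable_stage2_map() and pkvm_iommu_host_stage2_idmap(); the
addresses in main() are arbitrary. The fault path passes
update_iommu = false because the IOMMUs already mirror every accessible
PA range, while ownership transitions pass true so the IOMMU mappings
track the new owner.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the hypervisor's page-table updaters. */
static void cpu_stage2_map(unsigned long start, unsigned long end)
{
	printf("CPU   stage-2 idmap [%#lx, %#lx)\n", start, end);
}

static void iommu_stage2_map(unsigned long start, unsigned long end)
{
	printf("IOMMU stage-2 idmap [%#lx, %#lx)\n", start, end);
}

/* Shape of __host_stage2_idmap() after this patch: the IOMMU side is
 * only touched when the caller asks for it. */
static int host_stage2_idmap(unsigned long start, unsigned long end,
			     bool update_iommu)
{
	cpu_stage2_map(start, end);
	if (update_iommu)
		iommu_stage2_map(start, end);
	return 0;
}

int main(void)
{
	/* Host memory abort: IOMMUs already cover all accessible PAs. */
	host_stage2_idmap(0x80000000UL, 0x80200000UL, false);

	/* Ownership transition: IOMMU mappings must follow the change. */
	host_stage2_idmap(0x80200000UL, 0x80400000UL, true);
	return 0;
}

The win comes from the fault path being hot (every host stage-2 fault
takes the stage-2 lock), whereas ownership transitions are
comparatively rare, so the IOMMU walk is only paid where it is needed.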