From 12cb814f3bb35736420cc6bfc9fed7b6a9d3a828 Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Sat, 7 May 2011 16:31:36 +0900
Subject: [PATCH] KVM: MMU: Clean up gpte reading with copy_from_user()

When we optimized walk_addr_generic() by not using the generic guest
memory reader, we replaced copy_from_user() with get_user():

  commit e30d2a170506830d5eef5e9d7990c5aedf1b0a51
  KVM: MMU: Optimize guest page table walk

  commit 15e2ac9a43d4d7d08088e404fddf2533a8e7d52e
  KVM: MMU: Fix 64-bit paging breakage on x86_32

But as Andi pointed out later, copy_from_user() does the same as
get_user() as long as we give a constant size to it.

So we use copy_from_user() to clean up the code.

The only noticeable regression this introduces is 64-bit gpte reading
on x86_32 hosts, which is needed for PAE guests. But this can be
mitigated by implementing an 8-byte get_user() for x86_32, if needed.

Signed-off-by: Takuya Yoshikawa
Signed-off-by: Avi Kivity
---
 arch/x86/kvm/paging_tmpl.h | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 52450a6b784f..88ca456ccd68 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -115,20 +115,6 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 	return access;
 }
 
-static int FNAME(read_gpte)(pt_element_t *pte, pt_element_t __user *ptep_user)
-{
-#if defined(CONFIG_X86_32) && (PTTYPE == 64)
-	u32 *p = (u32 *)pte;
-	u32 __user *p_user = (u32 __user *)ptep_user;
-
-	if (unlikely(get_user(*p, p_user)))
-		return -EFAULT;
-	return get_user(*(p + 1), p_user + 1);
-#else
-	return get_user(*pte, ptep_user);
-#endif
-}
-
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -199,7 +185,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		}
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-		if (unlikely(FNAME(read_gpte)(&pte, ptep_user))) {
+		if (unlikely(copy_from_user(&pte, ptep_user, sizeof(pte)))) {
 			present = false;
 			break;
 		}
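
[Note: the following is an illustrative sketch, not part of the patch.
The helper name is hypothetical; it only shows how the 8-byte
get_user() for x86_32 mentioned above could be composed from two
32-bit get_user() calls, much like the removed FNAME(read_gpte)
fallback.]

/*
 * Hypothetical sketch: a 64-bit user read built from two 32-bit
 * get_user() calls for 32-bit hosts, reading the low word first
 * (x86 is little endian).
 */
static inline int get_user_u64_sketch(u64 *val, const u64 __user *uptr)
{
	const u32 __user *p = (const u32 __user *)uptr;
	u32 lo, hi;

	if (unlikely(get_user(lo, p)))
		return -EFAULT;
	if (unlikely(get_user(hi, p + 1)))
		return -EFAULT;

	*val = ((u64)hi << 32) | lo;
	return 0;
}

As with the removed fallback, the two reads here are not atomic with
respect to concurrent guest updates of the pte.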