userfaultfd: mcopy_atomic: return -ENOENT when no compatible VMA found
The memory mapping of a process may change between the #PF event and the
call to mcopy_atomic that comes to resolve the page fault.  In such a
case, there will be no VMA covering the range passed to mcopy_atomic, or
the VMA will not have userfaultfd context.  To allow the uffd monitor to
distinguish these cases from other errors, let's return -ENOENT instead
of -EINVAL.

Note that despite the availability of UFFD_EVENT_UNMAP, there may still
be a race between the processing of UFFD_EVENT_UNMAP and an outstanding
mcopy_atomic in the case of non-cooperative uffd usage.

[rppt@linux.vnet.ibm.com: update cases returning -ENOENT]
  Link: http://lkml.kernel.org/r/20170207150249.GA6709@rapoport-lnx
[aarcange@redhat.com: merge fix]
[akpm@linux-foundation.org: fix the merge fix]
Link: http://lkml.kernel.org/r/1485542673-24387-5-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
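As an illustration of the monitor-side contract described above, here is a minimal sketch of how a non-cooperative userfaultfd monitor might consume the new -ENOENT result from UFFDIO_COPY.  It is not part of this patch; resolve_fault() and its parameters are hypothetical names chosen for the example.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/*
 * Resolve one page fault by copying a prepared page into the faulting
 * address space.  Returns 0 when the fault is resolved or can safely be
 * dropped, -1 on a real error.  Assumes page_size is a power of two.
 */
static int resolve_fault(int uffd, void *page, unsigned long fault_addr,
			 unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst = fault_addr & ~(page_size - 1),	/* page-align the fault */
		.src = (unsigned long)page,
		.len = page_size,
		.mode = 0,
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
		return 0;

	/*
	 * ENOENT: the mapping changed between the #PF event and this
	 * UFFDIO_COPY (no VMA covers the range, or the VMA lost its
	 * userfaultfd registration).  A non-cooperative monitor drops the
	 * fault and relies on the corresponding UFFD_EVENT_UNMAP/REMAP
	 * instead of treating this as a failure.
	 */
	if (errno == ENOENT)
		return 0;

	fprintf(stderr, "UFFDIO_COPY failed: %s\n", strerror(errno));
	return -1;
}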
parent ca49ca7114
commit 27d02568f5
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -197,22 +197,25 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 	 * retry, dst_vma will be set to NULL and we must lookup again.
 	 */
 	if (!dst_vma) {
-		err = -EINVAL;
+		err = -ENOENT;
 		dst_vma = find_vma(dst_mm, dst_start);
 		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
 			goto out_unlock;
-
-		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
+		/*
+		 * Only allow __mcopy_atomic_hugetlb on userfaultfd
+		 * registered ranges.
+		 */
+		if (!dst_vma->vm_userfaultfd_ctx.ctx)
 			goto out_unlock;
 
-		/*
-		 * Make sure the remaining dst range is both valid and
-		 * fully within a single existing vma.
-		 */
 		if (dst_start < dst_vma->vm_start ||
 		    dst_start + len > dst_vma->vm_end)
 			goto out_unlock;
 
+		err = -EINVAL;
+		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
+			goto out_unlock;
+
 		vm_shared = dst_vma->vm_flags & VM_SHARED;
 	}
 
@@ -220,12 +223,6 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		    (len - copied) & (vma_hpagesize - 1)))
 		goto out_unlock;
 
-	/*
-	 * Only allow __mcopy_atomic_hugetlb on userfaultfd registered ranges.
-	 */
-	if (!dst_vma->vm_userfaultfd_ctx.ctx)
-		goto out_unlock;
-
 	/*
 	 * If not shared, ensure the dst_vma has a anon_vma.
 	 */
@@ -404,29 +401,10 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 	 * Make sure the vma is not shared, that the dst range is
 	 * both valid and fully within a single existing vma.
 	 */
-	err = -EINVAL;
+	err = -ENOENT;
 	dst_vma = find_vma(dst_mm, dst_start);
 	if (!dst_vma)
 		goto out_unlock;
-	/*
-	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
-	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
-	 */
-	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
-	    dst_vma->vm_flags & VM_SHARED))
-		goto out_unlock;
-
-	if (dst_start < dst_vma->vm_start ||
-	    dst_start + len > dst_vma->vm_end)
-		goto out_unlock;
-
-	/*
-	 * If this is a HUGETLB vma, pass off to appropriate routine
-	 */
-	if (is_vm_hugetlb_page(dst_vma))
-		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
-					      src_start, len, zeropage);
-
 	/*
 	 * Be strict and only allow __mcopy_atomic on userfaultfd
 	 * registered ranges to prevent userland errors going
@@ -439,6 +417,26 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 	if (!dst_vma->vm_userfaultfd_ctx.ctx)
 		goto out_unlock;
 
+	if (dst_start < dst_vma->vm_start ||
+	    dst_start + len > dst_vma->vm_end)
+		goto out_unlock;
+
+	err = -EINVAL;
+	/*
+	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
+	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
+	 */
+	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
+	    dst_vma->vm_flags & VM_SHARED))
+		goto out_unlock;
+
+	/*
+	 * If this is a HUGETLB vma, pass off to appropriate routine
+	 */
+	if (is_vm_hugetlb_page(dst_vma))
+		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
+					      src_start, len, zeropage);
+
 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
 		goto out_unlock;
 