hugetlb: follow_hugetlb_page() for write access
When calling get_user_pages(), a write flag is passed in by the caller
to indicate if write access is required on the faulted-in pages.
Currently, follow_hugetlb_page() ignores this flag and always faults
pages for read-only access.  This can cause data corruption because a
device driver that calls get_user_pages() with write set will not
expect COW faults to occur on the returned pages.

This patch passes the write flag down to follow_hugetlb_page() and
makes sure hugetlb_fault() is called with the right write_access
parameter.

[ezk@cs.sunysb.edu: build fix]
Signed-off-by: Adam Litke <agl@us.ibm.com>
Reviewed-by: Ken Chen <kenchen@google.com>
Cc: David Gibson <hermes@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Erez Zadok <ezk@cs.sunysb.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 5b23dbe817
parent 19cd7537bd
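To make the failure mode concrete, here is a minimal kernel-style sketch, not part of this patch, of the driver-side call that motivates it. It uses the 2.6.23-era eight-argument get_user_pages() signature visible in the mm/memory.c hunk below; pin_user_buffer() is a hypothetical helper name.

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative only: a driver pins user pages so a device can DMA into
 * them.  With write=1, get_user_pages() must fault the pages in
 * writable.  Before this patch, hugetlb VMAs were faulted read-only
 * regardless, so a later COW fault could silently redirect the user's
 * mapping away from the pages the device writes to. */
static int pin_user_buffer(unsigned long uaddr, int npages,
			   struct page **pages)
{
	int got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uaddr, npages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);

	/* A real caller must put_page() any partially pinned pages. */
	return (got == npages) ? 0 : -EFAULT;
}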
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -19,7 +19,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
 int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
+int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
@@ -106,7 +106,7 @@ static inline unsigned long hugetlb_total_pages(void)
 	return 0;
 }
 
-#define follow_hugetlb_page(m,v,p,vs,a,b,i)	({ BUG(); 0; })
+#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
 #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
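This hunk is the "[ezk@cs.sunysb.edu: build fix]" from the log: on !CONFIG_HUGETLB_PAGE builds, follow_hugetlb_page() is a stub macro, so its arity must track the real prototype or callers stop compiling. A small userspace sketch, not from the patch, of how that ({ BUG(); 0; }) stub shape behaves (statement expressions are a GNU C extension, so build with gcc):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's BUG() in this userspace demo. */
#define BUG() abort()

/* Same shape as the stub: swallow all eight arguments, crash if ever
 * executed, and otherwise evaluate to 0 so callers can use the result. */
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; })

int main(void)
{
	/* Never executed; this only shows the macro now accepts eight
	 * arguments, matching the real prototype after this patch. */
	if (0)
		(void)follow_hugetlb_page(0, 0, 0, 0, 0, 0, 0, 0);
	printf("stub arity matches the real function\n");
	return 0;
}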
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -868,7 +868,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
-			unsigned long *position, int *length, int i)
+			unsigned long *position, int *length, int i,
+			int write)
 {
 	unsigned long pfn_offset;
 	unsigned long vaddr = *position;
@@ -890,7 +891,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		int ret;
 
 		spin_unlock(&mm->page_table_lock);
-		ret = hugetlb_fault(mm, vma, vaddr, 0);
+		ret = hugetlb_fault(mm, vma, vaddr, write);
 		spin_lock(&mm->page_table_lock);
 		if (!(ret & VM_FAULT_ERROR))
 			continue;
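The one-character-looking change above sits inside a classic drop-lock/fault/retake loop: page_table_lock is a spinlock, and hugetlb_fault() may sleep, so the lock is released across the fault and the entry re-checked once it is retaken. A minimal userspace analog of that pattern, with illustrative names only (not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int entry_present = 0;

/* May "sleep" (allocate, do I/O) in the real kernel, hence no lock held. */
static int slow_fault(int write)
{
	(void)write;          /* the fix: the flag now reaches the fault */
	entry_present = 1;    /* pretend we populated the entry */
	return 0;             /* 0 = no VM_FAULT_ERROR */
}

static int lookup_with_retry(int write)
{
	pthread_mutex_lock(&table_lock);
	while (!entry_present) {
		pthread_mutex_unlock(&table_lock); /* drop across the fault */
		int ret = slow_fault(write);
		pthread_mutex_lock(&table_lock);   /* retake, then re-check:
						      the entry may have
						      changed meanwhile */
		if (ret)
			break;
	}
	pthread_mutex_unlock(&table_lock);
	return entry_present ? 0 : -1;
}

int main(void)
{
	printf("lookup: %d\n", lookup_with_retry(1));
	return 0;
}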
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1036,7 +1036,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-						&start, &len, i);
+						&start, &len, i, write);
 			continue;
 		}
 
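Taken together, the three files make the flag flow end to end: get_user_pages() passes write to follow_hugetlb_page(), which forwards it to hugetlb_fault(), so a hugetlb page faulted on behalf of a write request is mapped writable up front, matching what get_user_pages() callers already get for normal pages.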