mm: vm_page_prot: update with WRITE_ONCE/READ_ONCE
vma->vm_page_prot is read locklessly from the rmap_walk, but it may be updated concurrently. Updating it with WRITE_ONCE() and reading it with READ_ONCE() prevents the risk of observing intermediate (torn) values.

Link: http://lkml.kernel.org/r/1474660305-19222-1-git-send-email-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jan Vorlicek <janvorli@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6d2329f887
parent 6213055f2c
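For reference, a minimal userspace sketch of the publish/consume pattern this commit applies: the protection value is built up in a local variable, published with a single WRITE_ONCE(), and consumed by the lockless reader with READ_ONCE(), so the compiler cannot tear, cache, or re-read the value. The macro definitions below are simplified stand-ins for the kernel's <linux/compiler.h> versions, and prot_t, set_page_prot() and lockless_reader() are hypothetical names used only for illustration, not kernel code.

/*
 * Minimal sketch of the WRITE_ONCE()/READ_ONCE() pattern.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

typedef unsigned long prot_t;		/* stand-in for pgprot_t */

static prot_t page_prot;		/* read locklessly, like vma->vm_page_prot */

static void set_page_prot(prot_t newprot)
{
	prot_t prot = newprot;

	/* Compute the final value in a local variable... */
	prot |= 0x4;			/* e.g. adjust a software bit */

	/*
	 * ...then publish it with one full-width store.  A plain
	 * "page_prot = prot" would allow the compiler to emit multiple
	 * partial stores, which a lockless reader could observe as an
	 * intermediate value.
	 */
	WRITE_ONCE(page_prot, prot);
}

static void *lockless_reader(void *arg)
{
	/* One full-width load; never torn or silently re-read. */
	prot_t prot = READ_ONCE(page_prot);

	printf("observed prot = %#lx\n", prot);
	return NULL;
}

int main(void)
{
	pthread_t t;

	set_page_prot(0x100);
	pthread_create(&t, NULL, lockless_reader, NULL);
	pthread_join(t, NULL);
	return 0;
}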
@@ -1517,7 +1517,7 @@ static inline int pte_devmap(pte_t pte)
 }
 #endif
 
-int vma_wants_writenotify(struct vm_area_struct *vma);
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
 
 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
 			       spinlock_t **ptl);
@@ -1620,7 +1620,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			if (soft_dirty)
 				entry = pte_swp_mksoft_dirty(entry);
 		} else {
-			entry = mk_pte(page + i, vma->vm_page_prot);
+			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
 			entry = maybe_mkwrite(entry, vma);
 			if (!write)
 				entry = pte_wrprotect(entry);
@@ -234,7 +234,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 		goto unlock;
 
 	get_page(new);
-	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+	pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
 	if (pte_swp_soft_dirty(*ptep))
 		pte = pte_mksoft_dirty(pte);
 
 mm/mmap.c | 16
@@ -116,13 +116,15 @@ static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
 void vma_set_page_prot(struct vm_area_struct *vma)
 {
 	unsigned long vm_flags = vma->vm_flags;
+	pgprot_t vm_page_prot;
 
-	vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
-	if (vma_wants_writenotify(vma)) {
+	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+	if (vma_wants_writenotify(vma, vm_page_prot)) {
 		vm_flags &= ~VM_SHARED;
-		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
-						     vm_flags);
+		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
 	}
+	/* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
+	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 }
 
 /*
@@ -1386,7 +1388,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
  * to the private version (using protection_map[] without the
  * VM_SHARED bit).
  */
-int vma_wants_writenotify(struct vm_area_struct *vma)
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
 {
 	vm_flags_t vm_flags = vma->vm_flags;
 	const struct vm_operations_struct *vm_ops = vma->vm_ops;
@@ -1401,8 +1403,8 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 
 	/* The open routine did something to the protections that pgprot_modify
 	 * won't preserve? */
-	if (pgprot_val(vma->vm_page_prot) !=
-	    pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
+	if (pgprot_val(vm_page_prot) !=
+	    pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
 		return 0;
 
 	/* Do we need to track softdirty? */
@@ -327,7 +327,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	dirty_accountable = vma_wants_writenotify(vma);
+	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
 	vma_set_page_prot(vma);
 
 	change_protection(vma, start, end, vma->vm_page_prot,