mm: fix clear_refs_write locking

Turning page table entries read-only requires the mmap_sem held for
writing.

So stop doing the odd games with turning things from read locks to write
locks and back.  Just get the write lock.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Linus Torvalds 2021-01-08 13:13:41 -08:00
parent 1d94330a43
commit 29a951dfb3
1 changed file with 9 additions and 23 deletions

View File

@@ -1215,42 +1215,27 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
.type = type,
};
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
if (mmap_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
/*
* Writing 5 to /proc/pid/clear_refs resets the peak
* resident set size to this mm's current rss value.
*/
reset_mm_hiwater_rss(mm);
mmap_write_unlock(mm);
goto out_mm;
goto out_unlock;
}
if (mmap_read_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
tlb_gather_mmu(&tlb, mm, 0, -1);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
mmap_read_unlock(mm);
if (mmap_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma);
}
mmap_write_downgrade(mm);
break;
}
mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
0, NULL, mm, 0, -1UL);
@@ -1261,7 +1246,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, 0, -1);
mmap_read_unlock(mm);
out_unlock:
mmap_write_unlock(mm);
out_mm:
mmput(mm);
}