mmap locking API: use coccinelle to convert mmap_sem rwsem call sites
This change converts the existing mmap_sem rwsem calls to use the new mmap
locking API instead.

The change is generated using coccinelle with the following rule:

// spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir .

@@
expression mm;
@@
(
-init_rwsem
+mmap_init_lock
|
-down_write
+mmap_write_lock
|
-down_write_killable
+mmap_write_lock_killable
|
-down_write_trylock
+mmap_write_trylock
|
-up_write
+mmap_write_unlock
|
-downgrade_write
+mmap_write_downgrade
|
-down_read
+mmap_read_lock
|
-down_read_killable
+mmap_read_lock_killable
|
-down_read_trylock
+mmap_read_trylock
|
-up_read
+mmap_read_unlock
)
-(&mm->mmap_sem)
+(mm)

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
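For reference, the mmap locking API used above was introduced earlier in this
patch series as thin wrappers around the existing mmap_sem rwsem. A minimal
sketch of what the wrappers look like at this point in the series (the bodies
are assumed from the preceding API-introduction patch, not shown by this
commit):

	/* Sketch of the wrappers this patch converts call sites to.
	 * Assumed forwarding implementation; see the API-introduction
	 * patch earlier in this series for the authoritative version. */
	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_sem);
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_sem);
	}

	static inline int mmap_write_lock_killable(struct mm_struct *mm)
	{
		return down_write_killable(&mm->mmap_sem);
	}

Each converted call site therefore keeps exactly the same locking semantics;
only the spelling changes.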
parent 0adf65f53a
commit d8ed45c5dc
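To illustrate the mechanical rewrite performed by the rule, a hypothetical
call site (not a hunk from this patch) changes like this:

	/* before */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	up_read(&mm->mmap_sem);

	/* after */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	mmap_read_unlock(mm);

The hunks below are the generated result of applying that rule across the
tree.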
@@ -957,12 +957,12 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		si_code = SEGV_ACCERR;
 	else {
 		struct mm_struct *mm = current->mm;
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		if (find_vma(mm, (unsigned long)va))
 			si_code = SEGV_ACCERR;
 		else
 			si_code = SEGV_MAPERR;
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 	}
 	send_sig_fault(SIGSEGV, si_code, va, 0, current);
 	return;
@@ -117,7 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -180,14 +180,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	return;
 
 	/* Something tried to access memory that isn't in our memory map.
 	   Fix it, but check if it's kernel or user first. */
  bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	if (user_mode(regs))
 		goto do_sigsegv;
@@ -211,14 +211,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* We ran out of memory, or some other thing happened to us that
 	   made us unable to handle the page fault gracefully. */
  out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
  do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	/* Send a sigbus, regardless of whether we were in kernel
 	   or user mode. */
 	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
@@ -90,10 +90,10 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	if (unlikely(ret != -EFAULT))
 		goto fail;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
 			       FAULT_FLAG_WRITE, NULL);
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	if (likely(!ret))
 		goto again;
@@ -89,7 +89,7 @@ static void show_faulting_vma(unsigned long address)
 	/* can't use print_vma_addr() yet as it doesn't check for
 	 * non-inclusive vma
 	 */
-	down_read(&active_mm->mmap_sem);
+	mmap_read_lock(active_mm);
 	vma = find_vma(active_mm, address);
 
 	/* check against the find_vma( ) behaviour which returns the next VMA
@@ -111,7 +111,7 @@ static void show_faulting_vma(unsigned long address)
 	} else
 		pr_info(" @No matching VMA found\n");
 
-	up_read(&active_mm->mmap_sem);
+	mmap_read_unlock(active_mm);
 }
 
 static void show_ecr_verbose(struct pt_regs *regs)
@@ -107,7 +107,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 		flags |= FAULT_FLAG_WRITE;
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -150,7 +150,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	}
 
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Major/minor page fault accounting
@@ -431,7 +431,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	npages = 1; /* for sigpage */
 	npages += vdso_total_pages;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 	hint = sigpage_addr(mm, npages);
 	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
@@ -458,7 +458,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		arm_install_vdso(mm, addr + PAGE_SIZE);
 
  up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 #endif
@@ -97,12 +97,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
 {
 	int si_code;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	if (find_vma(current->mm, addr) == NULL)
 		si_code = SEGV_MAPERR;
 	else
 		si_code = SEGV_ACCERR;
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	pr_debug("SWP{B} emulation: access caused memory abort!\n");
 	arm_notify_die("Illegal memory access", regs,
@@ -101,7 +101,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	atomic = faulthandler_disabled();
 
 	if (!atomic)
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 	while (n) {
 		pte_t *pte;
 		spinlock_t *ptl;
@@ -109,11 +109,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 
 		while (!pin_page_for_write(to, &pte, &ptl)) {
 			if (!atomic)
-				up_read(&current->mm->mmap_sem);
+				mmap_read_unlock(current->mm);
 			if (__put_user(0, (char __user *)to))
 				goto out;
 			if (!atomic)
-				down_read(&current->mm->mmap_sem);
+				mmap_read_lock(current->mm);
 		}
 
 		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -133,7 +133,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 			spin_unlock(ptl);
 	}
 	if (!atomic)
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 
 out:
 	return n;
@@ -170,17 +170,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		return 0;
 	}
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	while (n) {
 		pte_t *pte;
 		spinlock_t *ptl;
 		int tocopy;
 
 		while (!pin_page_for_write(addr, &pte, &ptl)) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			if (__put_user(0, (char __user *)addr))
 				goto out;
-			down_read(&current->mm->mmap_sem);
+			mmap_read_lock(current->mm);
 		}
 
 		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -198,7 +198,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		else
 			spin_unlock(ptl);
 	}
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 out:
 	return n;
@@ -271,11 +271,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
 			goto no_context;
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -325,7 +325,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
@@ -448,12 +448,12 @@ void arm64_notify_segfault(unsigned long addr)
 {
 	int code;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	if (find_vma(current->mm, addr) == NULL)
 		code = SEGV_MAPERR;
 	else
 		code = SEGV_ACCERR;
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	force_signal_inject(SIGSEGV, code, addr);
 }
@@ -340,7 +340,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct mm_struct *mm = current->mm;
 	int ret;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	ret = aarch32_kuser_helpers_setup(mm);
@@ -357,7 +357,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 #endif /* CONFIG_COMPAT_VDSO */
 
 out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 #endif /* CONFIG_COMPAT */
@@ -398,7 +398,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	struct mm_struct *mm = current->mm;
 	int ret;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	ret = __setup_additional_pages(VDSO_ABI_AA64,
@@ -406,7 +406,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       bprm,
 				       uses_interp);
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 	return ret;
 }
@@ -497,11 +497,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->pc))
 			goto no_context;
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in which
@@ -510,7 +510,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		might_sleep();
 #ifdef CONFIG_DEBUG_VM
 		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			goto no_context;
 		}
 #endif
@@ -532,7 +532,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 			goto retry;
 		}
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Handle the "normal" (no error) case first.
@@ -50,7 +50,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long addr;
 	struct mm_struct *mm = current->mm;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
@@ -70,7 +70,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	mm->context.vdso = (void *)addr;
 
 up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 
@@ -120,7 +120,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -170,7 +170,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 			      address);
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -178,7 +178,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * Fix it, but check if it's kernel or user first..
 	 */
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -217,7 +217,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
do_sigbus:
 	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
@@ -52,7 +52,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long vdso_base;
 	struct mm_struct *mm = current->mm;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	/* Try to get it loaded right near ld.so/glibc. */
@@ -76,7 +76,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	mm->context.vdso = (void *)vdso_base;
 
 up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 
@@ -54,7 +54,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -106,11 +106,11 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 			}
 		}
 
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		return;
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Handle copyin/out exception cases */
 	if (!user_mode(regs))
@@ -137,7 +137,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	return;
 
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
@@ -2260,13 +2260,13 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	 * now we atomically find some area in the address space and
 	 * remap the buffer in it.
 	 */
-	down_write(&task->mm->mmap_sem);
+	mmap_write_lock(task->mm);
 
 	/* find some free area in address space, must have mmap sem held */
 	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
 	if (IS_ERR_VALUE(vma->vm_start)) {
 		DPRINT(("Cannot find unmapped area for size %ld\n", size));
-		up_write(&task->mm->mmap_sem);
+		mmap_write_unlock(task->mm);
 		goto error;
 	}
 	vma->vm_end = vma->vm_start + size;
@@ -2277,7 +2277,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	/* can only be applied to current task, need to have the mm semaphore held when called */
 	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
 		DPRINT(("Can't remap buffer\n"));
-		up_write(&task->mm->mmap_sem);
+		mmap_write_unlock(task->mm);
 		goto error;
 	}
 
@@ -2288,7 +2288,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	insert_vm_struct(mm, vma);
 
 	vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
-	up_write(&task->mm->mmap_sem);
+	mmap_write_unlock(task->mm);
 
 	/*
 	 * keep track of user level virtual address
@@ -106,7 +106,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (mask & VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
 	if (!vma && !prev_vma )
@@ -182,7 +182,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
  check_expansion:
@@ -213,7 +213,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		goto good_area;
 
  bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 #ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
 #endif
@@ -279,7 +279,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	return;
 
  out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
@@ -118,13 +118,13 @@ ia64_init_addr_space (void)
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-		down_write(&current->mm->mmap_sem);
+		mmap_write_lock(current->mm);
 		if (insert_vm_struct(current->mm, vma)) {
-			up_write(&current->mm->mmap_sem);
+			mmap_write_unlock(current->mm);
 			vm_area_free(vma);
 			return;
 		}
-		up_write(&current->mm->mmap_sem);
+		mmap_write_unlock(current->mm);
 	}
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -136,13 +136,13 @@ ia64_init_addr_space (void)
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
 			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
 					VM_DONTEXPAND | VM_DONTDUMP;
-			down_write(&current->mm->mmap_sem);
+			mmap_write_lock(current->mm);
 			if (insert_vm_struct(current->mm, vma)) {
-				up_write(&current->mm->mmap_sem);
+				mmap_write_unlock(current->mm);
 				vm_area_free(vma);
 				return;
 			}
-			up_write(&current->mm->mmap_sem);
+			mmap_write_unlock(current->mm);
 		}
 	}
 }
@@ -399,7 +399,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 		 * Verify that the specified address region actually belongs
 		 * to this process.
 		 */
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		vma = find_vma(current->mm, addr);
 		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
 			goto out_unlock;
@@ -450,7 +450,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 		}
 	}
out_unlock:
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
out:
 	return ret;
 }
@@ -472,7 +472,7 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
 	spinlock_t *ptl;
 	unsigned long mem_value;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	pgd = pgd_offset(mm, (unsigned long)mem);
 	if (!pgd_present(*pgd))
 		goto bad_access;
@@ -501,11 +501,11 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
 		__put_user(newval, mem);
 
 	pte_unmap_unlock(pte, ptl);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return mem_value;
 
bad_access:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	/* This is not necessarily a bad access, we can get here if
 	   a memory we're trying to write to should be copied-on-write.
 	   Make the kernel do the necessary page stuff, then re-iterate.
@@ -545,13 +545,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
 	struct mm_struct *mm = current->mm;
 	unsigned long mem_value;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	mem_value = *mem;
 	if (mem_value == oldval)
 		*mem = newval;
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return mem_value;
 }
 
@@ -86,7 +86,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -174,7 +174,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return 0;
 
 	/*
@@ -182,7 +182,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * us unable to handle the page fault gracefully.
 	 */
out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
@@ -211,6 +211,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	current->thread.faddr = address;
 
send_sig:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return send_fault_sig(regs);
 }
@@ -136,12 +136,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	vma = find_vma(mm, address);
@@ -247,7 +247,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * keep track of tlb+htab misses that are good addrs but
@@ -258,7 +258,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	return;
 
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
bad_area_nosemaphore:
 	pte_errors++;
@@ -277,7 +277,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * us unable to handle the page fault gracefully.
 	 */
out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -285,7 +285,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	return;
 
do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (user_mode(regs)) {
 		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
 		return;
@@ -793,13 +793,13 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 		return 1;
 
 	case SIGSEGV:
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		vma = find_vma(current->mm, (unsigned long)fault_addr);
 		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 			si_code = SEGV_ACCERR;
 		else
 			si_code = SEGV_MAPERR;
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		force_sig_fault(SIGSEGV, si_code, fault_addr);
 		return 1;
 
@@ -94,7 +94,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct vm_area_struct *vma;
 	int ret;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
@@ -187,6 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	ret = 0;
 
out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
@@ -130,7 +130,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
 #endif
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	addr = vdso_random_addr(vdso_mapping_len);
@@ -185,12 +185,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		goto up_fail;
 	}
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return 0;
 
up_fail:
 	mm->context.vdso = NULL;
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 
@@ -126,12 +126,12 @@ void do_page_fault(unsigned long entry, unsigned long addr,
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (!user_mode(regs) &&
 		    !search_exception_tables(instruction_pointer(regs)))
 			goto no_context;
retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in which
@@ -255,7 +255,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -263,7 +263,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
 	 * Fix it, but check if it's kernel or user first..
 	 */
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
bad_area_nosemaphore:
 
@@ -323,14 +323,14 @@ void do_page_fault(unsigned long entry, unsigned long addr,
 	 */
 
out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
@@ -83,11 +83,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ea))
 			goto bad_area_nosemaphore;
retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	vma = find_vma(mm, address);
@@ -169,7 +169,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -177,7 +177,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	 * Fix it, but check if it's kernel or user first..
 	 */
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -215,14 +215,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	 * us unable to handle the page fault gracefully.
 	 */
out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
@@ -109,14 +109,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct mm_struct *mm = current->mm;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	/* Map kuser helpers to user space address */
 	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
 				      VM_READ | VM_EXEC | VM_MAYREAD |
 				      VM_MAYEXEC, kuser_page);
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 	return ret;
 }
@@ -104,7 +104,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 		goto no_context;
 
retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 
 	if (!vma)
@@ -192,7 +192,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -201,7 +201,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 */
 
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
bad_area_nosemaphore:
 
@@ -260,14 +260,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 	__asm__ __volatile__("l.nop 42");
 	__asm__ __volatile__("l.nop 1");
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
@@ -717,7 +717,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 			if (user_mode(regs)) {
 				struct vm_area_struct *vma;
 
-				down_read(&current->mm->mmap_sem);
+				mmap_read_lock(current->mm);
 				vma = find_vma(current->mm,regs->iaoq[0]);
 				if (vma && (regs->iaoq[0] >= vma->vm_start)
 					&& (vma->vm_flags & VM_EXEC)) {
@@ -725,10 +725,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 					fault_address = regs->iaoq[0];
 					fault_space = regs->iasq[0];
 
-					up_read(&current->mm->mmap_sem);
+					mmap_read_unlock(current->mm);
 					break; /* call do_page_fault() */
 				}
-				up_read(&current->mm->mmap_sem);
+				mmap_read_unlock(current->mm);
 			}
 			/* Fall Through */
 		case 27:
@@ -282,7 +282,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	if (acc_type & VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma_prev(mm, address, &prev_vma);
 	if (!vma || address < vma->vm_start)
 		goto check_expansion;
@@ -337,7 +337,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 			goto retry;
 		}
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
check_expansion:
@@ -349,7 +349,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	 * Something tried to access memory that isn't in our memory map..
 	 */
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	if (user_mode(regs)) {
 		int signo, si_code;
@@ -421,7 +421,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
 
out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
@@ -170,7 +170,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * and end up putting it elsewhere.
 	 * Add enough to the size so that the result can be aligned.
 	 */
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      (vdso_pages << PAGE_SHIFT) +
@@ -210,11 +210,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		goto fail_mmapsem;
 	}
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return 0;
 
fail_mmapsem:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return rc;
 }
 
@@ -4621,14 +4621,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 
 	/* Look up the VMA for the start of this memory slot */
 	hva = memslot->userspace_addr;
-	down_read(&kvm->mm->mmap_sem);
+	mmap_read_lock(kvm->mm);
 	vma = find_vma(kvm->mm, hva);
 	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
 
-	up_read(&kvm->mm->mmap_sem);
+	mmap_read_unlock(kvm->mm);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
 	if (psize >= 0x1000000)
@@ -4661,7 +4661,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	return err;
 
 up_out:
-	up_read(&kvm->mm->mmap_sem);
+	mmap_read_unlock(kvm->mm);
 	goto out_srcu;
 }
 
@@ -408,7 +408,7 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 	 */
 	ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
 			  MADV_UNMERGEABLE, &vma->vm_flags);
-	downgrade_write(&kvm->mm->mmap_sem);
+	mmap_write_downgrade(kvm->mm);
 	*downgrade = true;
 	if (ret)
 		return ret;
@@ -525,7 +525,7 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 
 	ret = H_PARAMETER;
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	down_write(&kvm->mm->mmap_sem);
+	mmap_write_lock(kvm->mm);
 
 	start = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(start))
@@ -548,9 +548,9 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	mutex_unlock(&kvm->arch.uvmem_lock);
out:
 	if (downgrade)
-		up_read(&kvm->mm->mmap_sem);
+		mmap_read_unlock(kvm->mm);
 	else
-		up_write(&kvm->mm->mmap_sem);
+		mmap_write_unlock(kvm->mm);
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return ret;
 }
@@ -703,7 +703,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
 
 	ret = H_PARAMETER;
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	down_read(&kvm->mm->mmap_sem);
+	mmap_read_lock(kvm->mm);
 	start = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(start))
 		goto out;
@@ -716,7 +716,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
 	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
 		ret = H_SUCCESS;
out:
-	up_read(&kvm->mm->mmap_sem);
+	mmap_read_unlock(kvm->mm);
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return ret;
 }
@@ -355,7 +355,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 	if (tlbsel == 1) {
 		struct vm_area_struct *vma;
-		down_read(&kvm->mm->mmap_sem);
+		mmap_read_lock(kvm->mm);
 
 		vma = find_vma(kvm->mm, hva);
 		if (vma && hva >= vma->vm_start &&
@@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
 		}
 
-		up_read(&kvm->mm->mmap_sem);
+		mmap_read_unlock(kvm->mm);
 	}
 
 	if (likely(!pfnmap)) {
@@ -96,7 +96,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		goto unlock_exit;
 	}
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
 			sizeof(struct vm_area_struct *);
 	chunk = min(chunk, entries);
@@ -114,7 +114,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		pinned += ret;
 		break;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (pinned != entries) {
 		if (!ret)
 			ret = -EFAULT;
@@ -94,7 +94,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 	size_t nw;
 	unsigned long next, limit;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	spt = mm_ctx_subpage_prot(&mm->context);
 	if (!spt)
@@ -129,7 +129,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 	}
 
err_out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -219,7 +219,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
 		return -EFAULT;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	spt = mm_ctx_subpage_prot(&mm->context);
 	if (!spt) {
@@ -269,11 +269,11 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 		if (addr + (nw << PAGE_SHIFT) > next)
 			nw = (next - addr) >> PAGE_SHIFT;
 
-		up_write(&mm->mmap_sem);
+		mmap_write_unlock(mm);
 		if (__copy_from_user(spp, map, nw * sizeof(u32)))
 			return -EFAULT;
 		map += nw;
-		down_write(&mm->mmap_sem);
+		mmap_write_lock(mm);
 
 		/* now flush any existing HPTEs for the range */
 		hpte_flush_range(mm, addr, nw);
@@ -282,6 +282,6 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 		spt->maxaddr = limit;
 	err = 0;
out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return err;
 }
@@ -33,7 +33,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	if (mm->pgd == NULL)
 		return -EFAULT;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	ret = -EFAULT;
 	vma = find_vma(mm, ea);
 	if (!vma)
@@ -82,7 +82,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		current->min_flt++;
 
out_unlock:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
@@ -108,7 +108,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	return __bad_area_nosemaphore(regs, address, si_code);
 }
@@ -144,7 +144,7 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 	 */
 	pkey = vma_pkey(vma);
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * If we are in kernel mode, bail out with a SEGV, this will
@@ -551,12 +551,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (!is_user && !search_exception_tables(regs->nip))
 			return bad_area_nosemaphore(regs, address);
 
retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -580,7 +580,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 			if (!must_retry)
 				return bad_area(regs, address);
 
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			if (fault_in_pages_readable((const char __user *)regs->nip,
 						    sizeof(unsigned int)))
 				return bad_area_nosemaphore(regs, address);
@@ -625,7 +625,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 		}
 	}
 
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);
@@ -332,7 +332,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
 		fput(exe_file);
 	}
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
 			continue;
@@ -349,13 +349,13 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
 	*spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
 	pr_debug("got dcookie for %pD\n", vma->vm_file);
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 out:
 	return app_cookie;
 
 fail_no_image_cookie:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	printk(KERN_ERR "SPU_PROF: "
 	       "%s, line %d: Cannot find dcookie for SPU binary\n",
@@ -343,11 +343,11 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
 		goto refault;
 
 	if (ctx->state == SPU_STATE_SAVED) {
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
 		err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
 		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
 		ret = vmf_insert_pfn(vmf->vma, vmf->address,
@@ -61,7 +61,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 
 	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		ret = vdso_base;
@@ -83,7 +83,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		mm->context.vdso = NULL;
 
 end:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 
@@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, addr);
 	if (unlikely(!vma))
 		goto bad_area;
@@ -155,7 +155,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * Fix it, but check if it's kernel or user first.
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
@@ -191,14 +191,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * (which will retry the fault, or kill us if we got oom-killed).
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
@@ -205,7 +205,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 
 again:
 	rc = -EFAULT;
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 
 	uaddr = __gmap_translate(gmap, gaddr);
 	if (IS_ERR_VALUE(uaddr))
@@ -234,7 +234,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 	pte_unmap_unlock(ptep, ptelock);
 	unlock_page(page);
 out:
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 
 	if (rc == -EAGAIN) {
 		wait_on_page_writeback(page);
@@ -207,7 +207,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * it at vdso_base which is the "natural" base for it, but we might
 	 * fail and end up putting it elsewhere.
 	 */
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
@@ -238,7 +238,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	rc = 0;
 
 out_up:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return rc;
 }
 
@@ -1173,7 +1173,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	int dat_protection, fake;
 	int rc;
 
-	down_read(&sg->mm->mmap_sem);
+	mmap_read_lock(sg->mm);
 	/*
 	 * We don't want any guest-2 tables to change - so the parent
 	 * tables/pointers we read stay valid - unshadowing is however
@@ -1202,6 +1202,6 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	if (!rc)
 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
 	ipte_unlock(vcpu);
-	up_read(&sg->mm->mmap_sem);
+	mmap_read_unlock(sg->mm);
 	return rc;
 }
@@ -2767,10 +2767,10 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
 {
 	struct page *page = NULL;
 
-	down_read(&kvm->mm->mmap_sem);
+	mmap_read_lock(kvm->mm);
 	get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
 			      &page, NULL, NULL);
-	up_read(&kvm->mm->mmap_sem);
+	mmap_read_unlock(kvm->mm);
 	return page;
 }
 
@@ -763,9 +763,9 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 			r = -EINVAL;
 		else {
 			r = 0;
-			down_write(&kvm->mm->mmap_sem);
+			mmap_write_lock(kvm->mm);
 			kvm->mm->context.allow_gmap_hpage_1m = 1;
-			up_write(&kvm->mm->mmap_sem);
+			mmap_write_unlock(kvm->mm);
 			/*
 			 * We might have to create fake 4k page
 			 * tables. To avoid that the hardware works on
@@ -1815,7 +1815,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	if (!keys)
 		return -ENOMEM;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	for (i = 0; i < args->count; i++) {
 		hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1829,7 +1829,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 			break;
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	if (!r) {
 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
@@ -1873,7 +1873,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 		goto out;
 
 	i = 0;
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	while (i < args->count) {
 		unlocked = false;
@@ -1900,7 +1900,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 			i++;
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 out:
 	kvfree(keys);
 	return r;
@@ -2089,14 +2089,14 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
 	if (!values)
 		return -ENOMEM;
 
-	down_read(&kvm->mm->mmap_sem);
+	mmap_read_lock(kvm->mm);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	if (peek)
 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
 	else
 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&kvm->mm->mmap_sem);
+	mmap_read_unlock(kvm->mm);
 
 	if (kvm->arch.migration_mode)
 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
@@ -2146,7 +2146,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 		goto out;
 	}
 
-	down_read(&kvm->mm->mmap_sem);
+	mmap_read_lock(kvm->mm);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	for (i = 0; i < args->count; i++) {
 		hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -2161,12 +2161,12 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&kvm->mm->mmap_sem);
+	mmap_read_unlock(kvm->mm);
 
 	if (!kvm->mm->context.uses_cmm) {
-		down_write(&kvm->mm->mmap_sem);
+		mmap_write_lock(kvm->mm);
 		kvm->mm->context.uses_cmm = 1;
-		up_write(&kvm->mm->mmap_sem);
+		mmap_write_unlock(kvm->mm);
 	}
 out:
 	vfree(bits);
@@ -2239,9 +2239,9 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
 		if (r)
 			break;
 
-		down_write(&current->mm->mmap_sem);
+		mmap_write_lock(current->mm);
 		r = gmap_mark_unmergeable();
-		up_write(&current->mm->mmap_sem);
+		mmap_write_unlock(current->mm);
 		if (r)
 			break;
 
@@ -270,18 +270,18 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 retry:
 	unlocked = false;
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	rc = get_guest_storage_key(current->mm, vmaddr, &key);
 
 	if (rc) {
 		rc = fixup_user_fault(current, current->mm, vmaddr,
 				      FAULT_FLAG_WRITE, &unlocked);
 		if (!rc) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			goto retry;
 		}
 	}
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	if (rc == -EFAULT)
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	if (rc < 0)
@@ -317,17 +317,17 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 retry:
 	unlocked = false;
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	rc = reset_guest_reference_bit(current->mm, vmaddr);
 	if (rc < 0) {
 		rc = fixup_user_fault(current, current->mm, vmaddr,
 				      FAULT_FLAG_WRITE, &unlocked);
 		if (!rc) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			goto retry;
 		}
 	}
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	if (rc == -EFAULT)
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	if (rc < 0)
@@ -385,7 +385,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
 		if (kvm_is_error_hva(vmaddr))
 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
 						m3 & SSKE_NQ, m3 & SSKE_MR,
 						m3 & SSKE_MC);
@@ -395,7 +395,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
 					      FAULT_FLAG_WRITE, &unlocked);
 			rc = !rc ? -EAGAIN : rc;
 		}
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		if (rc == -EFAULT)
 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		if (rc < 0)
@@ -1091,7 +1091,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 
 			if (rc)
 				return rc;
-			down_read(&current->mm->mmap_sem);
+			mmap_read_lock(current->mm);
 			rc = cond_set_guest_storage_key(current->mm, vmaddr,
 							key, NULL, nq, mr, mc);
 			if (rc < 0) {
@@ -1099,7 +1099,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 						      FAULT_FLAG_WRITE, &unlocked);
 				rc = !rc ? -EAGAIN : rc;
 			}
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			if (rc == -EFAULT)
 				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 			if (rc == -EAGAIN)
@@ -1220,9 +1220,9 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 		 * already correct, we do nothing and avoid the lock.
 		 */
 		if (vcpu->kvm->mm->context.uses_cmm == 0) {
-			down_write(&vcpu->kvm->mm->mmap_sem);
+			mmap_write_lock(vcpu->kvm->mm);
 			vcpu->kvm->mm->context.uses_cmm = 1;
-			up_write(&vcpu->kvm->mm->mmap_sem);
+			mmap_write_unlock(vcpu->kvm->mm);
 		}
 		/*
 		 * If we are here, we are supposed to have CMMA enabled in
@@ -1239,11 +1239,11 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	} else {
 		int srcu_idx;
 
-		down_read(&vcpu->kvm->mm->mmap_sem);
+		mmap_read_lock(vcpu->kvm->mm);
 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		i = __do_essa(vcpu, orc);
 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
-		up_read(&vcpu->kvm->mm->mmap_sem);
+		mmap_read_unlock(vcpu->kvm->mm);
 		if (i < 0)
 			return i;
 		/* Account for the possible extra cbrl entry */
@@ -1251,10 +1251,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	}
 	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
 	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 	for (i = 0; i < entries; ++i)
 		__gmap_zap(gmap, cbrlo[i]);
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 	return 0;
 }
 
@@ -433,7 +433,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 		flags |= FAULT_FLAG_USER;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	gmap = NULL;
 	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
@@ -514,7 +514,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 			}
 			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
 			flags |= FAULT_FLAG_TRIED;
-			down_read(&mm->mmap_sem);
+			mmap_read_lock(mm);
 			goto retry;
 		}
 	}
@@ -532,7 +532,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	}
 	fault = 0;
 out_up:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 out:
 	return fault;
 }
@@ -824,22 +824,22 @@ void do_secure_storage_access(struct pt_regs *regs)
 	switch (get_fault_type(regs)) {
 	case USER_FAULT:
 		mm = current->mm;
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		vma = find_vma(mm, addr);
 		if (!vma) {
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
 			break;
 		}
 		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
 		if (IS_ERR_OR_NULL(page)) {
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			break;
 		}
 		if (arch_make_page_accessible(page))
 			send_sig(SIGSEGV, current, 0);
 		put_page(page);
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		break;
 	case KERNEL_FAULT:
 		page = phys_to_page(addr);
@@ -405,10 +405,10 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		return -EINVAL;
 
 	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
+	mmap_write_lock(gmap->mm);
 	for (off = 0; off < len; off += PMD_SIZE)
 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
-	up_write(&gmap->mm->mmap_sem);
+	mmap_write_unlock(gmap->mm);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
@@ -438,7 +438,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		return -EINVAL;
 
 	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
+	mmap_write_lock(gmap->mm);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Remove old translation */
 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
@@ -448,7 +448,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 				     (void *) from + off))
 			break;
 	}
-	up_write(&gmap->mm->mmap_sem);
+	mmap_write_unlock(gmap->mm);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	if (off >= len)
@@ -495,9 +495,9 @@ unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long rc;
 
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 	rc = __gmap_translate(gmap, gaddr);
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
@@ -640,7 +640,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 	int rc;
 	bool unlocked;
 
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 
 retry:
 	unlocked = false;
@@ -663,7 +663,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 
 	rc = __gmap_link(gmap, gaddr, vmaddr);
 out_up:
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_fault);
@@ -696,7 +696,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 	unsigned long gaddr, vmaddr, size;
 	struct vm_area_struct *vma;
 
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 	for (gaddr = from; gaddr < to;
 	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
 		/* Find the vm address for the guest address */
@@ -719,7 +719,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
 		zap_page_range(vma, vmaddr, size);
 	}
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
@@ -1106,9 +1106,9 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
 		return -EINVAL;
 	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
 		return -EINVAL;
-	down_read(&gmap->mm->mmap_sem);
+	mmap_read_lock(gmap->mm);
 	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
-	up_read(&gmap->mm->mmap_sem);
+	mmap_read_unlock(gmap->mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
@@ -1696,11 +1696,11 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
 	}
 	spin_unlock(&parent->shadow_lock);
 	/* protect after insertion, so it will get properly invalidated */
-	down_read(&parent->mm->mmap_sem);
+	mmap_read_lock(parent->mm);
 	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
 				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
 				PROT_READ, GMAP_NOTIFY_SHADOW);
-	up_read(&parent->mm->mmap_sem);
+	mmap_read_unlock(parent->mm);
 	spin_lock(&parent->shadow_lock);
 	new->initialized = true;
 	if (rc) {
@@ -2543,12 +2543,12 @@ int s390_enable_sie(void)
 	/* Fail if the page tables are 2K */
 	if (!mm_alloc_pgste(mm))
 		return -EINVAL;
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	mm->context.has_pgste = 1;
 	/* split thp mappings and disable thp for future mappings */
 	thp_split_mm(mm);
 	walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
@@ -2617,7 +2617,7 @@ int s390_enable_skey(void)
 	struct mm_struct *mm = current->mm;
 	int rc = 0;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	if (mm_uses_skeys(mm))
 		goto out_up;
 
@@ -2630,7 +2630,7 @@ int s390_enable_skey(void)
 	walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
 
 out_up:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
@@ -2651,9 +2651,9 @@ static const struct mm_walk_ops reset_cmma_walk_ops = {
 
 void s390_reset_cmma(struct mm_struct *mm)
 {
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
 
@@ -2685,9 +2685,9 @@ void s390_reset_acc(struct mm_struct *mm)
 	 */
 	if (!mmget_not_zero(mm))
 		return;
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_acc);
@@ -125,7 +125,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
 	struct vm_area_struct *vma;
 	long ret;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	ret = -EINVAL;
 	vma = find_vma(current->mm, user_addr);
 	if (!vma)
@@ -135,7 +135,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
 		goto out;
 	ret = follow_pfn(vma, user_addr, pfn);
 out:
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	return ret;
 }
 
@@ -69,10 +69,10 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
 	if (addr + len < addr)
 		return -EFAULT;
 
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	vma = find_vma (current->mm, addr);
 	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		return -EFAULT;
 	}
 
@@ -91,6 +91,6 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
 	if (op & CACHEFLUSH_I)
 		flush_icache_range(addr, addr+len);
 
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 	return 0;
 }
@@ -61,7 +61,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long addr;
 	int ret;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
@@ -80,7 +80,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	current->mm->context.vdso = (void *)addr;
 
 up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 
@@ -279,7 +279,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	__bad_area_nosemaphore(regs, error_code, address, si_code);
 }
@@ -303,7 +303,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs))
@@ -328,7 +328,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 
 	/* Release mmap_sem first if necessary */
 	if (!(fault & VM_FAULT_RETRY))
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 
 	if (!(fault & VM_FAULT_ERROR))
 		return 0;
@@ -442,7 +442,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	}
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
@@ -510,5 +510,5 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 }
@@ -195,7 +195,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	if (!from_user && address >= PAGE_OFFSET)
 		goto bad_area;
@@ -271,7 +271,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -279,7 +279,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -328,7 +328,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (from_user) {
 		pagefault_out_of_memory();
 		return;
@@ -336,7 +336,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	goto no_context;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
 	if (!from_user)
 		goto no_context;
@@ -390,7 +390,7 @@ static void force_user_fault(unsigned long address, int write)
 
 	code = SEGV_MAPERR;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -415,15 +415,15 @@ static void force_user_fault(unsigned long address, int write)
 	case VM_FAULT_OOM:
 		goto do_sigbus;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
 }
 
@@ -318,7 +318,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
 		    !search_exception_tables(regs->tpc)) {
 			insn = get_fault_insn(regs, insn);
@@ -326,7 +326,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 		}
 
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	if (fault_code & FAULT_CODE_BAD_RA)
@@ -458,7 +458,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			goto retry;
 		}
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -489,7 +489,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	 */
 bad_area:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -501,7 +501,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	 */
 out_of_memory:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!(regs->tstate & TSTATE_PRIV)) {
 		pagefault_out_of_memory();
 		goto exit_exception;
@@ -514,7 +514,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 do_sigbus:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
@@ -366,7 +366,7 @@ static int map_vdso(const struct vdso_image *image,
 	unsigned long text_start, addr = 0;
 	int ret = 0;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	/*
 	 * First, get an unmapped region: then randomize it, and make sure that
@@ -422,7 +422,7 @@ static int map_vdso(const struct vdso_image *image,
 	if (ret)
 		current->mm->context.vdso = NULL;
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 
@@ -49,7 +49,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	__switch_mm(&new->context.id);
 	down_write_nested(&new->mmap_sem, 1);
 	uml_setup_stubs(new);
-	up_write(&new->mmap_sem);
+	mmap_write_unlock(new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -349,7 +349,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		printk(KERN_ERR "fix_range_common: failed, killing current "
 		       "process: %d\n", task_tgid_vnr(current));
 		/* We are under mmap_sem, release it such that current can terminate */
-		up_write(&current->mm->mmap_sem);
+		mmap_write_unlock(current->mm);
 		force_sig(SIGKILL);
 		do_signal(&current->thread.regs);
 	}
@@ -43,7 +43,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	if (is_user)
 		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
@@ -223,12 +223,12 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if (!user_mode(regs)
 		    && !search_exception_tables(regs->UCreg_pc))
 			goto no_context;
retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -263,7 +263,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
@@ -144,7 +144,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 	struct mm_struct *mm = task->mm;
 	struct vm_area_struct *vma;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -154,7 +154,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 			zap_page_range(vma, vma->vm_start, size);
 	}
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return 0;
 }
 #else
@@ -268,7 +268,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 	unsigned long text_start;
 	int ret = 0;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	addr = get_unmapped_area(NULL, addr,
@@ -311,7 +311,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 	}
 
 up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }
 
@@ -373,7 +373,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	/*
 	 * Check if we have already mapped vdso blob - fail to prevent
 	 * abusing from userspace install_speciall_mapping, which may
@@ -384,11 +384,11 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
 				vma_is_special_mapping(vma, &vvar_mapping)) {
-			up_write(&mm->mmap_sem);
+			mmap_write_unlock(mm);
 			return -EEXIST;
 		}
 	}
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 	return map_vdso(image, addr);
 }
@@ -171,7 +171,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	pte_t *pte;
 	int i;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	pgd = pgd_offset(mm, 0xA0000);
 	if (pgd_none_or_clear_bad(pgd))
 		goto out;
@@ -197,7 +197,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	}
 	pte_unmap_unlock(pte, ptl);
out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
 }
 
@@ -811,7 +811,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 }
@@ -1239,7 +1239,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * 1. Failed to acquire mmap_sem, and
 	 * 2. The access did not originate in userspace.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
 			/*
 			 * Fault from code in kernel from
@@ -1249,7 +1249,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 			return;
 		}
retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -1320,7 +1320,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 			goto retry;
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, hw_error_code, address, fault);
 		return;
@@ -58,7 +58,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!vdso_enabled)
 		return 0;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
@@ -66,7 +66,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 		vdsop);
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 	return err;
 }
@@ -74,7 +74,7 @@ void do_page_fault(struct pt_regs *regs)
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 
 	if (!vma)
@@ -139,7 +139,7 @@ void do_page_fault(struct pt_regs *regs)
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (flags & VM_FAULT_MAJOR)
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
@@ -152,7 +152,7 @@ void do_page_fault(struct pt_regs *regs)
 	 * Fix it, but check if it's kernel or user first..
 	 */
bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (user_mode(regs)) {
 		current->thread.bad_vaddr = address;
 		current->thread.error_code = is_write;
@@ -167,7 +167,7 @@ void do_page_fault(struct pt_regs *regs)
 	 * us unable to handle the page fault gracefully.
 	 */
out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -175,7 +175,7 @@ void do_page_fault(struct pt_regs *regs)
 	return;
 
do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		mm = alloc->vma_vm_mm;

 	if (mm) {
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		vma = alloc->vma;
 	}

@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		trace_binder_alloc_page_end(alloc, index);
 	}
 	if (mm) {
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		mmput(mm);
 	}
 	return 0;
@@ -303,7 +303,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	}
 err_no_vma:
 	if (mm) {
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		mmput(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;
@@ -932,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	mm = alloc->vma_vm_mm;
 	if (!mmget_not_zero(mm))
 		goto err_mmget;
-	if (!down_read_trylock(&mm->mmap_sem))
+	if (!mmap_read_trylock(mm))
 		goto err_down_read_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);

@@ -946,7 +946,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,

 		trace_binder_unmap_user_end(alloc, index);
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);

 	trace_binder_unmap_kernel_start(alloc, index);
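binder_alloc_free_page() runs from a shrinker, so it uses the trylock variant rather than blocking on a contended address space. A minimal sketch of that non-blocking pattern (names are illustrative, not binder's):

	static int reclaim_one_page(struct mm_struct *mm)
	{
		if (!mmget_not_zero(mm))
			return -ESRCH;		/* address space is going away */
		if (!mmap_read_trylock(mm)) {
			mmput(mm);
			return -EBUSY;		/* contended: back off, retry later */
		}
		/* ... unmap/reclaim work under the read lock ... */
		mmap_read_unlock(mm);
		mmput(mm);
		return 0;
	}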
@@ -1393,9 +1393,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 * concurrently and the queues are actually stopped
 	 */
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
-		down_write(&current->mm->mmap_sem);
+		mmap_write_lock(current->mm);
 		is_invalid_userptr = atomic_read(&mem->invalid);
-		up_write(&current->mm->mmap_sem);
+		mmap_write_unlock(current->mm);
 	}

 	mutex_lock(&mem->lock);
@@ -910,7 +910,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 		goto out_free_ranges;
 	}

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, start);
 	if (unlikely(!vma || start < vma->vm_start)) {
 		r = -EFAULT;
@@ -921,15 +921,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 		r = -EPERM;
 		goto out_unlock;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

 retry:
 	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	r = hmm_range_fault(range);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (unlikely(r)) {
 		/*
 		 * FIXME: This timeout should encompass the retry from
@@ -954,7 +954,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	return 0;

 out_unlock:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 out_free_pfns:
 	kvfree(range->hmm_pfns);
 out_free_ranges:
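The amdgpu hunks above show the usual hmm_range_fault() shape: the read lock is held only across the fault call itself, and the result is validated against the interval-notifier sequence afterwards. A simplified sketch of that retry loop (error handling and the timeout elided; assumes a struct hmm_range named range and an mm pointer, as in the code above):

	retry:
		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				goto retry;	/* fault raced an invalidation */
			return ret;
		}
		/* the snapshot is valid only while notifier_seq is unchanged;
		 * callers re-check with mmu_interval_read_retry() before use */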
@@ -901,7 +901,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,

 	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);

 	memory_exception_data.gpu_id = dev->id;
@@ -924,7 +924,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
 			memory_exception_data.failure.NoExecute = 0;
 	}

-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);

 	pr_debug("notpresent %d, noexecute %d, readonly %d\n",
@@ -93,7 +93,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		struct mm_struct *mm = current->mm;
 		struct vm_area_struct *vma;

-		if (down_write_killable(&mm->mmap_sem)) {
+		if (mmap_write_lock_killable(mm)) {
 			addr = -EINTR;
 			goto err;
 		}
@@ -103,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 		else
 			addr = -ENOMEM;
-		up_write(&mm->mmap_sem);
+		mmap_write_unlock(mm);
 		if (IS_ERR_VALUE(addr))
 			goto err;
 	}
@@ -200,7 +200,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 	if (IS_ERR(mn))
 		err = PTR_ERR(mn);

-	down_write(&mm->mm->mmap_sem);
+	mmap_write_lock(mm->mm);
 	mutex_lock(&mm->i915->mm_lock);
 	if (mm->mn == NULL && !err) {
 		/* Protected by mmap_sem (write-lock) */
@@ -217,7 +217,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 		err = 0;
 	}
 	mutex_unlock(&mm->i915->mm_lock);
-	up_write(&mm->mm->mmap_sem);
+	mmap_write_unlock(mm->mm);

 	if (mn && !IS_ERR(mn))
 		kfree(mn);
@@ -468,7 +468,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		if (mmget_not_zero(mm)) {
 			while (pinned < npages) {
 				if (!locked) {
-					down_read(&mm->mmap_sem);
+					mmap_read_lock(mm);
 					locked = 1;
 				}
 				ret = pin_user_pages_remote
@@ -483,7 +483,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 				pinned += ret;
 			}
 			if (locked)
-				up_read(&mm->mmap_sem);
+				mmap_read_unlock(mm);
 			mmput(mm);
 		}
 	}
@@ -175,10 +175,10 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
 	 */

 	mm = get_task_mm(current);
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);

 	if (!cli->svm.svmm) {
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		return -EINVAL;
 	}

@@ -205,7 +205,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
 	 */
 	args->result = 0;

-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);

 	return 0;
@@ -355,7 +355,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
 	if (ret)
 		goto out_free;

-	down_write(&current->mm->mmap_sem);
+	mmap_write_lock(current->mm);
 	svmm->notifier.ops = &nouveau_mn_ops;
 	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
 	if (ret)
@@ -364,12 +364,12 @@ nouveau_svmm_init(struct drm_device *dev, void *data,

 	cli->svm.svmm = svmm;
 	cli->svm.cli = cli;
-	up_write(&current->mm->mmap_sem);
+	mmap_write_unlock(current->mm);
 	mutex_unlock(&cli->mutex);
 	return 0;

 out_mm_unlock:
-	up_write(&current->mm->mmap_sem);
+	mmap_write_unlock(current->mm);
 out_free:
 	mutex_unlock(&cli->mutex);
 	kfree(svmm);
@@ -571,9 +571,9 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
 		return -EBUSY;

 	range.notifier_seq = mmu_interval_read_begin(range.notifier);
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	ret = hmm_range_fault(&range);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (ret) {
 		/*
 		 * FIXME: the input PFN_REQ flags are destroyed on
@@ -705,18 +705,18 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		/* Intersect fault window with the CPU VMA, cancelling
 		 * the fault if the address is invalid.
 		 */
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		vma = find_vma_intersection(mm, start, limit);
 		if (!vma) {
 			SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			mmput(mm);
 			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
 			continue;
 		}
 		start = max_t(u64, start, vma->vm_start);
 		limit = min_t(u64, limit, vma->vm_end);
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

 		if (buffer->fault[fi]->addr != start) {
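nouveau_svm_fault() holds the read lock only long enough to snapshot the VMA bounds, then drops it before doing the heavy fault work. A sketch of that short-critical-section idiom (error handling is illustrative):

	mmap_read_lock(mm);
	vma = find_vma_intersection(mm, start, limit);
	if (!vma) {
		mmap_read_unlock(mm);
		return -ENOENT;	/* illustrative: no mapping in the window */
	}
	start = max_t(u64, start, vma->vm_start);
	limit = min_t(u64, limit, vma->vm_end);
	mmap_read_unlock(mm);
	/* note: vma itself must not be dereferenced after the unlock,
	 * only the values copied out while the lock was held */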
@@ -196,12 +196,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
 				      &p->validated);
 	if (need_mmap_lock)
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);

 	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

 	if (need_mmap_lock)
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);

 	return r;
 }
@@ -342,17 +342,17 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	}

 	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		r = radeon_bo_reserve(bo, true);
 		if (r) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			goto release_object;
 		}

 		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		radeon_bo_unreserve(bo);
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		if (r)
 			goto release_object;
 	}
@@ -68,7 +68,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;

 		ttm_bo_get(bo);
-		up_read(&vmf->vma->vm_mm->mmap_sem);
+		mmap_read_unlock(vmf->vma->vm_mm);
 		(void) dma_fence_wait(bo->moving, true);
 		dma_resv_unlock(bo->base.resv);
 		ttm_bo_put(bo);
@@ -144,7 +144,7 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 		if (fault_flag_allow_retry_first(vmf->flags)) {
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 				ttm_bo_get(bo);
-				up_read(&vmf->vma->vm_mm->mmap_sem);
+				mmap_read_unlock(vmf->vma->vm_mm);
 				if (!dma_resv_lock_interruptible(bo->base.resv,
 								 NULL))
 					dma_resv_unlock(bo->base.resv);
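The ttm hunks drop the fault path's read lock from inside a .fault handler before a potentially long sleep. That is only legal when the handler reports VM_FAULT_RETRY, so the core fault code knows the lock is gone and re-runs the fault. An illustrative sketch, not ttm's actual code:

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		if (fault_flag_allow_retry_first(vmf->flags) &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* drop the caller's lock before a long wait */
			mmap_read_unlock(vmf->vma->vm_mm);
			/* ... wait for hardware / fences here ... */
			return VM_FAULT_RETRY;	/* tells core MM the lock was released */
		}
		return VM_FAULT_NOPAGE;
	}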
@@ -429,7 +429,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 				ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
 				PAGE_SIZE / sizeof(struct page *));

-		down_read(&owning_mm->mmap_sem);
+		mmap_read_lock(owning_mm);
 		/*
 		 * Note: this might result in redundent page getting. We can
 		 * avoid this by checking dma_list to be 0 before calling
@@ -440,7 +440,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 		npages = get_user_pages_remote(owning_process, owning_mm,
 				user_virt, gup_num_pages,
 				flags, local_page_list, NULL, NULL);
-		up_read(&owning_mm->mmap_sem);
+		mmap_read_unlock(owning_mm);

 		if (npages < 0) {
 			if (npages != -EAGAIN)
@@ -840,7 +840,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 		 * at a time to get the lock ordering right. Typically there
 		 * will only be one mm, so no big deal.
 		 */
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		if (!mmget_still_valid(mm))
 			goto skip_mm;
 		mutex_lock(&ufile->umap_lock);
@@ -862,7 +862,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 		}
 		mutex_unlock(&ufile->umap_lock);
 skip_mm:
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		mmput(mm);
 	}
 }
|
@ -380,7 +380,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
|
||||||
unsigned long untagged_start = untagged_addr(start);
|
unsigned long untagged_start = untagged_addr(start);
|
||||||
struct vm_area_struct *vma;
|
struct vm_area_struct *vma;
|
||||||
|
|
||||||
down_read(¤t->mm->mmap_sem);
|
mmap_read_lock(current->mm);
|
||||||
/*
|
/*
|
||||||
* FIXME: Ideally this would iterate over all the vmas that
|
* FIXME: Ideally this would iterate over all the vmas that
|
||||||
* cover the memory, but for now it requires a single vma to
|
* cover the memory, but for now it requires a single vma to
|
||||||
|
@ -395,7 +395,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
|
||||||
access_flags |= IB_ACCESS_LOCAL_WRITE;
|
access_flags |= IB_ACCESS_LOCAL_WRITE;
|
||||||
}
|
}
|
||||||
|
|
||||||
up_read(¤t->mm->mmap_sem);
|
mmap_read_unlock(current->mm);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ib_umem_get(device, start, length, access_flags);
|
return ib_umem_get(device, start, length, access_flags);
|
||||||
|
|
|
@@ -106,18 +106,18 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		goto bail;
 	}

-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	for (got = 0; got < num_pages; got += ret) {
 		ret = pin_user_pages(start_page + got * PAGE_SIZE,
 				     num_pages - got,
 				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
 		if (ret < 0) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			goto bail_release;
 		}
 	}
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);

 	return 0;
 bail_release:
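qib pins user pages with the caller's mmap lock held for read, and takes care to release it on the error path as well as on success. A minimal sketch of the pin_user_pages() pattern (variable names are illustrative):

	mmap_read_lock(current->mm);
	ret = pin_user_pages(uaddr, npages, FOLL_WRITE | FOLL_LONGTERM,
			     pages, NULL);
	mmap_read_unlock(current->mm);
	if (ret < 0)
		return ret;	/* fewer pages pinned than asked: caller unwinds */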
@@ -123,7 +123,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

 	uiomr->owning_mm = mm = current->mm;
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);

 	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -187,7 +187,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	} else
 		mmgrab(uiomr->owning_mm);

-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	free_page((unsigned long) page_list);
 	return ret;
 }
@@ -394,7 +394,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 	if (!writable)
 		foll_flags |= FOLL_FORCE;

-	down_read(&mm_s->mmap_sem);
+	mmap_read_lock(mm_s);

 	mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

@@ -438,7 +438,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 			num_pages -= got;
 		}
 	}
 out_sem_up:
-	up_read(&mm_s->mmap_sem);
+	mmap_read_unlock(mm_s);

 	if (rv > 0)
 		return umem;
@@ -485,7 +485,7 @@ static void do_fault(struct work_struct *work)
 		flags |= FAULT_FLAG_WRITE;
 	flags |= FAULT_FLAG_REMOTE;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_extend_vma(mm, address);
 	if (!vma || address < vma->vm_start)
 		/* failed to get a vma in the right range */
@@ -497,7 +497,7 @@ static void do_fault(struct work_struct *work)

 	ret = handle_mm_fault(vma, address, flags);
 out:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);

 	if (ret & VM_FAULT_ERROR)
 		/* failed to service fault */
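do_fault() here services an I/O page fault on behalf of a device, and handle_mm_fault() must be called with the mmap read lock held. A condensed sketch of that shape (flags setup and fault accounting elided; illustrative error handling):

	mmap_read_lock(mm);
	vma = find_extend_vma(mm, address);
	if (vma && address >= vma->vm_start)
		ret = handle_mm_fault(vma, address, flags);
	else
		ret = VM_FAULT_ERROR;	/* no usable VMA for this address */
	mmap_read_unlock(mm);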
@@ -863,7 +863,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!mmget_not_zero(svm->mm))
 			goto bad_req;

-		down_read(&svm->mm->mmap_sem);
+		mmap_read_lock(svm->mm);
 		vma = find_extend_vma(svm->mm, address);
 		if (!vma || address < vma->vm_start)
 			goto invalid;
@@ -878,7 +878,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)

 		result = QI_RESP_SUCCESS;
 invalid:
-		up_read(&svm->mm->mmap_sem);
+		mmap_read_unlock(svm->mm);
 		mmput(svm->mm);
 bad_req:
 		/* Accounting for major/minor faults? */
@@ -535,7 +535,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
 	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

 	if (b->memory == V4L2_MEMORY_MMAP)
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);

 	videobuf_queue_lock(q);
 	retval = -EBUSY;
@@ -622,7 +622,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
 	videobuf_queue_unlock(q);

 	if (b->memory == V4L2_MEMORY_MMAP)
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);

 	return retval;
 }
@@ -169,7 +169,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
 	mem->size = PAGE_ALIGN(vb->size + offset);
 	ret = -EINVAL;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);

 	vma = find_vma(mm, untagged_baddr);
 	if (!vma)
@@ -201,7 +201,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
 	}

 out_up:
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);

 	return ret;
 }
@@ -200,9 +200,9 @@ static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
 {
 	int ret;

-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	ret = videobuf_dma_init_user_locked(dma, direction, data, size);
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);

 	return ret;
 }
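The dma-sg hunk above is the wrapper pattern: a thin function that takes the lock, calls a *_locked helper that expects the lock to be held, and releases it. A sketch with hypothetical names (my_buf and my_init_user_locked are not real videobuf symbols):

	static int my_init_user(struct my_buf *buf, int direction,
				unsigned long data, unsigned long size)
	{
		int ret;

		mmap_read_lock(current->mm);
		/* helper documents, by its name, that the caller holds the lock */
		ret = my_init_user_locked(buf, direction, data, size);
		mmap_read_unlock(current->mm);
		return ret;
	}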
@@ -207,7 +207,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
 	struct vm_area_struct *vma = NULL;
 	int rc = 0;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);

 	vma = find_vma(mm, addr);
 	if (!vma) {
@@ -218,7 +218,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
 	*vma_start = vma->vm_start;
 	*vma_end = vma->vm_end;
 out:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return rc;
 }
@@ -321,7 +321,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
 		return;
 	}

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		for (ea = vma->vm_start; ea < vma->vm_end;
 		     ea = next_segment(ea, slb.vsid)) {
@@ -336,7 +336,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
 			last_esid = slb.esid;
 		}
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);

 	mmput(mm);
 }
@@ -68,14 +68,14 @@ static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
 	struct vm_area_struct *vma;
 	struct gru_thread_state *gts = NULL;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = gru_find_vma(vaddr);
 	if (vma)
 		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
 	if (gts)
 		mutex_lock(&gts->ts_ctxlock);
 	else
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 	return gts;
 }

@@ -85,7 +85,7 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
 	struct vm_area_struct *vma;
 	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	vma = gru_find_vma(vaddr);
 	if (!vma)
 		goto err;
@@ -94,11 +94,11 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
 	if (IS_ERR(gts))
 		goto err;
 	mutex_lock(&gts->ts_ctxlock);
-	downgrade_write(&mm->mmap_sem);
+	mmap_write_downgrade(mm);
 	return gts;

 err:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return gts;
 }

@@ -108,7 +108,7 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
 static void gru_unlock_gts(struct gru_thread_state *gts)
 {
 	mutex_unlock(&gts->ts_ctxlock);
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 }

 /*
@@ -574,9 +574,9 @@ static irqreturn_t gru_intr(int chiplet, int blade)
 		 */
 		gts->ustats.fmm_tlbmiss++;
 		if (!gts->ts_force_cch_reload &&
-		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
+		    mmap_read_trylock(gts->ts_mm)) {
 			gru_try_dropin(gru, gts, tfh, NULL);
-			up_read(&gts->ts_mm->mmap_sem);
+			mmap_read_unlock(gts->ts_mm);
 		} else {
 			tfh_user_polling_mode(tfh);
 			STAT(intr_mm_lock_failed);
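gru_alloc_locked_gts() is the one caller of the downgrade operation in the hunks shown: exclusive access while state is set up, then an atomic conversion to a read lock so other readers can proceed. The idiom, sketched (the work between the calls is illustrative):

	mmap_write_lock(mm);
	/* ... exclusive setup: look up / create per-VMA state ... */
	mmap_write_downgrade(mm);	/* write lock becomes a read lock, atomically */
	/* ... reader-side work, concurrent readers now allowed ... */
	mmap_read_unlock(mm);		/* the matching release after a downgrade */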
@@ -135,7 +135,7 @@ static int gru_create_new_context(unsigned long arg)
 	if (!(req.options & GRU_OPT_MISS_MASK))
 		req.options |= GRU_OPT_MISS_FMM_INTR;

-	down_write(&current->mm->mmap_sem);
+	mmap_write_lock(current->mm);
 	vma = gru_find_vma(req.gseg);
 	if (vma) {
 		vdata = vma->vm_private_data;
@@ -146,7 +146,7 @@ static int gru_create_new_context(unsigned long arg)
 		vdata->vd_tlb_preload_count = req.tlb_preload_count;
 		ret = 0;
 	}
-	up_write(&current->mm->mmap_sem);
+	mmap_write_unlock(current->mm);

 	return ret;
 }
@@ -91,11 +91,11 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *mpnt;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);

 	mpnt = find_vma(mm, addr);
 	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		/* To avoid latency problems, we only process the current CPU,
 		 * hoping that most samples for the task are on this CPU
 		 */
@@ -103,7 +103,7 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
 		return 0;
 	}

-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return 0;
 }

@@ -256,7 +256,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
 	unsigned long cookie = NO_COOKIE;
 	struct vm_area_struct *vma;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

 		if (addr < vma->vm_start || addr >= vma->vm_end)
@@ -276,7 +276,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)

 	if (!vma)
 		cookie = INVALID_COOKIE;
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);

 	return cookie;
 }
@@ -75,9 +75,9 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
 	}

 	// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
-	down_read(&current->mm->mmap_sem);	/*  get memory map semaphore */
+	mmap_read_lock(current->mm);	/*  get memory map semaphore */
 	rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
-	up_read(&current->mm->mmap_sem);	/*  release the semaphore */
+	mmap_read_unlock(current->mm);	/*  release the semaphore */
 	if (rv != acd->page_count) {
 		dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
 		goto err_get_user_pages;
@@ -565,10 +565,10 @@ static int check_mem_type(unsigned long start, size_t num_pages)
 	if (virt_addr_valid(start))
 		return 0;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	rc = __check_mem_type(find_vma(mm, start),
 			      start + num_pages * PAGE_SIZE);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);

 	return rc;
 }
@@ -376,11 +376,11 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
 	if (!mm)
 		return -ESRCH; /* process exited */

-	ret = down_write_killable(&mm->mmap_sem);
+	ret = mmap_write_lock_killable(mm);
 	if (!ret) {
 		ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
 					  dma->lock_cap);
-		up_write(&mm->mmap_sem);
+		mmap_write_unlock(mm);
 	}

 	if (async)
@@ -452,7 +452,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 	if (prot & IOMMU_WRITE)
 		flags |= FOLL_WRITE;

-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
 				    page, NULL, NULL);
 	if (ret == 1) {
@@ -475,7 +475,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 		ret = -EFAULT;
 	}
 done:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return ret;
 }
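vfio_lock_acct() uses the killable variant so a task hit by a fatal signal does not stall waiting for a contended lock. A sketch of that pattern:

	ret = mmap_write_lock_killable(mm);
	if (ret)
		return ret;	/* -EINTR: a fatal signal arrived while waiting */
	/* ... mutate locked_vm accounting, VMAs, etc. ... */
	mmap_write_unlock(mm);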
@@ -527,7 +527,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	if (!npages)
 		return -EINVAL;

-	down_read(&dev->mm->mmap_sem);
+	mmap_read_lock(dev->mm);

 	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -580,7 +580,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 		vhost_vdpa_unmap(v, msg->iova, msg->size);
 		atomic64_sub(npages, &dev->mm->pinned_vm);
 	}
-	up_read(&dev->mm->mmap_sem);
+	mmap_read_unlock(dev->mm);
 	free_page((unsigned long)page_list);
 	return ret;
 }
@@ -625,7 +625,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 		return -EFAULT;
 	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	vma = find_vma(current->mm, op.vaddr);
 	if (!vma || vma->vm_ops != &gntdev_vmops)
 		goto out_unlock;
@@ -639,7 +639,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 	rv = 0;

 out_unlock:
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);

 	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 		return -EFAULT;
@@ -276,7 +276,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
 	if (rc || list_empty(&pagelist))
 		goto out;

-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);

 	{
 		struct page *page = list_first_entry(&pagelist,
@@ -301,7 +301,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)

 out_up:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);

 out:
 	free_page_list(&pagelist);
@@ -497,7 +497,7 @@ static long privcmd_ioctl_mmap_batch(
 		}
 	}

-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);

 	vma = find_vma(mm, m.addr);
 	if (!vma ||
@@ -553,7 +553,7 @@ static long privcmd_ioctl_mmap_batch(
 	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
 				    &pagelist, mmap_batch_fn, &state));

-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);

 	if (state.global_error) {
 		/* Write back errors in second pass. */
@@ -574,7 +574,7 @@ static long privcmd_ioctl_mmap_batch(
 	return ret;

 out_unlock:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	goto out;
 }

@@ -739,7 +739,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
 		return -EPERM;

-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);

 	vma = find_vma(mm, kdata.addr);
 	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
@@ -818,7 +818,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 	}

 out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	kfree(pfns);

 	return rc;
fs/aio.c (4 changes)

@@ -520,7 +520,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
 	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

-	if (down_write_killable(&mm->mmap_sem)) {
+	if (mmap_write_lock_killable(mm)) {
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
 		return -EINTR;
@@ -529,7 +529,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
 				       PROT_READ | PROT_WRITE,
 				       MAP_SHARED, 0, &unused, NULL);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	if (IS_ERR((void *)ctx->mmap_base)) {
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
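The rule in the changelog also maps init_rwsem to mmap_init_lock, but no such hunk appears in the portion of the diff shown here; the initialization site is presumably among the files not displayed (historically, where a fresh mm_struct is set up in the fork path). A sketch of that last pattern, with illustrative surrounding code:

	/* sketch: one-time initialization when a new mm_struct is created */
	static struct mm_struct *my_mm_setup(struct mm_struct *mm)
	{
		mmap_init_lock(mm);	/* was: init_rwsem(&mm->mmap_sem) */
		/* ... remaining mm initialization ... */
		return mm;
	}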
Some files were not shown because too many files have changed in this diff.