mirror of https://gitee.com/openkylin/linux.git
ARM: 7178/1: fault.c: Port OOM changes into do_page_fault
Commit d065bd810b
(mm: retry page fault when blocking on disk transfer) and commit 37b23e0525
(x86,mm: make pagefault killable). The above commits introduced changes into the x86 page fault handler to make it retryable as well as killable. These changes reduce the mmap_sem hold time, which is crucial during OOM killer invocation. Port these changes to ARM. Without these changes, my ARM board encounters many hang and livelock scenarios. After applying this patch, OOM performance improves according to my testing. Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
parent
df0e74da6d
commit
8878a539ff
|
@@ -231,7 +231,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
|
||||||
|
|
||||||
static int __kprobes
|
static int __kprobes
|
||||||
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
|
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
|
||||||
struct task_struct *tsk)
|
unsigned int flags, struct task_struct *tsk)
|
||||||
{
|
{
|
||||||
struct vm_area_struct *vma;
|
struct vm_area_struct *vma;
|
||||||
int fault;
|
int fault;
|
||||||
|
@@ -253,18 +253,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
|
||||||
* If for any reason at all we couldn't handle the fault, make
|
|
||||||
* sure we exit gracefully rather than endlessly redo the fault.
|
|
||||||
*/
|
|
||||||
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
|
|
||||||
if (unlikely(fault & VM_FAULT_ERROR))
|
|
||||||
return fault;
|
|
||||||
if (fault & VM_FAULT_MAJOR)
|
|
||||||
tsk->maj_flt++;
|
|
||||||
else
|
|
||||||
tsk->min_flt++;
|
|
||||||
return fault;
|
|
||||||
|
|
||||||
check_stack:
|
check_stack:
|
||||||
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
|
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
|
||||||
|
@@ -279,6 +268,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
||||||
struct task_struct *tsk;
|
struct task_struct *tsk;
|
||||||
struct mm_struct *mm;
|
struct mm_struct *mm;
|
||||||
int fault, sig, code;
|
int fault, sig, code;
|
||||||
|
int write = fsr & FSR_WRITE;
|
||||||
|
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
|
||||||
|
(write ? FAULT_FLAG_WRITE : 0);
|
||||||
|
|
||||||
if (notify_page_fault(regs, fsr))
|
if (notify_page_fault(regs, fsr))
|
||||||
return 0;
|
return 0;
|
||||||
|
@@ -305,6 +297,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
||||||
if (!down_read_trylock(&mm->mmap_sem)) {
|
if (!down_read_trylock(&mm->mmap_sem)) {
|
||||||
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
|
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
|
||||||
goto no_context;
|
goto no_context;
|
||||||
|
retry:
|
||||||
down_read(&mm->mmap_sem);
|
down_read(&mm->mmap_sem);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
|
@@ -320,14 +313,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
fault = __do_page_fault(mm, addr, fsr, tsk);
|
fault = __do_page_fault(mm, addr, fsr, flags, tsk);
|
||||||
up_read(&mm->mmap_sem);
|
|
||||||
|
/* If we need to retry but a fatal signal is pending, handle the
|
||||||
|
* signal first. We do not need to release the mmap_sem because
|
||||||
|
* it would already be released in __lock_page_or_retry in
|
||||||
|
* mm/filemap.c. */
|
||||||
|
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Major/minor page fault accounting is only done on the
|
||||||
|
* initial attempt. If we go through a retry, it is extremely
|
||||||
|
* likely that the page will be found in page cache at that point.
|
||||||
|
*/
|
||||||
|
|
||||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
|
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
|
||||||
if (fault & VM_FAULT_MAJOR)
|
if (flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
|
if (fault & VM_FAULT_MAJOR) {
|
||||||
else if (fault & VM_FAULT_MINOR)
|
tsk->maj_flt++;
|
||||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
|
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
|
||||||
|
regs, addr);
|
||||||
|
} else {
|
||||||
|
tsk->min_flt++;
|
||||||
|
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
|
||||||
|
regs, addr);
|
||||||
|
}
|
||||||
|
if (fault & VM_FAULT_RETRY) {
|
||||||
|
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
|
||||||
|
* of starvation. */
|
||||||
|
flags &= ~FAULT_FLAG_ALLOW_RETRY;
|
||||||
|
goto retry;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
up_read(&mm->mmap_sem);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
|
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
|
||||||
|
|
Loading…
Reference in New Issue