// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;
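
	/*
	 * regs->cause and regs->badaddr were filled in by the trap entry
	 * code from the scause and stval (sbadaddr) CSRs: the exception
	 * cause and the faulting virtual address, respectively.
	 */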
	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
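
	/*
	 * Look up the VMA covering the faulting address under the mmap
	 * read lock.  If handle_mm_fault() later asks for a retry, we
	 * come back here and search again with FAULT_FLAG_TRIED set.
	 */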
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;
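
	/*
	 * Check the access type that trapped against the VMA's
	 * permissions: instruction page faults need VM_EXEC, load page
	 * faults need VM_READ, and store/AMO page faults need VM_WRITE.
	 */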
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
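	/*
	 * The pt_regs pointer passed to handle_mm_fault() is deliberately
	 * NULL for now, so the generic code does not do the perf/task
	 * fault accounting; this handler still does that itself below.
	 */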
	fault = handle_mm_fault(vma, addr, flags, NULL);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;
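
	/*
	 * Fatal fault results: OOM is handed to the OOM killer path,
	 * bus errors raise SIGBUS, and anything else is a kernel bug.
	 */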
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);
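
		/*
		 * Walk the remaining levels of the reference table; a
		 * missing entry there means the vmalloc address is not
		 * mapped at all, so fall through to the kernel-fault path.
		 */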
		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 */
		local_flush_tlb_page(addr);

		return;
	}
}