/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 and arm implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>

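/* A named boundary of a kernel VA region, printed as a "---[ ... ]---" header. */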
struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum address_markers_idx {
	MODULES_START_NR = 0,
	MODULES_END_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	VMEMMAP_START_NR,
	VMEMMAP_END_NR,
#endif
	FIXADDR_START_NR,
	FIXADDR_END_NR,
	PCI_START_NR,
	PCI_END_NR,
	KERNEL_SPACE_NR,
};

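/*
 * This table must mirror the enum above and stay sorted by ascending
 * start_address: note_page() advances st->marker in order as the walk
 * crosses each boundary. The vmemmap bounds are only known at runtime
 * and are filled in by ptdump_init().
 */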
static struct addr_marker address_markers[] = {
	{ MODULES_VADDR,	"Modules start" },
	{ MODULES_END,		"Modules end" },
	{ VMALLOC_START,	"vmalloc() Area" },
	{ VMALLOC_END,		"vmalloc() End" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	{ 0,			"vmemmap start" },
	{ 0,			"vmemmap end" },
#endif
	{ FIXADDR_START,	"Fixmap start" },
	{ FIXADDR_TOP,		"Fixmap end" },
	{ PCI_IO_START,		"PCI I/O start" },
	{ PCI_IO_END,		"PCI I/O end" },
	{ PAGE_OFFSET,		"Linear Mapping" },
	{ -1,			NULL },
};

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
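/*
 * Illustrative output (addresses, sizes and attributes vary with the
 * kernel configuration and VA layout):
 *
 *   ---[ vmalloc() Area ]---
 *   0xffffff8000000000-0xffffff8000200000    2M RW NX SHD AF BLK UXN MEM/NORMAL
 */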
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
};

struct prot_bits {
	u64 mask;
	u64 val;
	const char *set;
	const char *clear;
};

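/*
 * One decode rule per attribute: when (prot & mask) == val the 'set'
 * string is printed, otherwise 'clear' is printed. A NULL string
 * suppresses output for that case (see dump_prot() below).
 */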
static const struct prot_bits pte_bits[] = {
	{
		.mask = PTE_VALID,
		.val = PTE_VALID,
		.set = " ",
		.clear = "F",
	}, {
		.mask = PTE_USER,
		.val = PTE_USER,
		.set = "USR",
		.clear = "   ",
	}, {
		.mask = PTE_RDONLY,
		.val = PTE_RDONLY,
		.set = "ro",
		.clear = "RW",
	}, {
		.mask = PTE_PXN,
		.val = PTE_PXN,
		.set = "NX",
		.clear = "x ",
	}, {
		.mask = PTE_SHARED,
		.val = PTE_SHARED,
		.set = "SHD",
		.clear = "   ",
	}, {
		.mask = PTE_AF,
		.val = PTE_AF,
		.set = "AF",
		.clear = "  ",
	}, {
		.mask = PTE_NG,
		.val = PTE_NG,
		.set = "NG",
		.clear = "  ",
	}, {
		.mask = PTE_CONT,
		.val = PTE_CONT,
		.set = "CON",
		.clear = "   ",
	}, {
		.mask = PTE_TABLE_BIT,
		.val = PTE_TABLE_BIT,
		.set = "   ",
		.clear = "BLK",
	}, {
		.mask = PTE_UXN,
		.val = PTE_UXN,
		.set = "UXN",
	}, {
		.mask = PTE_ATTRINDX_MASK,
		.val = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
		.set = "DEVICE/nGnRnE",
	}, {
		.mask = PTE_ATTRINDX_MASK,
		.val = PTE_ATTRINDX(MT_DEVICE_nGnRE),
		.set = "DEVICE/nGnRE",
	}, {
		.mask = PTE_ATTRINDX_MASK,
		.val = PTE_ATTRINDX(MT_DEVICE_GRE),
		.set = "DEVICE/GRE",
	}, {
		.mask = PTE_ATTRINDX_MASK,
		.val = PTE_ATTRINDX(MT_NORMAL_NC),
		.set = "MEM/NORMAL-NC",
	}, {
		.mask = PTE_ATTRINDX_MASK,
		.val = PTE_ATTRINDX(MT_NORMAL),
		.set = "MEM/NORMAL",
	}
};

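/*
 * pg_level is indexed by the level passed to note_page(): 1 = pgd,
 * 2 = pud, 3 = pmd, 4 = pte (index 0 is unused). All levels currently
 * decode with the same pte_bits table; the per-level mask is computed
 * once in ptdump_init().
 */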
struct pg_level {
	const struct prot_bits *bits;
	size_t num;
	u64 mask;
};

static struct pg_level pg_level[] = {
	{
	}, { /* pgd */
		.bits = pte_bits,
		.num = ARRAY_SIZE(pte_bits),
	}, { /* pud */
		.bits = pte_bits,
		.num = ARRAY_SIZE(pte_bits),
	}, { /* pmd */
		.bits = pte_bits,
		.num = ARRAY_SIZE(pte_bits),
	}, { /* pte */
		.bits = pte_bits,
		.num = ARRAY_SIZE(pte_bits),
	},
};

static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
			size_t num)
{
	unsigned i;

	for (i = 0; i < num; i++, bits++) {
		const char *s;

		if ((st->current_prot & bits->mask) == bits->val)
			s = bits->set;
		else
			s = bits->clear;

		if (s)
			seq_printf(st->seq, " %s", s);
	}
}

static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
				u64 val)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		/* First entry seen: open the initial range. */
		st->level = level;
		st->current_prot = prot;
		st->start_address = addr;
		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		/* Continuity broken: dump the range tracked so far. */
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			seq_printf(st->seq, "0x%016lx-0x%016lx   ",
				   st->start_address, addr);

			/* Scale the size to the largest unit that divides it. */
			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			seq_printf(st->seq, "%9lu%c", delta, *unit);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits,
					  pg_level[st->level].num);
			seq_puts(st->seq, "\n");
		}

		if (addr >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		}

		st->start_address = addr;
		st->current_prot = prot;
		st->level = level;
	}

	if (addr >= st->marker[1].start_address) {
		st->marker++;
		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
}

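/*
 * The walk_*() helpers below iterate over one level of table each,
 * descending into the next level only where an entry is a table, and
 * reporting leaf entries (sections and ptes) to note_page().
 */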
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 4, pte_val(*pte));
	}
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
			note_page(st, addr, 3, pmd_val(*pmd));
		} else {
			BUG_ON(pmd_bad(*pmd));
			walk_pte(st, pmd, addr);
		}
	}
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (pud_none(*pud) || pud_sect(*pud)) {
			note_page(st, addr, 2, pud_val(*pud));
		} else {
			BUG_ON(pud_bad(*pud));
			walk_pmd(st, pud, addr);
		}
	}
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned i;
	unsigned long addr;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = start + i * PGDIR_SIZE;
		if (pgd_none(*pgd)) {
			note_page(st, addr, 1, pgd_val(*pgd));
		} else {
			BUG_ON(pgd_bad(*pgd));
			walk_pud(st, pgd, addr);
		}
	}
}

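/*
 * ptdump_show() drives a full walk of init_mm starting at VA_START, then
 * issues a final note_page() with level 0 to flush the last open range.
 */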
static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
	};

	walk_pgd(&st, &init_mm, VA_START);

	note_page(&st, 0, 0, 0);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open = ptdump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

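/*
 * Usage, assuming debugfs is mounted at the usual location (the file is
 * created with mode 0400, so root only):
 *
 *   # cat /sys/kernel/debug/kernel_page_tables
 */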
static int ptdump_init(void)
{
	struct dentry *pe;
	unsigned i, j;

	/* Pre-compute the combined attribute mask for each level. */
	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].bits[j].mask;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* The vmemmap bounds depend on the memory map, so fix them up now. */
	address_markers[VMEMMAP_START_NR].start_address =
				(unsigned long)virt_to_page(PAGE_OFFSET);
	address_markers[VMEMMAP_END_NR].start_address =
				(unsigned long)virt_to_page(high_memory);
#endif

	pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
				 &ptdump_fops);
	return pe ? 0 : -ENOMEM;
}
device_initcall(ptdump_init);