/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

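/*
 * Choose the memory attributes for a user mapping of physical memory
 * (e.g. via /dev/mem): device attributes when the pfn has no struct page
 * backing, write-combine for O_SYNC mappings, and the caller's default
 * otherwise.
 */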
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

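/*
 * Allocate a zeroed page from memblock for use as an early page table and
 * return its physical address. The new page is cleared through the fixmap,
 * since it may not yet be mapped in the tables being built.
 */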
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the
	 * page table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
}

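/*
 * Create page-granular mappings for [addr, end) at the PTE level,
 * allocating a new PTE table via pgtable_alloc() if the PMD entry is
 * empty. The contiguous hint is applied where size and alignment allow.
 */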
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   bool page_mappings_only)
{
	pgprot_t __prot = prot;
	pte_t *pte;

	BUG_ON(pmd_sect(*pmd));
	if (pmd_none(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		pte_t old_pte = *pte;

		/*
		 * Set the contiguous bit for the subsequent group of PTEs if
		 * its size and alignment are appropriate.
		 */
		if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
			if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
				__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
			else
				__prot = prot;
		}

		set_pte(pte, pfn_pte(pfn, __prot));
		pfn++;

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));

	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

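/*
 * Create mappings for [addr, end) at the PMD level, using section (block)
 * mappings where the range and physical address are suitably aligned and
 * page mappings are not forced, falling back to alloc_init_pte() otherwise.
 */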
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   bool page_mappings_only)
{
	pgprot_t __prot = prot;
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	BUG_ON(pud_sect(*pud));
	if (pud_none(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		pmd_t old_pmd = *pmd;

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    !page_mappings_only) {
			/*
			 * Set the contiguous bit for the subsequent group of
			 * PMDs if its size and alignment are appropriate.
			 */
			if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
				if (end - addr >= CONT_PMD_SIZE)
					__prot = __pgprot(pgprot_val(prot) |
							  PTE_CONT);
				else
					__prot = prot;
			}
			pmd_set_huge(pmd, phys, __prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      pmd_val(*pmd)));
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc,
				       page_mappings_only);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != pmd_val(*pmd));
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

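/*
 * A 1GB block at the PUD level is only possible with 4K pages
 * (PAGE_SHIFT == 12) and when the virtual range and physical address
 * are all PUD-aligned.
 */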
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

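/*
 * Create mappings for [addr, end) at the PUD level, using 1GB block
 * mappings where use_1G_block() allows and descending into
 * alloc_init_pmd() otherwise.
 */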
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   bool page_mappings_only)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		pud_t old_pud = *pud;

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block.
		 */
		if (use_1G_block(addr, next, phys) && !page_mappings_only) {
			pud_set_huge(pud, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      pud_val(*pud)));
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc, page_mappings_only);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != pud_val(*pud));
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

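/*
 * Install a mapping of [phys, phys + size) at virt in the page tables
 * rooted at pgdir, walking the PGD entries covering the range and handing
 * each one to alloc_init_pud().
 */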
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 bool page_mappings_only)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
			       page_mappings_only);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

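/*
 * Page table allocator for mappings created once the core allocators are
 * up (used by create_pgd_mapping() below); PGALLOC_GFP includes __GFP_ZERO,
 * so the page comes back already zeroed.
 */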
static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);

	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
}

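/*
 * Create a mapping in the page tables of a non-init mm, allocating
 * intermediate table levels with pgd_pgtable_alloc() as required.
 */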
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	BUG_ON(mm == &init_mm);

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, page_mappings_only);
}

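/*
 * Used late in boot to change the attributes of existing kernel mappings
 * (e.g. from mark_rodata_ro()); no new table levels may be allocated here.
 */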
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     NULL, debug_pagealloc_enabled());
}

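/*
 * Create the linear mapping for a single memblock region, splitting
 * around the kernel image so that the [_text, __init_begin) interval
 * never gains a writable alias.
 */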
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text/rodata */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());
		return;
	}

	/*
	 * This block overlaps the kernel text/rodata mappings.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());

	/*
	 * Map the linear alias of the [_text, __init_begin) interval as
	 * read-only/non-executable. This makes the contents of the
	 * region accessible to subsystems such as hibernate, but
	 * protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL_RO,
			     early_pgtable_alloc, debug_pagealloc_enabled());
}

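/*
 * Create the linear mapping for all usable memblock regions.
 */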
static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}

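/*
 * Remap the kernel text as read-only/executable and .rodata (up to
 * __init_begin) as read-only, then flush the TLBs so the tightened
 * permissions take effect.
 */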
void mark_rodata_ro(void)
{
	unsigned long section_size;

	section_size = (unsigned long)_etext - (unsigned long)_text;
	create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
			    section_size, PAGE_KERNEL_ROX);

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	create_mapping_late(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_all();

	debug_checkwx();
}

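/*
 * Map a single segment of the kernel image (e.g. .text or .rodata) with
 * the given protection and register it as an early vm area so the region
 * stays reserved in the vmalloc space.
 */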
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
|
|
|
|
pgprot_t prot, struct vm_struct *vma)
|
2016-01-25 19:45:12 +08:00
|
|
|
{
|
2017-01-11 05:35:49 +08:00
|
|
|
phys_addr_t pa_start = __pa_symbol(va_start);
|
2016-01-25 19:45:12 +08:00
|
|
|
unsigned long size = va_end - va_start;
|
|
|
|
|
|
|
|
BUG_ON(!PAGE_ALIGNED(pa_start));
|
|
|
|
BUG_ON(!PAGE_ALIGNED(size));
|
|
|
|
|
|
|
|
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
|
2016-10-21 19:22:57 +08:00
|
|
|
early_pgtable_alloc, debug_pagealloc_enabled());
|
2016-02-16 20:52:40 +08:00
|
|
|
|
|
|
|
vma->addr = va_start;
|
|
|
|
vma->phys_addr = pa_start;
|
|
|
|
vma->size = size;
|
|
|
|
vma->flags = VM_MAP;
|
|
|
|
vma->caller = __builtin_return_address(0);
|
|
|
|
|
|
|
|
vm_area_add_early(vma);
|
2016-01-25 19:45:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create fine-grained mappings for the kernel.
|
|
|
|
*/
|
|
|
|
static void __init map_kernel(pgd_t *pgd)
|
|
|
|
{
|
2016-02-20 01:50:32 +08:00
|
|
|
static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
|
2016-01-25 19:45:12 +08:00
|
|
|
|
2016-06-23 21:53:17 +08:00
|
|
|
map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
|
|
|
|
map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
|
2016-03-30 23:43:06 +08:00
|
|
|
map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
|
|
|
|
&vmlinux_init);
|
|
|
|
map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
|
2016-01-25 19:45:12 +08:00
|
|
|
|
2016-02-16 20:52:40 +08:00
|
|
|
if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
|
|
|
|
/*
|
|
|
|
* The fixmap falls in a separate pgd to the kernel, and doesn't
|
|
|
|
* live in the carveout for the swapper_pg_dir. We can simply
|
|
|
|
* re-use the existing dir for the fixmap.
|
|
|
|
*/
|
|
|
|
set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
|
|
|
|
*pgd_offset_k(FIXADDR_START));
|
|
|
|
} else if (CONFIG_PGTABLE_LEVELS > 3) {
|
|
|
|
/*
|
|
|
|
* The fixmap shares its top level pgd entry with the kernel
|
|
|
|
* mapping. This can really only occur when we are running
|
|
|
|
* with 16k/4 levels, so we can simply reuse the pud level
|
|
|
|
* entry instead.
|
|
|
|
*/
|
|
|
|
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
|
|
|
|
set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
|
2017-01-11 05:35:49 +08:00
|
|
|
__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
|
2016-02-16 20:52:40 +08:00
|
|
|
pud_clear_fixmap();
|
|
|
|
} else {
|
|
|
|
BUG();
|
|
|
|
}
|
2016-01-25 19:45:12 +08:00
|
|
|
|
|
|
|
kasan_copy_shadow(pgd);
|
|
|
|
}
|
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
/*
|
|
|
|
* paging_init() sets up the page tables, initialises the zone memory
|
|
|
|
* maps and sets up the zero page.
|
|
|
|
*/
|
|
|
|
void __init paging_init(void)
|
|
|
|
{
|
2016-01-25 19:45:12 +08:00
|
|
|
phys_addr_t pgd_phys = early_pgtable_alloc();
|
|
|
|
pgd_t *pgd = pgd_set_fixmap(pgd_phys);
|
|
|
|
|
|
|
|
map_kernel(pgd);
|
|
|
|
map_mem(pgd);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We want to reuse the original swapper_pg_dir so we don't have to
|
|
|
|
* communicate the new address to non-coherent secondaries in
|
|
|
|
* secondary_entry, and so cpu_switch_mm can generate the address with
|
|
|
|
* adrp+add rather than a load from some global variable.
|
|
|
|
*
|
|
|
|
* To do this we need to go via a temporary pgd.
|
|
|
|
*/
|
|
|
|
cpu_replace_ttbr1(__va(pgd_phys));
|
|
|
|
memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
|
2017-01-11 05:35:49 +08:00
|
|
|
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
|
2016-01-25 19:45:12 +08:00
|
|
|
|
|
|
|
pgd_clear_fixmap();
|
|
|
|
memblock_free(pgd_phys, PAGE_SIZE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
|
|
|
|
* allocated with it.
|
|
|
|
*/
|
2017-01-11 05:35:49 +08:00
|
|
|
memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
|
2016-01-25 19:45:12 +08:00
|
|
|
SWAPPER_DIR_SIZE - PAGE_SIZE);
|
2012-03-05 19:49:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether a kernel address is valid (derived from arch/x86/).
|
|
|
|
*/
|
|
|
|
int kern_addr_valid(unsigned long addr)
|
|
|
|
{
|
|
|
|
pgd_t *pgd;
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
pte_t *pte;
|
|
|
|
|
|
|
|
if ((((long)addr) >> VA_BITS) != -1UL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
pgd = pgd_offset_k(addr);
|
|
|
|
if (pgd_none(*pgd))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
pud = pud_offset(pgd, addr);
|
|
|
|
if (pud_none(*pud))
|
|
|
|
return 0;
|
|
|
|
|
2014-05-06 21:02:27 +08:00
|
|
|
if (pud_sect(*pud))
|
|
|
|
return pfn_valid(pud_pfn(*pud));
|
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
pmd = pmd_offset(pud, addr);
|
|
|
|
if (pmd_none(*pmd))
|
|
|
|
return 0;
|
|
|
|
|
2014-04-16 01:53:24 +08:00
|
|
|
if (pmd_sect(*pmd))
|
|
|
|
return pfn_valid(pmd_pfn(*pmd));
|
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
pte = pte_offset_kernel(pmd, addr);
|
|
|
|
if (pte_none(*pte))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return pfn_valid(pte_pfn(*pte));
|
|
|
|
}
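A hedged usage sketch (the caller below is hypothetical): code that peeks at arbitrary kernel virtual addresses, such as a /dev/kmem-style reader, is expected to check kern_addr_valid() first, since the walk above only confirms that a translation to a valid pfn exists.

/* Hypothetical caller; locking and error handling trimmed. */
static int read_kernel_byte(unsigned long addr, u8 *val)
{
	if (!kern_addr_valid(addr))
		return -EFAULT;

	*val = *(u8 *)addr;
	return 0;
}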
|
|
|
|
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
2015-10-19 21:19:28 +08:00
|
|
|
#if !ARM64_SWAPPER_USES_SECTION_MAPS
|
2013-04-30 06:07:50 +08:00
|
|
|
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
|
2012-03-05 19:49:27 +08:00
|
|
|
{
|
2013-04-30 06:07:50 +08:00
|
|
|
return vmemmap_populate_basepages(start, end, node);
|
2012-03-05 19:49:27 +08:00
|
|
|
}
|
2015-10-19 21:19:28 +08:00
|
|
|
#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
|
2013-04-30 06:07:50 +08:00
|
|
|
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
|
2012-03-05 19:49:27 +08:00
|
|
|
{
|
2013-04-30 06:07:50 +08:00
|
|
|
unsigned long addr = start;
|
2012-03-05 19:49:27 +08:00
|
|
|
unsigned long next;
|
|
|
|
pgd_t *pgd;
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
|
|
|
|
do {
|
|
|
|
next = pmd_addr_end(addr, end);
|
|
|
|
|
|
|
|
pgd = vmemmap_pgd_populate(addr, node);
|
|
|
|
if (!pgd)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pud = vmemmap_pud_populate(pgd, addr, node);
|
|
|
|
if (!pud)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pmd = pmd_offset(pud, addr);
|
|
|
|
if (pmd_none(*pmd)) {
|
|
|
|
void *p = NULL;
|
|
|
|
|
|
|
|
p = vmemmap_alloc_block_buf(PMD_SIZE, node);
|
|
|
|
if (!p)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2014-04-03 22:57:15 +08:00
|
|
|
set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
|
2012-03-05 19:49:27 +08:00
|
|
|
} else
|
|
|
|
vmemmap_verify((pte_t *)pmd, node, addr, next);
|
|
|
|
} while (addr = next, addr != end);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
|
2013-04-30 06:07:50 +08:00
|
|
|
void vmemmap_free(unsigned long start, unsigned long end)
|
2013-02-23 08:33:08 +08:00
|
|
|
{
|
|
|
|
}
|
2012-03-05 19:49:27 +08:00
|
|
|
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
|
2014-11-22 05:50:42 +08:00
|
|
|
|
|
|
|
static inline pud_t *fixmap_pud(unsigned long addr)
|
|
|
|
{
|
|
|
|
pgd_t *pgd = pgd_offset_k(addr);
|
|
|
|
|
|
|
|
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
|
|
|
|
|
2016-02-16 20:52:38 +08:00
|
|
|
return pud_offset_kimg(pgd, addr);
|
2014-11-22 05:50:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline pmd_t *fixmap_pmd(unsigned long addr)
|
|
|
|
{
|
|
|
|
pud_t *pud = fixmap_pud(addr);
|
|
|
|
|
|
|
|
BUG_ON(pud_none(*pud) || pud_bad(*pud));
|
|
|
|
|
2016-02-16 20:52:38 +08:00
|
|
|
return pmd_offset_kimg(pud, addr);
|
2014-11-22 05:50:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline pte_t *fixmap_pte(unsigned long addr)
|
|
|
|
{
|
2016-02-16 20:52:38 +08:00
|
|
|
return &bm_pte[pte_index(addr)];
|
2014-11-22 05:50:42 +08:00
|
|
|
}
|
|
|
|
|
2017-01-11 05:35:49 +08:00
|
|
|
/*
|
|
|
|
* The p*d_populate functions call virt_to_phys implicitly so they can't be used
|
|
|
|
* directly on kernel symbols (bm_p*d). This function is called too early to use
|
|
|
|
* lm_alias so __p*d_populate functions must be used to populate with the
|
|
|
|
* physical address from __pa_symbol.
|
|
|
|
*/
|
2014-11-22 05:50:42 +08:00
|
|
|
void __init early_fixmap_init(void)
|
|
|
|
{
|
|
|
|
pgd_t *pgd;
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
unsigned long addr = FIXADDR_START;
|
|
|
|
|
|
|
|
pgd = pgd_offset_k(addr);
|
arm64: add support for kernel ASLR
This adds support for KASLR, implemented based on entropy provided by
the bootloader in the /chosen/kaslr-seed DT property. Depending on the size
of the address space (VA_BITS) and the page size, the entropy in the
virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all
4 levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core kernel
to be resolved via entries in the module PLTs.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is
randomized by choosing a page aligned 128 MB region inside the interval
[_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of
entropy (depending on page size), independently of the kernel randomization,
but still guarantees that modules are within the range of relative branch
and jump instructions (with the caveat that, since the module region is
shared with other uses of the vmalloc area, modules may need to be loaded
further away if the module region is exhausted).
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-01-26 21:12:01 +08:00
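As a rough illustration of the non-FULL module placement described above (a sketch under stated assumptions, not the kernel's kaslr.c; the helper name and the 16-bit seed scaling are assumptions): the base of the 128 MB module region may slide by at most 128 MB minus the size of [_stext, _etext), which keeps every module within branch range of the kernel text.

/* Sketch only: choose a page-aligned module region base inside
 * [_etext - 128 MB, _stext + 128 MB) from a 16-bit seed. */
static unsigned long pick_module_base(unsigned long stext, unsigned long etext,
				      u16 seed)
{
	unsigned long range = SZ_128M - (etext - stext);	/* legal slide for the base */
	unsigned long offset = (range * seed) >> 16;		/* scale seed into [0, range) */

	return PAGE_ALIGN(etext + offset - SZ_128M);
}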
|
|
|
if (CONFIG_PGTABLE_LEVELS > 3 &&
|
2017-01-11 05:35:49 +08:00
|
|
|
!(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
|
2016-02-16 20:52:40 +08:00
|
|
|
/*
|
|
|
|
* We only end up here if the kernel mapping and the fixmap
|
|
|
|
* share the top level pgd entry, which should only happen on
|
|
|
|
* 16k/4 levels configurations.
|
|
|
|
*/
|
|
|
|
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
|
|
|
|
pud = pud_offset_kimg(pgd, addr);
|
|
|
|
} else {
|
2017-01-11 05:35:49 +08:00
|
|
|
if (pgd_none(*pgd))
|
|
|
|
__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
|
2016-02-16 20:52:40 +08:00
|
|
|
pud = fixmap_pud(addr);
|
|
|
|
}
|
2017-01-11 05:35:49 +08:00
|
|
|
if (pud_none(*pud))
|
|
|
|
__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
|
2016-02-16 20:52:38 +08:00
|
|
|
pmd = fixmap_pmd(addr);
|
2017-01-11 05:35:49 +08:00
|
|
|
__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
|
2014-11-22 05:50:42 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The boot-ioremap range spans multiple pmds, for which
|
2016-02-16 20:52:38 +08:00
|
|
|
* we are not prepared:
|
2014-11-22 05:50:42 +08:00
|
|
|
*/
|
|
|
|
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
|
|
|
|
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
|
|
|
|
|
|
|
|
if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
|
|
|
|
|| pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
|
|
|
|
WARN_ON(1);
|
|
|
|
pr_warn("pmd %p != %p, %p\n",
|
|
|
|
pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
|
|
|
|
fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
|
|
|
|
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
|
|
|
|
fix_to_virt(FIX_BTMAP_BEGIN));
|
|
|
|
pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
|
|
|
|
fix_to_virt(FIX_BTMAP_END));
|
|
|
|
|
|
|
|
pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
|
|
|
|
pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void __set_fixmap(enum fixed_addresses idx,
|
|
|
|
phys_addr_t phys, pgprot_t flags)
|
|
|
|
{
|
|
|
|
unsigned long addr = __fix_to_virt(idx);
|
|
|
|
pte_t *pte;
|
|
|
|
|
2015-03-04 21:27:35 +08:00
|
|
|
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
|
2014-11-22 05:50:42 +08:00
|
|
|
|
|
|
|
pte = fixmap_pte(addr);
|
|
|
|
|
|
|
|
if (pgprot_val(flags)) {
|
|
|
|
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
|
|
|
|
} else {
|
|
|
|
pte_clear(&init_mm, addr, pte);
|
|
|
|
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
|
|
|
|
}
|
|
|
|
}
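A hedged usage sketch of __set_fixmap() (the helper below is hypothetical, loosely modelled on how an early console maps its MMIO page through a fixed slot): a dedicated FIX_* index is pointed at one physical page, and passing an empty pgprot later tears the mapping down again.

/* Hypothetical early user of a fixmap slot. */
static void __iomem *early_map_uart(phys_addr_t uart_phys)
{
	__set_fixmap(FIX_EARLYCON_MEM_BASE, uart_phys & PAGE_MASK, FIXMAP_PAGE_IO);

	return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
				(uart_phys & ~PAGE_MASK));
}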
|
2015-06-01 19:40:32 +08:00
|
|
|
|
2016-01-26 21:12:01 +08:00
|
|
|
void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
|
2015-06-01 19:40:32 +08:00
|
|
|
{
|
|
|
|
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
|
2016-01-26 21:12:01 +08:00
|
|
|
int offset;
|
2015-06-01 19:40:32 +08:00
|
|
|
void *dt_virt;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether the physical FDT address is set and meets the minimum
|
|
|
|
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
|
2016-08-01 19:29:31 +08:00
|
|
|
* at least 8 bytes so that we can always access the magic and size
|
|
|
|
* fields of the FDT header after mapping the first chunk, double check
|
|
|
|
* here if that is indeed the case.
|
2015-06-01 19:40:32 +08:00
|
|
|
*/
|
|
|
|
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
|
|
|
|
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure that the FDT region can be mapped without the need to
|
|
|
|
* allocate additional translation table pages, so that it is safe
|
2016-02-06 08:24:46 +08:00
|
|
|
* to call create_mapping_noalloc() this early.
|
2015-06-01 19:40:32 +08:00
|
|
|
*
|
|
|
|
* On 64k pages, the FDT will be mapped using PTEs, so we need to
|
|
|
|
* be in the same PMD as the rest of the fixmap.
|
|
|
|
* On 4k pages, we'll use section mappings for the FDT so we only
|
|
|
|
* have to be in the same PUD.
|
|
|
|
*/
|
|
|
|
BUILD_BUG_ON(dt_virt_base % SZ_2M);
|
|
|
|
|
2015-10-19 21:19:28 +08:00
|
|
|
BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
|
|
|
|
__fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
|
2015-06-01 19:40:32 +08:00
|
|
|
|
2015-10-19 21:19:28 +08:00
|
|
|
offset = dt_phys % SWAPPER_BLOCK_SIZE;
|
2015-06-01 19:40:32 +08:00
|
|
|
dt_virt = (void *)dt_virt_base + offset;
|
|
|
|
|
|
|
|
/* map the first chunk so we can read the size from the header */
|
2016-02-06 08:24:46 +08:00
|
|
|
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
|
|
|
|
dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
|
2015-06-01 19:40:32 +08:00
|
|
|
|
2016-08-01 19:29:31 +08:00
|
|
|
if (fdt_magic(dt_virt) != FDT_MAGIC)
|
2015-06-01 19:40:32 +08:00
|
|
|
return NULL;
|
|
|
|
|
2016-01-26 21:12:01 +08:00
|
|
|
*size = fdt_totalsize(dt_virt);
|
|
|
|
if (*size > MAX_FDT_SIZE)
|
2015-06-01 19:40:32 +08:00
|
|
|
return NULL;
|
|
|
|
|
2016-01-26 21:12:01 +08:00
|
|
|
if (offset + *size > SWAPPER_BLOCK_SIZE)
|
2016-02-06 08:24:46 +08:00
|
|
|
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
|
2016-01-26 21:12:01 +08:00
|
|
|
round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
|
2015-06-01 19:40:32 +08:00
|
|
|
|
2016-01-26 21:12:01 +08:00
|
|
|
return dt_virt;
|
|
|
|
}
|
2015-06-01 19:40:32 +08:00
|
|
|
|
2016-01-26 21:12:01 +08:00
|
|
|
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
|
|
|
|
{
|
|
|
|
void *dt_virt;
|
|
|
|
int size;
|
|
|
|
|
|
|
|
dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
|
|
|
|
if (!dt_virt)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
memblock_reserve(dt_phys, size);
|
2015-06-01 19:40:32 +08:00
|
|
|
return dt_virt;
|
|
|
|
}
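For context, a hedged sketch of an early boot caller (the function name is hypothetical; error handling is condensed): the returned fixmap alias is validated with libfdt before anything else reads the device tree.

/* Hypothetical early-boot caller of fixmap_remap_fdt(). */
static void __init early_init_fdt(phys_addr_t dt_phys)
{
	void *fdt = fixmap_remap_fdt(dt_phys);

	if (!fdt || fdt_check_header(fdt))
		panic("Invalid device tree blob at physical address %pa\n",
		      &dt_phys);

	/* ... parse /chosen, memory nodes, etc. ... */
}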
|
2016-02-16 20:52:35 +08:00
|
|
|
|
|
|
|
int __init arch_ioremap_pud_supported(void)
|
|
|
|
{
|
|
|
|
/* only 4k granule supports level 1 block mappings */
|
|
|
|
return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
|
|
|
|
}
|
|
|
|
|
|
|
|
int __init arch_ioremap_pmd_supported(void)
|
|
|
|
{
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
|
|
|
|
{
|
|
|
|
BUG_ON(phys & ~PUD_MASK);
|
|
|
|
set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
|
|
|
|
{
|
|
|
|
BUG_ON(phys & ~PMD_MASK);
|
|
|
|
set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
|
|
|
|
return 1;
|
|
|
|
}
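These hooks are meant for the generic ioremap page-table code; the fragment below is a simplified sketch (not the exact lib/ioremap.c logic) of the decision made before installing a section entry at PMD level.

/* Sketch: use one PMD section for the I/O range when size and
 * alignment allow it; otherwise the caller falls back to PTEs. */
static int try_huge_io_pmd(pmd_t *pmd, unsigned long addr,
			   phys_addr_t phys, unsigned long size, pgprot_t prot)
{
	if (!arch_ioremap_pmd_supported())
		return 0;
	if (size < PMD_SIZE || ((addr | phys) & ~PMD_MASK))
		return 0;

	return pmd_set_huge(pmd, phys, prot);
}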
|
|
|
|
|
|
|
|
int pud_clear_huge(pud_t *pud)
|
|
|
|
{
|
|
|
|
if (!pud_sect(*pud))
|
|
|
|
return 0;
|
|
|
|
pud_clear(pud);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pmd_clear_huge(pmd_t *pmd)
|
|
|
|
{
|
|
|
|
if (!pmd_sect(*pmd))
|
|
|
|
return 0;
|
|
|
|
pmd_clear(pmd);
|
|
|
|
return 1;
|
|
|
|
}
|