mirror of https://gitee.com/openkylin/linux.git
arm64: mte: Clear the tags when a page is mapped in user-space with PROT_MTE
Pages allocated by the kernel are not guaranteed to have the tags zeroed, especially as the kernel does not (yet) use MTE itself. To ensure the user can still access such pages when mapped into its address space, clear the tags via set_pte_at(). A new page flag - PG_mte_tagged (PG_arch_2) - is used to track pages with valid allocation tags. Since the zero page is mapped as pte_special(), it won't be covered by the above set_pte_at() mechanism. Clear its tags during early MTE initialisation. Co-developed-by: Steven Price <steven.price@arm.com> Signed-off-by: Steven Price <steven.price@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will@kernel.org>
This commit is contained in:
parent
72e6afa08e
commit
34bfeea4a9
|
@ -7,12 +7,28 @@
|
|||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/page-flags.h>
|
||||
|
||||
#include <asm/pgtable-types.h>
|
||||
|
||||
void mte_clear_page_tags(void *addr);
|
||||
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
|
||||
/* track which pages have valid allocation tags */
|
||||
#define PG_mte_tagged PG_arch_2
|
||||
|
||||
void mte_sync_tags(pte_t *ptep, pte_t pte);
|
||||
void flush_mte_state(void);
|
||||
|
||||
#else
|
||||
|
||||
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
|
||||
#define PG_mte_tagged 0
|
||||
|
||||
/* No-op stub: without CONFIG_ARM64_MTE there are no allocation tags to sync. */
static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
{
}
|
||||
/* No-op stub: without CONFIG_ARM64_MTE there is no MTE state to flush. */
static inline void flush_mte_state(void)
{
}
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include <asm/proc-fns.h>
|
||||
|
||||
#include <asm/memory.h>
|
||||
#include <asm/mte.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/pgtable-prot.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
@ -90,6 +91,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|||
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
|
||||
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
|
||||
#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
|
||||
#define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
|
||||
PTE_ATTRINDX(MT_NORMAL_TAGGED))
|
||||
|
||||
#define pte_cont_addr_end(addr, end) \
|
||||
({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
|
||||
|
@ -284,6 +287,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
|
|||
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
|
||||
__sync_icache_dcache(pte);
|
||||
|
||||
if (system_supports_mte() &&
|
||||
pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
|
||||
mte_sync_tags(ptep, pte);
|
||||
|
||||
__check_racy_pte_update(mm, ptep, pte);
|
||||
|
||||
set_pte(ptep, pte);
|
||||
|
|
|
@ -75,6 +75,7 @@
|
|||
#include <asm/cpu_ops.h>
|
||||
#include <asm/fpsimd.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/mte.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sysreg.h>
|
||||
#include <asm/traps.h>
|
||||
|
@ -1704,6 +1705,22 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
|
|||
}
|
||||
#endif /* CONFIG_ARM64_BTI */
|
||||
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
/*
 * CPU-capability enable hook for MTE (wired up via .cpu_enable in
 * arm64_features[]).  Ensures the zero page has zeroed allocation tags
 * before user space can map it with PROT_MTE.
 */
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
	/*
	 * NOTE(review): plain (non-atomic) guard flag — assumes invocations
	 * of this hook are serialised (e.g. during CPU bring-up) so the
	 * check-then-set cannot race; confirm against the caller.
	 */
	static bool cleared_zero_page = false;

	/*
	 * Clear the tags in the zero page. This needs to be done via the
	 * linear map which has the Tagged attribute.
	 */
	if (!cleared_zero_page) {
		cleared_zero_page = true;
		mte_clear_page_tags(lm_alias(empty_zero_page));
	}
}
|
||||
#endif /* CONFIG_ARM64_MTE */
|
||||
|
||||
/* Internal helper functions to match cpu capability type */
|
||||
static bool
|
||||
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
|
||||
|
@ -2133,6 +2150,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
|||
.field_pos = ID_AA64PFR1_MTE_SHIFT,
|
||||
.min_field_value = ID_AA64PFR1_MTE,
|
||||
.sign = FTR_UNSIGNED,
|
||||
.cpu_enable = cpu_enable_mte,
|
||||
},
|
||||
#endif /* CONFIG_ARM64_MTE */
|
||||
{},
|
||||
|
|
|
@ -3,12 +3,26 @@
|
|||
* Copyright (C) 2020 ARM Ltd.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/thread_info.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/mte.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
/*
 * Initialise the allocation tags of every page backing this PTE exactly
 * once.  PG_mte_tagged records that a page's tags are already valid, so
 * pages that have it set are skipped; test_and_set_bit() makes the
 * check-and-claim a single atomic step per page.
 */
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
	struct page *page = pte_page(pte);
	long remaining = compound_nr(page);

	while (remaining-- > 0) {
		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
			mte_clear_page_tags(page_address(page));
		page++;
	}
}
|
||||
|
||||
void flush_mte_state(void)
|
||||
{
|
||||
if (!system_supports_mte())
|
||||
|
|
|
@ -16,3 +16,5 @@ lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
|
|||
obj-$(CONFIG_CRC32) += crc32.o
|
||||
|
||||
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
|
||||
|
||||
obj-$(CONFIG_ARM64_MTE) += mte.o
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2020 ARM Ltd.
|
||||
*/
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
.arch armv8.5-a+memtag
|
||||
|
||||
/*
|
||||
* multitag_transfer_size - set \reg to the block size that is accessed by the
|
||||
* LDGM/STGM instructions.
|
||||
*/
|
||||
	.macro	multitag_transfer_size, reg, tmp
	mrs_s	\reg, SYS_GMID_EL1
	// Extract GMID_EL1.BS, which encodes the LDGM/STGM block size
	ubfx	\reg, \reg, #SYS_GMID_EL1_BS_SHIFT, #SYS_GMID_EL1_BS_SIZE
	// Block size in bytes = 4 << BS (used as the loop stride below)
	mov	\tmp, #4
	lsl	\reg, \tmp, \reg
	.endm
|
||||
|
||||
/*
|
||||
* Clear the tags in a page
|
||||
* x0 - address of the page to be cleared
|
||||
*/
|
||||
SYM_FUNC_START(mte_clear_page_tags)
	multitag_transfer_size x1, x2		// x1 = bytes covered per STGM
1:	stgm	xzr, [x0]			// write zero tags for this block
	add	x0, x0, x1			// advance by one transfer block
	tst	x0, #(PAGE_SIZE - 1)		// stop once x0 is page-aligned again
	b.ne	1b
	ret
SYM_FUNC_END(mte_clear_page_tags)
|
Loading…
Reference in New Issue