arm64: mm: Move ASID from TTBR0 to TTBR1
In preparation for mapping kernelspace and userspace with different
ASIDs, move the ASID to TTBR1 and update switch_mm to context-switch
TTBR0 via an invalid mapping (the zero page).

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 376133b7ed
commit 7655abb953
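
Background (editor's note, not part of the commit): with 16-bit ASIDs enabled
via TCR_EL1.ASID16, the ASID occupies bits [63:48] of a translation table base
register, and TCR_EL1.A1 (bit 22, added below as TCR_A1) selects whether the
current ASID is taken from TTBR0_EL1 or TTBR1_EL1. A rough sketch of the
register roles before and after this patch, with a hypothetical helper:

	/*
	 * Before:  TTBR0_EL1 = user pgd   | ASID	(TCR.A1 == 0)
	 * After:   TTBR0_EL1 = user pgd
	 *          TTBR1_EL1 = kernel pgd | ASID	(TCR.A1 == 1)
	 */
	#define TTBR_ASID_SHIFT	48	/* ASID field is TTBRx_EL1[63:48] */

	/* Hypothetical helper: compose a TTBR value from base and ASID. */
	static inline u64 ttbr_value(u64 baddr, u16 asid)
	{
		return baddr | ((u64)asid << TTBR_ASID_SHIFT);
	}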
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -57,6 +57,13 @@ static inline void cpu_set_reserved_ttbr0(void)
 	isb();
 }
 
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+	BUG_ON(pgd == swapper_pg_dir);
+	cpu_set_reserved_ttbr0();
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);
+}
+
 /*
  * TCR.T0SZ value to use when the ID map is active. Usually equals
  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
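
For context (editor's sketch, not shown in this hunk): cpu_set_reserved_ttbr0(),
which the new cpu_switch_mm() calls before the ASID changes, parks TTBR0 on the
reserved zero page. That page contains no valid descriptors, so user
translations simply fault; this is the "invalid mapping" the commit message
refers to. At the time of this commit the function reads roughly:

	static inline void cpu_set_reserved_ttbr0(void)
	{
		/* An all-zero page holds no valid table entries, so any
		 * TTBR0 walk through it faults: an invalid mapping. */
		unsigned long ttbr = __pa_symbol(empty_zero_page);

		write_sysreg(ttbr, ttbr0_el1);
		isb();
	}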
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -272,6 +272,7 @@
 #define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
 #define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)
 
+#define TCR_A1			(UL(1) << 22)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 #define TCR_HA			(UL(1) << 39)
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm)				\
-do {							\
-	BUG_ON(pgd == swapper_pg_dir);			\
-	cpu_do_switch_mm(virt_to_phys(pgd),mm);	\
-} while (0)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -139,9 +139,12 @@ ENDPROC(cpu_do_resume)
  */
 ENTRY(cpu_do_switch_mm)
 	pre_ttbr0_update_workaround x0, x2, x3
+	mrs	x2, ttbr1_el1
 	mmid	x1, x1				// get mm->context.id
-	bfi	x0, x1, #48, #16		// set the ASID
-	msr	ttbr0_el1, x0			// set TTBR0
+	bfi	x2, x1, #48, #16		// set the ASID
+	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
+	isb
+	msr	ttbr0_el1, x0			// now update TTBR0
 	isb
 	post_ttbr0_update_workaround
 	ret
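
Editor's note on the ordering above: the new ASID is written into TTBR1 and
synchronised with an ISB before TTBR0 is pointed at the incoming user tables,
so speculative walks of the new tables cannot be tagged with the outgoing
ASID. A C rendering of the same sequence, assuming the arm64 sysreg helpers
(the function name is hypothetical):

	static inline void switch_mm_sketch(u64 pgd_phys, u16 asid)
	{
		u64 ttbr1 = read_sysreg(ttbr1_el1);

		ttbr1 &= ~GENMASK_ULL(63, 48);	/* clear the old ASID...	*/
		ttbr1 |= (u64)asid << 48;	/* ...and insert the new one	*/
		write_sysreg(ttbr1, ttbr1_el1);	/* ASID now lives in TTBR1	*/
		isb();				/* make it visible first	*/
		write_sysreg(pgd_phys, ttbr0_el1); /* then switch user tables	*/
		isb();
	}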
@@ -224,7 +227,7 @@ ENTRY(__cpu_setup)
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
 	tcr_set_idmap_t0sz	x10, x9
 
 	/*