/*
 * TLB Exception Handling for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: April 2011 :
 *  -MMU v1: moved out legacy code into a separate file
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * Vineetg: July 2009
 *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
 *   entry, so that it doesn't knock out its I-TLB entry
 *  -Some more fine tuning:
 *   bmsk instead of add, asl.cc instead of branch, delay slot utilization etc
 *
 * Vineetg: July 2009
 *  -Practically rewrote the I/D TLB Miss handlers
 *   Now 40 and 135 instructions apiece, compared to 131 and 449 respectively.
 *   Hence leaner by 1.5K
 *   Used conditional arithmetic to replace excessive branching
 *   Also used short instructions wherever possible
 *
 * Vineetg: Aug 13th 2008
 *  -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
 *   more information in case of a Fatality
 *
 * Vineetg: March 25th Bug #92690
 *  -Added Debug Code to check if sw-ASID == hw-ASID
 *
 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
 */
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
#include <asm/tlb-mmu1.h>

#ifdef CONFIG_ISA_ARCOMPACT
;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it only provides
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
; "global" is used to free up the FIRST core reg to be able to code the rest of
; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
; need to be saved as well by extending the "global" to be 4 words. Hence
; ".size ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; only need to save a handful of regs, as opposed to the complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
; core reg as it will not be SMP safe.
; Thus scratch AUX reg is used (and no longer used to cache task PGD).
; To save the remaining 3 regs per cpu, the global is made "per-cpu".
; Epilogue thus has to locate the "per-cpu" storage for regs.
; To avoid cache line bouncing the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
;
; As simple as that....
;--------------------------------------------------------------------------
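
; The scratch area defined below is easiest to picture in C terms. This is an
; illustrative sketch only (an assumed equivalent, not part of the build);
; names mirror this file and a 32-bit register width is assumed:
;
;	#ifdef CONFIG_SMP
;	/* one cache-line sized slot per CPU, to avoid cache line bouncing */
;	static u32 ex_saved_reg1[NR_CPUS][L1_CACHE_BYTES / sizeof(u32)]
;			__aligned(L1_CACHE_BYTES);
;	#else
;	/* just 4 words, for r0-r3 of the faulting context */
;	static u32 ex_saved_reg1[4];
;	#endif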
; scratch memory to save [r0-r3] used to code TLB refill Handler
ARCFP_DATA ex_saved_reg1
	.align 1 << L1_CACHE_SHIFT
	.type   ex_saved_reg1, @object
#ifdef CONFIG_SMP
	.size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
	.size   ex_saved_reg1, 16
ex_saved_reg1:
	.zero 16
#endif
.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
	GET_CPU_ID  r0			; get to per cpu scratch mem,
	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
	add r0, @ex_saved_reg1, r0
#else
	st    r0, [@ex_saved_reg1]
	mov_s r0, @ex_saved_reg1
#endif
	st_s  r1, [r0, 4]
	st_s  r2, [r0, 8]
	st_s  r3, [r0, 12]

	; VERIFY if the ASID in MMU-PID Reg is same as
	; one in Linux data structures

	tlb_paranoid_check_asm
.endm

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
	GET_CPU_ID  r0			; get to per cpu scratch mem
	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
	add r0, @ex_saved_reg1, r0
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	lr    r0, [ARC_REG_SCRATCH_DATA0]
#else
	mov_s r0, @ex_saved_reg1
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	ld_s  r0, [r0]
#endif
.endm

#else	/* ARCv2 */

.macro TLBMISS_FREEUP_REGS
	PUSH  r0
	PUSH  r1
	PUSH  r2
	PUSH  r3
.endm

.macro TLBMISS_RESTORE_REGS
	POP   r3
	POP   r2
	POP   r1
	POP   r0
.endm

#endif
;============================================================================
;  Troubleshooting Stuff
;============================================================================

; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
; we use the MMU PID Reg to get current ASID.
; In bizarre scenarios SW and HW ASID can get out-of-sync, which is trouble.
; So we try to detect this in the TLB Miss handler
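
; Conceptually, the paranoid check in tlb_paranoid_check_asm below boils down
; to something like the following C sketch (illustrative only; the name
; 'fault_in_user_mode' and the report arguments are assumptions, see
; print_asid_mismatch() in tlb.c for the real reporting path):
;
;	u32 sw_asid = current->active_mm->context.asid;
;	u32 hw_asid = read_aux_reg(ARC_REG_PID) & 0xFF;
;
;	if ((sw_asid == 0 || (sw_asid & 0xFF) != hw_asid) && fault_in_user_mode)
;		print_asid_mismatch(...);	/* shout out the mismatch */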
.macro tlb_paranoid_check_asm

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

	GET_CURR_TASK_ON_CPU  r3
	ld r0, [r3, TASK_ACT_MM]
	ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
	breq r0, 0, 55f	; Error if no ASID allocated

	lr r1, [ARC_REG_PID]
	and r1, r1, 0xFF

	and r2, r0, 0xFF	; MMU PID bits only for comparison
	breq r1, r2, 5f

55:
	; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
	lr  r2, [erstatus]
	bbit0 r2, STATUS_U_BIT, 5f

	; We sure are in troubled waters; flag the error, but to do so
	; we need to switch to the kernel mode stack to call the error routine
	GET_TSK_STACK_BASE r3, sp

	; Call printk to shout out loud
	mov r2, 1
	j print_asid_mismatch

5:	; ASIDs match so proceed normally
	nop

#endif

.endm
;============================================================================
; TLB Miss handling Code
;============================================================================

;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
.macro LOAD_FAULT_PTE

	lr  r2, [efa]

#ifndef CONFIG_SMP
	lr  r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
#else
	GET_CURR_TASK_ON_CPU  r1
	ld  r1, [r1, TASK_ACT_MM]
	ld  r1, [r1, MM_PGD]
#endif

	lsr     r0, r2, PGDIR_SHIFT     ; Bits for indexing into PGD
	ld.as   r3, [r1, r0]            ; PGD entry corresp to faulting addr
	tst	r3, r3
	bz	do_slow_path_pf         ; if no Page Table, do page fault

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	and.f	0, r3, _PAGE_HW_SZ	; Is this Huge PMD (thp)
	add2.nz	r1, r1, r0
	bnz.d	2f		; YES: PGD == PMD has THP PTE: stop pgd walk
	mov.nz	r0, r3

#endif
	and	r1, r3, PAGE_MASK

	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT 	-> masks page-off bits from @fault-addr
	; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
	; (3) z = (pgtbl + y * 4)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_SIZE_LOG		3	/* 8 == 2 ^ 3 */
#else
#define PTE_SIZE_LOG		2	/* 4 == 2 ^ 2 */
#endif

	; multiply in step (3) above avoided by shifting lesser in step (1)
	lsr     r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
	and     r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
	ld.aw   r0, [r1, r0]            ; r0: PTE (lower word only for PAE40)
					; r1: PTE ptr

2:

.endm
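
; Roughly, LOAD_FAULT_PTE above corresponds to the following two-level walk in
; C (illustrative sketch only; 'fault_addr' stands for the EFA aux reg value
; and the THP short-circuit is omitted):
;
;	pgd_t *pgd = current->active_mm->pgd;	/* or the cached SCRATCH_DATA0 */
;	pgd_t gd = pgd[fault_addr >> PGDIR_SHIFT];
;
;	if (!pgd_val(gd))
;		goto do_slow_path_pf;		/* no page table at all */
;
;	pte_t *ptbl = (pte_t *)(pgd_val(gd) & PAGE_MASK);
;	pte_t *ptep = ptbl + ((fault_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
;	pte_t pte = *ptep;			/* r0 = PTE, r1 = ptep */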
;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
; (for PAE40, a two-word PTE maps to a three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE

.macro CONV_PTE_TO_TLB
	and    r3, r0, PTE_BITS_RWX	;          r  w  x
	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
	and.f  0,  r0, _PAGE_GLOBAL
	or.z   r2, r2, r3		; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)

	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
	or  r3, r3, r2

	sr  r3, [ARC_REG_TLBPD1]    	; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
#ifdef	CONFIG_ARC_HAS_PAE40
	ld	r3, [r1, 4]	; paddr[39..32]
	sr	r3, [ARC_REG_TLBPD1HI]
#endif

	and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb

	lr  r3,[ARC_REG_TLBPD0]     ; MMU prepares PD0 with vaddr and asid

	or  r3, r3, r2              ; S | vaddr | {sasid|asid}
	sr  r3,[ARC_REG_TLBPD0]     ; rewrite PD0
.endm
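
; The RWX folding above, as an illustrative C sketch (not built; 'pte', 'perm'
; and 'pd1' are just local names for this sketch):
;
;	u32 rwx = pte & PTE_BITS_RWX;			/* ...      r  w  x */
;	u32 perm = rwx << 3;				/* Kr Kw Kx 0  0  0 */
;	if (!(pte & _PAGE_GLOBAL))
;		perm |= rwx;				/* mirror into Ur Uw Ux */
;
;	u32 pd1 = (pte & PTE_BITS_NON_RWX_IN_PD1) | perm; /* PFN, cache bit, perms */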
;-----------------------------------------------------------------
; Commit the TLB entry into MMU

.macro COMMIT_ENTRY_TO_MMU
#if (CONFIG_ARC_MMU_VER < 4)

#ifdef CONFIG_EZNPS_MTM_EXT
	/* verify if entry for this vaddr+ASID already exists */
	sr    TLBProbe, [ARC_REG_TLBCOMMAND]
	lr    r0, [ARC_REG_TLBINDEX]
	bbit0 r0, 31, 88f
#endif

	/* Get free TLB slot: Set = computed from vaddr, way = random */
	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]

	/* Commit the Write */
#if (CONFIG_ARC_MMU_VER >= 2)   /* introduced in v2 */
	sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
#else
	sr TLBWrite, [ARC_REG_TLBCOMMAND]
#endif

#else
	sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
#endif

88:
.endm
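
; In C-like pseudo form, the pre-MMUv4 commit sequence above is roughly the
; following (illustrative only; TLB command values come from asm/mmu.h):
;
;	write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);	/* slot: set from vaddr, way random */
;	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);	/* program PD0/PD1 into that slot */
;
; MMUv4 collapses both steps into the single TLBInsertEntry command.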
ARCFP_CODE	;Fast Path Code, candidate for ICCM

;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissI)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions appropriate for executing code
	cmp_s   r2, VMALLOC_START
	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
	or.hs   r2, r2, _PAGE_GLOBAL

	and     r3, r0, r2  ; Mask out NON Flag bits from PTE
	xor.f   r3, r3, r2  ; check ( ( pte & flags_test ) == flags_test )
	bnz     do_slow_path_pf

	; Let Linux VM know that the page was accessed
	or      r0, r0, _PAGE_ACCESSED  ; set Accessed Bit
	st_s    r0, [r1]                ; Write back PTE

	CONV_PTE_TO_TLB
	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissI_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

END(EV_TLBMissI)
;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissD)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed
	; If PTE exists, it is setup: r0 = PTE, r1 = ptr to PTE, r2 = EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions appropriate for data access (R/W/R+W)

	cmp_s	r2, VMALLOC_START
	mov_s	r2, _PAGE_PRESENT	; common bit for K/U PTE
	or.hs	r2, r2, _PAGE_GLOBAL	; kernel PTE only

	; Linux PTE [RWX] bits are semantically overloaded:
	; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
	; -Otherwise they are user-mode permissions, and those are exactly
	;  same for kernel mode as well (e.g. copy_(to|from)_user)

	lr      r3, [ecr]
	btst_s  r3,  ECR_C_BIT_DTLB_LD_MISS	; Read Access
	or.nz   r2, r2, _PAGE_READ      	; chk for Read flag in PTE
	btst_s  r3,  ECR_C_BIT_DTLB_ST_MISS	; Write Access
	or.nz   r2, r2, _PAGE_WRITE     	; chk for Write flag in PTE
	; The above ladder of checks also covers XCHG access (both R and W)

	; By now, r2 is setup with all the Flags we need to check in PTE
	and     r3, r0, r2              ; Mask out NON Flag bits from PTE
	brne.d  r3, r2, do_slow_path_pf	; is ((pte & flags_test) == flags_test)
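
	; As an illustrative C sketch (not built; 'fault_addr' and 'ecr' stand
	; for the EFA/ECR aux reg values), the required-flags check above is:
	;
	;	u32 need = _PAGE_PRESENT;
	;	if (fault_addr >= VMALLOC_START)
	;		need |= _PAGE_GLOBAL;		/* kernel (vmalloc) PTE */
	;	if (ecr & BIT(ECR_C_BIT_DTLB_LD_MISS))
	;		need |= _PAGE_READ;
	;	if (ecr & BIT(ECR_C_BIT_DTLB_ST_MISS))
	;		need |= _PAGE_WRITE;		/* XCHG ends up needing both */
	;
	;	if ((pte & need) != need)
	;		goto do_slow_path_pf;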
	;----------------------------------------------------------------
	; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
	lr      r3, [ecr]
	or      r0, r0, _PAGE_ACCESSED        ; Accessed bit always
	btst_s  r3,  ECR_C_BIT_DTLB_ST_MISS   ; See if it was a Write Access ?
	or.nz   r0, r0, _PAGE_DIRTY           ; if Write, set Dirty bit as well
	st_s    r0, [r1]                      ; Write back PTE

	CONV_PTE_TO_TLB

#if (CONFIG_ARC_MMU_VER == 1)
	; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
	; memcpy where 3 parties contend for 2 ways, resulting in a livelock.
	; But only for old MMU or one with Metal Fix
	TLB_WRITE_HEURISTICS
#endif

	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissD_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:

	; Restore the 4 scratch regs saved by the fast path miss handler
	TLBMISS_RESTORE_REGS

	; Slow path TLB Miss handled as a regular ARC Exception
	; (stack switching / save the complete reg-file).
	b  call_do_page_fault
END(EV_TLBMissD)