sh: TLB protection violation exception optimizations.

This adds a bit of rework so that TLB protection violations skip the
TLB miss fast path and go directly into do_page_fault(), as these
always require slow-path handling.

Based on an earlier patch by SUGIOKA Toshinobu.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit 112e58471d
parent e7b8b7f16e
Author: Paul Mundt
Date:   2009-08-15 02:49:40 +09:00

 2 files changed, 22 insertions(+), 14 deletions(-)
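Previously, all five of these exception vectors branched to a common
call_dpf stub, which always attempted the fast-path TLB refill first and
fell back to the full fault handler only when that failed. A protection
violation can never be fixed up by a refill alone, so for those two
vectors the fast-path attempt was pure overhead. As a rough C sketch of
the dispatch change (old_dispatch/new_protection_dispatch are
illustrative names; the real dispatch is the entry.S assembly below):

	/*
	 * Illustrative only -- the real dispatch is in entry.S.
	 * writeaccess mirrors the r5 argument each vector sets up.
	 */

	/* Before: every MMU fault vector paid for the fast-path attempt. */
	static void old_dispatch(struct pt_regs *regs, unsigned long writeaccess,
				 unsigned long address)
	{
		if (__do_page_fault(regs, writeaccess, address) == 0)
			return;				/* TLB refilled, resume */
		do_page_fault(regs, writeaccess, address);	/* slow path */
	}

	/* After: protection violations branch straight to the slow path. */
	static void new_protection_dispatch(struct pt_regs *regs,
					    unsigned long writeaccess,
					    unsigned long address)
	{
		do_page_fault(regs, writeaccess, address);
	}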

--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -113,34 +113,33 @@ OFF_TRA = (16*4+6*4)
 #if defined(CONFIG_MMU)
 	.align	2
 ENTRY(tlb_miss_load)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#0, r5
 
 	.align	2
 ENTRY(tlb_miss_store)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#1, r5
 
 	.align	2
 ENTRY(initial_page_write)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#1, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_load)
-	bra	call_dpf
+	bra	call_do_page_fault
 	 mov	#0, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_store)
-	bra	call_dpf
+	bra	call_do_page_fault
 	 mov	#1, r5
-
-call_dpf:
+call_handle_tlbmiss:
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
 	mov	r6, r9
 	mov.l	2f, r0
 	sts	pr, r10
 	jsr	@r0
@@ -151,16 +150,25 @@ call_dpf:
 	lds	r10, pr
 	rts
 	 nop
-0:	mov.l	3f, r0
-	mov	r9, r6
+0:
 	mov	r8, r5
+call_do_page_fault:
+	mov.l	1f, r0
+	mov.l	@r0, r6
+
+	sti
+
+	mov.l	3f, r0
+	mov.l	4f, r1
+	mov	r15, r4
 	jmp	@r0
-	 mov	r15, r4
+	 lds	r1, pr
 
 	.align 2
1:	.long	MMU_TEA
-2:	.long	__do_page_fault
+2:	.long	handle_tlbmiss
 3:	.long	do_page_fault
+4:	.long	ret_from_exception
 
 	.align	2
 ENTRY(address_error_load)
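Two details of the rewritten stubs are worth calling out: the new
call_do_page_fault re-enables interrupts with sti before entering C code
(the fast path runs with them disabled), and it loads ret_from_exception
into pr ahead of the jmp, so do_page_fault()'s return lands directly in
the exception-exit path instead of bouncing back through the stub. In
rough C terms (control-flow sketch only; return_to_faulting_insn()
stands in for the rts sequence):

	/* Control-flow sketch of the new stubs; not literal code. */
	if (handle_tlbmiss(regs, writeaccess, address) == 0)
		return_to_faulting_insn();	/* 0: TLB refilled, retry */

	/* a nonzero return falls through into call_do_page_fault */
	local_irq_enable();			/* the "sti" in the stub */
	do_page_fault(regs, writeaccess, address);
	/*
	 * do_page_fault() "returns" straight into ret_from_exception,
	 * because pr was pointed there before the jmp.
	 */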

--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -318,9 +318,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 /*
  * Called with interrupts disabled.
  */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
+asmlinkage int __kprobes
+handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
+	       unsigned long address)
 {
 	pgd_t *pgd;
 	pud_t *pud;
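The hunk truncates here; the body of handle_tlbmiss() is the hand-rolled
page-table walk the fast path re-enters. Roughly, and glossing over the
P3/vmalloc-area handling, it follows this shape (a sketch built from the
generic page-table accessors of this era, not a verbatim quote of
fault_32.c):

	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* Pick the right page-table root for the faulting address. */
	if (address >= TASK_SIZE)
		pgd = pgd_offset_k(address);	/* kernel mapping */
	else
		pgd = pgd_offset(current->mm, address);

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;	/* nothing mapped: take the slow path */
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return 1;
	if (writeaccess && !pte_write(entry))
		return 1;	/* would be a protection fault */

	/* Mark the page accessed (and dirty on write), then reload. */
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	return 0;	/* TLB refilled; retry the faulting access */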