powerpc/32: implement fast entry for syscalls on BOOKE
Syscalls don't have to preserve non-volatile registers except LR. This patch implements a fast entry for syscalls, where volatile registers get clobbered. As this entry is dedicated to syscalls, it always sets MSR_EE and warns in case MSR_EE was previously off. It also assumes that the call is always from user; system calls are unexpected from the kernel.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
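In rough terms, the fast path is possible because the syscall ABI already allows the kernel to clobber the volatile registers, so the prolog can use r9-r12 as scratch without stashing them in THREAD_NORMSAVE first, and it can hand control to the C handler with MSR_EE forced on via SRR1/rfi. The fragment below is only a conceptual sketch of that shape (labels and register choices are illustrative; the actual macro added by this patch is SYSCALL_ENTRY in the diff further down):

	/* Conceptual sketch, not the kernel's SYSCALL_ENTRY macro: scratch
	 * registers r9-r12 are simply clobbered (the syscall ABI permits it),
	 * and the handler is entered with external interrupts enabled.
	 */
	mfspr	r12, SPRN_SRR0		/* user NIP: the real macro stores it in pt_regs */
	mfspr	r9, SPRN_SRR1		/* user MSR: likewise stored in pt_regs */
	lis	r10, (MSR_KERNEL | MSR_EE)@h
	ori	r10, r10, (MSR_KERNEL | MSR_EE)@l
	lis	r11, transfer_to_syscall@h
	ori	r11, r11, transfer_to_syscall@l
	mtspr	SPRN_SRR1, r10		/* MSR after rfi: kernel mode, EE on */
	mtspr	SPRN_SRR0, r11		/* where rfi will land */
	rfi				/* enter the handler with the MMU on */

When CONFIG_TRACE_IRQFLAGS is set, the real macro instead carries the caller's MSR_EE value across, so lockdep is informed before interrupts are actually re-enabled.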
commit 1a4b739bbb
parent b86fb88855
@@ -342,7 +342,6 @@ stack_ovf:
 	SYNC
 	RFI
 
-#ifndef CONFIG_BOOKE /* to be removed once BOOKE uses fast syscall entry */
 #ifdef CONFIG_TRACE_IRQFLAGS
 trace_syscall_entry_irq_off:
 	/*
@@ -369,7 +368,6 @@ transfer_to_syscall:
 	andi.	r12,r9,MSR_EE
 	beq-	trace_syscall_entry_irq_off
 #endif /* CONFIG_TRACE_IRQFLAGS */
-#endif /* !CONFIG_BOOKE */
 
 /*
  * Handle a system call.
@@ -382,11 +380,6 @@ _GLOBAL(DoSyscall)
 	stw	r3,ORIG_GPR3(r1)
 	li	r12,0
 	stw	r12,RESULT(r1)
-#ifdef CONFIG_BOOKE /* to be removed once BOOKE uses fast syscall entry */
-	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
-	rlwinm	r11,r11,0,4,2
-	stw	r11,_CCR(r1)
-#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/* Make sure interrupts are enabled */
 	mfmsr	r11
@@ -285,8 +285,7 @@ interrupt_base:
 #endif
 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
-	NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
-	EXC_XFER_SYS(0x0c00, DoSyscall)
+	SYSCALL_ENTRY	0xc00 BOOKE_INTERRUPT_SYSCALL
 
 	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
@@ -6,6 +6,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_booke_hv_asm.h>
 
+#ifdef __ASSEMBLY__
+
 /*
  * Macros used for common Book-e exception handling
  */
@@ -81,6 +83,101 @@ END_BTB_FLUSH_SECTION
 	SAVE_4GPRS(3, r11); \
 	SAVE_2GPRS(7, r11)
 
+.macro SYSCALL_ENTRY trapno intno
+	mfspr	r10, SPRN_SPRG_THREAD
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+	mtspr	SPRN_SPRG_WSCRATCH0, r10
+	stw	r11, THREAD_NORMSAVE(0)(r10)
+	stw	r13, THREAD_NORMSAVE(2)(r10)
+	mfcr	r13			/* save CR in r13 for now */
+	mfspr	r11, SPRN_SRR1
+	mtocrf	0x80, r11		/* check MSR[GS] without clobbering reg */
+	bf	3, 1975f
+	b	kvmppc_handler_BOOKE_INTERRUPT_\intno\()_SPRN_SRR1
+1975:
+	mr	r12, r13
+	lwz	r13, THREAD_NORMSAVE(2)(r10)
+FTR_SECTION_ELSE
+#endif
+	mfcr	r12
+#ifdef CONFIG_KVM_BOOKE_HV
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
+#endif
+	BOOKE_CLEAR_BTB(r11)
+	lwz	r11, TASK_STACK - THREAD(r10)
+	rlwinm	r12,r12,0,4,2		/* Clear SO bit in CR */
+	ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
+	stw	r12, _CCR(r11)		/* save various registers */
+	mflr	r12
+	stw	r12,_LINK(r11)
+	mfspr	r12,SPRN_SRR0
+	stw	r1, GPR1(r11)
+	mfspr	r9,SPRN_SRR1
+	stw	r1, 0(r11)
+	mr	r1, r11
+	stw	r12,_NIP(r11)
+	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
+	lis	r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+	stw	r2,GPR2(r11)
+	addi	r12, r12, STACK_FRAME_REGS_MARKER@l
+	stw	r9,_MSR(r11)
+	li	r2, \trapno + 1
+	stw	r12, 8(r11)
+	stw	r2,_TRAP(r11)
+	SAVE_GPR(0, r11)
+	SAVE_4GPRS(3, r11)
+	SAVE_2GPRS(7, r11)
+
+	addi	r11,r1,STACK_FRAME_OVERHEAD
+	addi	r2,r10,-THREAD
+	stw	r11,PT_REGS(r10)
+	/* Check to see if the dbcr0 register is set up to debug.  Use the
+	   internal debug mode bit to do this. */
+	lwz	r12,THREAD_DBCR0(r10)
+	andis.	r12,r12,DBCR0_IDM@h
+	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
+	beq+	3f
+	/* From user and task is ptraced - load up global dbcr0 */
+	li	r12,-1			/* clear all pending debug events */
+	mtspr	SPRN_DBSR,r12
+	lis	r11,global_dbcr0@ha
+	tophys(r11,r11)
+	addi	r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
+	lwz	r9,TASK_CPU(r2)
+	slwi	r9,r9,3
+	add	r11,r11,r9
+#endif
+	lwz	r12,0(r11)
+	mtspr	SPRN_DBCR0,r12
+	lwz	r12,4(r11)
+	addi	r12,r12,-1
+	stw	r12,4(r11)
+
+3:
+	tovirt(r2, r2)			/* set r2 to current */
+	lis	r11, transfer_to_syscall@h
+	ori	r11, r11, transfer_to_syscall@l
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/*
+	 * If MSR is changing we need to keep interrupts disabled at this point
+	 * otherwise we might risk taking an interrupt before we tell lockdep
+	 * they are enabled.
+	 */
+	lis	r10, MSR_KERNEL@h
+	ori	r10, r10, MSR_KERNEL@l
+	rlwimi	r10, r9, 0, MSR_EE
+#else
+	lis	r10, (MSR_KERNEL | MSR_EE)@h
+	ori	r10, r10, (MSR_KERNEL | MSR_EE)@l
+#endif
+	mtspr	SPRN_SRR1,r10
+	mtspr	SPRN_SRR0,r11
+	SYNC
+	RFI				/* jump to handler, enable MMU */
+.endm
+
 /* To handle the additional exception priority levels on 40x and Book-E
  * processors we allocate a stack per additional priority level.
  *
@@ -245,10 +342,6 @@ END_BTB_FLUSH_SECTION
 	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
 			  ret_from_except)
 
-#define EXC_XFER_SYS(n, hdlr) \
-	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL | MSR_EE, transfer_to_handler, \
-			  ret_from_except)
-
 /* Check for a single step debug exception while in an exception
  * handler before state has been saved.  This is to catch the case
  * where an instruction that we are trying to single step causes
@@ -418,7 +511,7 @@ END_BTB_FLUSH_SECTION
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD; \
 	EXC_XFER_STD(0x800, kernel_fp_unavailable_exception)
 
-#ifndef __ASSEMBLY__
+#else /* __ASSEMBLY__ */
 struct exception_regs {
 	unsigned long mas0;
 	unsigned long mas1;
@@ -413,8 +413,7 @@ interrupt_base:
 
 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
-	NORMAL_EXCEPTION_PROLOG(SYSCALL)
-	EXC_XFER_SYS(0x0c00, DoSyscall)
+	SYSCALL_ENTRY	0xc00 SYSCALL
 
 	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \