powerpc/64: Interrupts save PPR on stack rather than thread_struct
PPR is the odd register out when it comes to interrupt handling: it is saved in current->thread.ppr while all the others are saved on the stack. The difficulty with this is that accessing thread.ppr can cause an SLB fault, but the change that moved the SLB fault handler implementation to C assumed the normal exception entry handlers would not themselves cause an SLB fault. Fix this by allocating room in the interrupt stack to save PPR.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 4c2de74cc8
parent 3eeacd9f4e
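
To make the shape of the change easier to see before the hunks, here is a minimal standalone C sketch, not kernel code: the types pt_regs_sketch and thread_struct_sketch and the helper save_ppr_on_stack are invented stand-ins. It only illustrates the data flow the patch switches to, storing PPR into the exception stack frame (the new _PPR(r1) slot in pt_regs) rather than into current->thread.ppr, whose access the commit message notes can cause an SLB fault.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's pt_regs, which this patch extends with a
 * ppr slot on CONFIG_PPC64. */
struct pt_regs_sketch {
        uint64_t gpr[32];
        uint64_t ppr;           /* new: PPR saved in the stack frame */
};

/* Stand-in for thread_struct; the patch removes its ppr field. */
struct thread_struct_sketch {
        uint64_t dscr;
        /* uint64_t ppr;   -- removed, no longer the save location */
};

#define PPR_PRIORITY    3
#define DEFAULT_PPR     ((uint64_t)PPR_PRIORITY << 50)  /* was INIT_PPR */

/* New scheme: interrupt entry stores PPR into the pt_regs frame it is
 * already building on the kernel stack, so no access to the
 * task_struct is needed on the entry path. */
static void save_ppr_on_stack(struct pt_regs_sketch *regs, uint64_t ppr)
{
        regs->ppr = ppr;
}

int main(void)
{
        struct pt_regs_sketch frame = { .ppr = 0 };

        save_ppr_on_stack(&frame, DEFAULT_PPR);
        printf("PPR held in the stack frame: 0x%016llx\n",
               (unsigned long long)frame.ppr);
        return 0;
}

On the real entry path the store is the single std ra,_PPR(r1) in the SAVE_PPR hunk below, and the exit path reads the value back with ld r2,_PPR(r1) before mtspr SPRN_PPR,r2.
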
@@ -236,11 +236,10 @@
  * PPR save/restore macros used in exceptions_64s.S
  * Used for P7 or later processors
  */
-#define SAVE_PPR(area, ra, rb)                                          \
+#define SAVE_PPR(area, ra)                                              \
 BEGIN_FTR_SECTION_NESTED(940)                                           \
-        ld      ra,PACACURRENT(r13);                                    \
-        ld      rb,area+EX_PPR(r13);    /* Read PPR from paca */        \
-        std     rb,TASKTHREADPPR(ra);                                   \
+        ld      ra,area+EX_PPR(r13);    /* Read PPR from paca */        \
+        std     ra,_PPR(r1);                                            \
 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
 
 #define RESTORE_PPR_PACA(area, ra)                                      \
@@ -508,7 +507,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 3:      EXCEPTION_PROLOG_COMMON_1();                                    \
         beq     4f;             /* if from kernel mode  */              \
         ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);                           \
-        SAVE_PPR(area, r9, r10);                                        \
+        SAVE_PPR(area, r9);                                             \
 4:      EXCEPTION_PROLOG_COMMON_2(area)                                 \
         EXCEPTION_PROLOG_COMMON_3(n)                                    \
         ACCOUNT_STOLEN_TIME
@@ -32,9 +32,9 @@
 /* Default SMT priority is set to 3. Use 11- 13bits to save priority. */
 #define PPR_PRIORITY 3
 #ifdef __ASSEMBLY__
-#define INIT_PPR (PPR_PRIORITY << 50)
+#define DEFAULT_PPR (PPR_PRIORITY << 50)
 #else
-#define INIT_PPR ((u64)PPR_PRIORITY << 50)
+#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_PPC64 */
 
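As an aside (a sanity check on the numbers, not part of the patch): PPR_PRIORITY = 3 shifted left by 50 places the priority in bits 50-52 of the 64-bit value, which are bits 11-13 in the IBM (MSB-0) numbering that the comment above and the "bits 11-13 are used for ppr" comment in the entry code refer to. A few lines of C confirm it:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t default_ppr = (uint64_t)3 << 50; /* DEFAULT_PPR */

        /* IBM bit n of a 64-bit register is conventional bit 63 - n. */
        for (int ibm_bit = 11; ibm_bit <= 13; ibm_bit++)
                printf("IBM bit %d = %d\n", ibm_bit,
                       (int)((default_ppr >> (63 - ibm_bit)) & 1));
        /* Prints 0, 1, 1: binary 011 = priority 3 in PPR bits 11-13. */
        return 0;
}
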
@@ -341,7 +341,6 @@ struct thread_struct {
          * onwards.
          */
         int             dscr_inherit;
-        unsigned long   ppr;    /* used to save/restore SMT priority */
         unsigned long   tidr;
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -389,7 +388,6 @@ struct thread_struct {
         .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
         .addr_limit = KERNEL_DS, \
         .fpexc_mode = 0, \
-        .ppr = INIT_PPR, \
         .fscr = FSCR_TAR | FSCR_EBB \
 }
 #endif
@@ -51,6 +51,10 @@ struct pt_regs
                         unsigned long result;
                 };
         };
+
+#ifdef CONFIG_PPC64
+        unsigned long ppr;
+#endif
 };
 #endif
 
@@ -89,7 +89,6 @@ int main(void)
 #ifdef CONFIG_PPC64
         DEFINE(SIGSEGV, SIGSEGV);
         DEFINE(NMI_MASK, NMI_MASK);
-        OFFSET(TASKTHREADPPR, task_struct, thread.ppr);
 #else
         OFFSET(THREAD_INFO, task_struct, stack);
         DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
@@ -323,6 +322,7 @@ int main(void)
         STACK_PT_REGS_OFFSET(_ESR, dsisr);
 #else /* CONFIG_PPC64 */
         STACK_PT_REGS_OFFSET(SOFTE, softe);
+        STACK_PT_REGS_OFFSET(_PPR, ppr);
 #endif /* CONFIG_PPC64 */
 
 #if defined(CONFIG_PPC32)
@@ -386,10 +386,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 4:      /* Anything else left to do? */
 BEGIN_FTR_SECTION
-        lis     r3,INIT_PPR@highest     /* Set thread.ppr = 3 */
-        ld      r10,PACACURRENT(r13)
+        lis     r3,DEFAULT_PPR@highest  /* Set default PPR */
         sldi    r3,r3,32        /* bits 11-13 are used for ppr */
-        std     r3,TASKTHREADPPR(r10)
+        std     r3,_PPR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
         andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
@@ -942,12 +941,6 @@ fast_exception_return:
         andi.   r0,r3,MSR_RI
         beq-    .Lunrecov_restore
 
-        /* Load PPR from thread struct before we clear MSR:RI */
-BEGIN_FTR_SECTION
-        ld      r2,PACACURRENT(r13)
-        ld      r2,TASKTHREADPPR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
         /*
          * Clear RI before restoring r13. If we are returning to
          * userspace and we take an exception after restoring r13,
@@ -968,7 +961,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
         andi.   r0,r3,MSR_PR
         beq     1f
 BEGIN_FTR_SECTION
-        mtspr   SPRN_PPR,r2     /* Restore PPR */
+        /* Restore PPR */
+        ld      r2,_PPR(r1)
+        mtspr   SPRN_PPR,r2
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
         ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
         REST_GPR(13, r1)
@@ -1710,7 +1710,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                         p->thread.dscr = mfspr(SPRN_DSCR);
                 }
                 if (cpu_has_feature(CPU_FTR_HAS_PPR))
-                        p->thread.ppr = INIT_PPR;
+                        childregs->ppr = DEFAULT_PPR;
 
                 p->thread.tidr = 0;
 #endif
@@ -1609,7 +1609,7 @@ static int ppr_get(struct task_struct *target,
                    void *kbuf, void __user *ubuf)
 {
         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                   &target->thread.ppr, 0, sizeof(u64));
+                                   &target->thread.regs->ppr, 0, sizeof(u64));
 }
 
 static int ppr_set(struct task_struct *target,
@@ -1618,7 +1618,7 @@ static int ppr_set(struct task_struct *target,
                    const void *kbuf, const void __user *ubuf)
 {
         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.ppr, 0, sizeof(u64));
+                                  &target->thread.regs->ppr, 0, sizeof(u64));
 }
 
 static int dscr_get(struct task_struct *target,