diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 5a1395499508..1c966e47b36f 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -7,16 +7,30 @@
 #include
 
 struct interrupt_state {
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3E_64
 	enum ctx_state ctx_state;
 #endif
 };
 
 static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
 {
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3E_64
 	state->ctx_state = exception_enter();
 #endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (user_mode(regs)) {
+		CT_WARN_ON(ct_state() != CONTEXT_USER);
+		user_exit_irqoff();
+	} else {
+		/*
+		 * CT_WARN_ON comes here via program_check_exception,
+		 * so avoid recursion.
+		 */
+		if (TRAP(regs) != 0x700)
+			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+	}
+#endif
 }
 
 /*
@@ -35,9 +49,23 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
  */
 static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
 {
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3E_64
 	exception_exit(state->ctx_state);
 #endif
+
+	/*
+	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
+	 * context tracking, which is a cleaner way to handle PREEMPT=y
+	 * and avoid context entry/exit in e.g., preempt_schedule_irq(),
+	 * which is likely to be where the core code wants to end up.
+	 *
+	 * The above comment explains why we can't do the
+	 *
+	 *	if (user_mode(regs))
+	 *		user_exit_irqoff();
+	 *
+	 * sequence here.
+	 */
 }
 
 static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index 45c4420fe339..a2102e7a2713 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -255,9 +255,9 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 		ret |= _TIF_RESTOREALL;
 	}
 
-again:
 	local_irq_disable();
 
+again:
 	ti_flags = READ_ONCE(*ti_flagsp);
 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable();
@@ -307,6 +307,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	if (unlikely(!__prep_irq_for_enabled_exit(!scv))) {
 		user_exit_irqoff();
 		local_irq_enable();
+		local_irq_disable();
 		goto again;
 	}
 
@@ -341,6 +342,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	BUG_ON(!(regs->msr & MSR_PR));
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
+	CT_WARN_ON(ct_state() == CONTEXT_USER);
 
 	/*
 	 * We don't need to restore AMR on the way back to userspace for KUAP.
@@ -383,8 +385,14 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 		}
 	}
 
-	if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags))))
+	user_enter_irqoff();
+
+	if (unlikely(!__prep_irq_for_enabled_exit(true))) {
+		user_exit_irqoff();
+		local_irq_enable();
+		local_irq_disable();
 		goto again;
+	}
 
 #ifdef CONFIG_PPC_BOOK3E
 	if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
@@ -425,6 +433,12 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 		unrecoverable_exception(regs);
 	BUG_ON(regs->msr & MSR_PR);
 	BUG_ON(!FULL_REGS(regs));
+	/*
+	 * CT_WARN_ON comes here via program_check_exception,
+	 * so avoid recursion.
+	 */
+	if (TRAP(regs) != 0x700)
+		CT_WARN_ON(ct_state() == CONTEXT_USER);
 
 	amr = kuap_get_and_check_amr();
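For readers following the context-tracking logic in the hunks above, here is a rough, user-space-only sketch of the state machine the wrappers enforce: an interrupt taken from user mode should find the task in CONTEXT_USER and move it to CONTEXT_KERNEL, the return-to-user path moves it back, and the kernel-mode check is skipped for trap 0x700 because CT_WARN_ON itself reports through program_check_exception. This is not part of the patch and none of it is kernel API; every name below (model_enter, model_exit_user, ct_warn_on) is a hypothetical stand-in used only to illustrate the transitions.

/*
 * Hypothetical, simplified model of the rules the patch adds in
 * interrupt_enter_prepare() / interrupt_exit_user_prepare().
 * Illustration only; compiles and runs as plain user-space C.
 */
#include <stdbool.h>
#include <stdio.h>

enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };

static enum ctx_state cur_state = CONTEXT_USER;

static void ct_warn_on(bool cond, const char *where)
{
	if (cond)
		printf("CT_WARN_ON hit in %s\n", where);
}

/* Models interrupt_enter_prepare(): reconcile context on the way in. */
static void model_enter(bool from_user, unsigned int trap)
{
	if (from_user) {
		ct_warn_on(cur_state != CONTEXT_USER, "enter");
		cur_state = CONTEXT_KERNEL;	/* stands in for user_exit_irqoff() */
	} else if (trap != 0x700) {
		/* 0x700 (program check) is skipped to avoid recursing via CT_WARN_ON */
		ct_warn_on(cur_state != CONTEXT_KERNEL, "enter");
	}
}

/* Models interrupt_exit_user_prepare(): switch back before returning to user. */
static void model_exit_user(void)
{
	ct_warn_on(cur_state == CONTEXT_USER, "exit");
	cur_state = CONTEXT_USER;		/* stands in for user_enter_irqoff() */
}

int main(void)
{
	model_enter(true, 0x900);	/* e.g. a decrementer interrupt from user mode */
	model_exit_user();
	return 0;
}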