// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>

#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/interrupt.h>
#include <asm/hw_irq.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/unistd.h>

#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif

typedef long (*syscall_fn)(long, long, long, long, long, long);
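/*
 * Every handler in sys_call_table/compat_sys_call_table is invoked through
 * this generic six-argument shape (see the indirect call at the end of
 * system_call_exception()); handlers that take fewer arguments simply
 * ignore the remaining register values.
 */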
/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
				   long r6, long r7, long r8,
				   unsigned long r0, struct pt_regs *regs)
{
	syscall_fn f;

	kuep_lock();
#ifdef CONFIG_PPC32
	kuap_save_and_lock(regs);
#endif

	regs->orig_gpr3 = r3;

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

	trace_hardirqs_off(); /* finish reconciling */

	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
	user_exit_irqoff();

	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(arch_irq_disabled_regs(regs));

#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_PKEY)) {
		unsigned long amr, iamr;
		bool flush_needed = false;
		/*
		 * When entering from userspace we mostly have the AMR/IAMR
		 * different from kernel default values. Hence don't compare.
		 */
		amr = mfspr(SPRN_AMR);
		iamr = mfspr(SPRN_IAMR);
		regs->amr = amr;
		regs->iamr = iamr;
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
			flush_needed = true;
		}
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
			flush_needed = true;
		}
		if (flush_needed)
			isync();
	} else
#endif
		kuap_assert_locked();

	booke_restore_dbcr0();

	account_cpu_user_entry();

	account_stolen_time();
	/*
	 * This is not required for the syscall exit path, but makes the
	 * stack frame look nicer. If this was initialised in the first stack
	 * frame, or if the unwinder was taught the first stack frame always
	 * returns to user with IRQS_ENABLED, this store could be avoided!
	 */
	irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);

	local_irq_enable();

	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		/*
		 * We use the return value of do_syscall_trace_enter() as the
		 * syscall number. If the syscall was rejected for any reason
		 * do_syscall_trace_enter() returns an invalid syscall number
		 * and the test against NR_syscalls will fail and the return
		 * value to be used is in regs->gpr[3].
		 */
		r0 = do_syscall_trace_enter(regs);
		if (unlikely(r0 >= NR_syscalls))
			return regs->gpr[3];
		r3 = regs->gpr[3];
		r4 = regs->gpr[4];
		r5 = regs->gpr[5];
		r6 = regs->gpr[6];
		r7 = regs->gpr[7];
		r8 = regs->gpr[8];

	} else if (unlikely(r0 >= NR_syscalls)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		return -ENOSYS;
	}

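	/*
	 * r0 comes straight from userspace, so without a speculation barrier
	 * the NR_syscalls bounds check above could be bypassed speculatively
	 * when indexing the syscall table (Spectre v1 style).
	 */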
	/* May be faster to do array_index_nospec? */
	barrier_nospec();

	if (unlikely(is_compat_task())) {
		f = (void *)compat_sys_call_table[r0];

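		/*
		 * Compat (32-bit) tasks only define the low 32 bits of each
		 * argument register; clear the upper halves before handing
		 * them to the compat handler.
		 */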
		r3 &= 0x00000000ffffffffULL;
		r4 &= 0x00000000ffffffffULL;
		r5 &= 0x00000000ffffffffULL;
		r6 &= 0x00000000ffffffffULL;
		r7 &= 0x00000000ffffffffULL;
		r8 &= 0x00000000ffffffffULL;

	} else {
		f = (void *)sys_call_table[r0];
	}

	return f(r3, r4, r5, r6, r7, r8);
}

/*
 * local irqs must be disabled. Returns false if the caller must re-enable
 * them, check for new work, and try again.
 *
 * This should be called with local irqs disabled, but if they were previously
 * enabled when the interrupt handler returns (indicating a process-context /
 * synchronous interrupt) then irqs_enabled should be true.
 */
static notrace __always_inline bool __prep_irq_for_enabled_exit(bool clear_ri)
{
	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	/* This pattern matches prep_irq_for_idle */
	if (clear_ri)
		__hard_EE_RI_disable();
	else
		__hard_irq_disable();
#ifdef CONFIG_PPC64
	if (unlikely(lazy_irq_pending_nocheck())) {
		/* Took an interrupt, may have more exit work to do. */
		if (clear_ri)
			__hard_RI_enable();
		trace_hardirqs_off();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

		return false;
	}
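	/*
	 * No interrupt was taken while hard-disabling above, so the
	 * soft-mask state can be marked enabled for the return; MSR[EE]
	 * itself is re-enabled by the exit code when it restores the
	 * register state.
	 */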
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
	return true;
}

static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri, bool irqs_enabled)
{
	if (__prep_irq_for_enabled_exit(clear_ri))
		return true;

	/*
	 * Must replay pending soft-masked interrupts now. Don't just
	 * local_irq_enable(); local_irq_disable(); because if we are
	 * returning from an asynchronous interrupt here, another one
	 * might hit after irqs are enabled, and it would exit via this
	 * same path allowing another to fire, and so on unbounded.
	 *
	 * If interrupts were enabled when this interrupt exited,
	 * indicating a process context (synchronous) interrupt,
	 * local_irq_enable/disable can be used, which will enable
	 * interrupts rather than keeping them masked (unclear how
	 * much benefit this is over just replaying for all cases,
	 * because we immediately disable again, so all we're really
	 * doing is allowing hard interrupts to execute directly for
	 * a very small time, rather than being masked and replayed).
	 */
	if (irqs_enabled) {
		local_irq_enable();
		local_irq_disable();
	} else {
		replay_soft_interrupts();
	}

	return false;
}

static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBSR, -1);
#endif
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPR registers and CTR and XER
 * from the interrupt frame.
 *
 * The function graph tracer can not trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long ti_flags;
	unsigned long ret = 0;
	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;

	CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap_assert_locked();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = current_thread_info()->flags;

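	/*
	 * The sc ABI reports errors as a positive errno with the SO bit set
	 * in CR0; scv instead returns the negative errno directly, so the
	 * translation below is skipped for scv.
	 */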
	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

	local_irq_disable();

again:
	ti_flags = READ_ONCE(current_thread_info()->flags);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(current_thread_info()->flags);
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR has all available FP bits set,
			 * then they are live and no need to restore. If not,
			 * it means the regs were given up and restore_math
			 * may decide to restore them (to avoid taking an FP
			 * fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	user_enter_irqoff();

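	/*
	 * If an interrupt is caught while hard-disabling for exit, back out:
	 * briefly enable interrupts so it is replayed, then redo the exit
	 * work checks from "again".
	 */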
	/* scv need not set RI=0 because SRRs are not used */
	if (unlikely(!__prep_irq_for_enabled_exit(is_not_scv))) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
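	/*
	 * Keeping this as the final step means everything above still ran
	 * with kernel user-access protection (KUAP/KUEP) in place.
	 */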
	kuap_user_restore(regs);
	kuep_unlock();

	return ret;
}

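/*
 * Prepare to return from an interrupt taken while in user mode: run the
 * pending-work loop (reschedule, signals), restore FP/VEC/VSX state if
 * needed, and return a flag mask telling the exit assembly whether it must
 * restore all GPRs from the interrupt frame.
 */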
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
	unsigned long ti_flags;
	unsigned long flags;
	unsigned long ret = 0;

	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_assert_locked();

	local_irq_save(flags);

again:
	ti_flags = READ_ONCE(current_thread_info()->flags);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable(); /* returning to user: may enable */
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(current_thread_info()->flags);
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/* See above restore_math comment */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	user_enter_irqoff();

	if (unlikely(!__prep_irq_for_enabled_exit(true))) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

	booke_load_dbcr0();

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);

	return ret;
}

void preempt_schedule_irq(void);

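/*
 * Prepare to return from an interrupt taken while in kernel mode: handle
 * deferred stack-store emulation and kernel preemption, and replay any
 * soft-masked interrupts if the interrupted context had them enabled.
 */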
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
{
	unsigned long flags;
	unsigned long ret = 0;
	unsigned long kuap;

	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
	    unlikely(!(regs->msr & MSR_RI)))
		unrecoverable_exception(regs);
	BUG_ON(regs->msr & MSR_PR);
	/*
	 * CT_WARN_ON comes here via program_check_exception,
	 * so avoid recursion.
	 */
	if (TRAP(regs) != INTERRUPT_PROGRAM)
		CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap = kuap_get_and_assert_locked();
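	/*
	 * A deferred stack store is emulated by the exit assembly, which
	 * keeps a scratch register in the PACA for it; returning nonzero
	 * below tells it to perform that store.
	 */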
if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
|
|
|
|
clear_bits(_TIF_EMULATE_STACK_STORE, ¤t_thread_info()->flags);
|
powerpc/64s: Implement interrupt exit logic in C
Implement the bulk of interrupt return logic in C. The asm return code
must handle a few cases: restoring full GPRs, and emulating stack
store.
The stack store emulation is significantly simplfied, rather than
creating a new return frame and switching to that before performing
the store, it uses the PACA to keep a scratch register around to
perform the store.
The asm return code is moved into 64e for now. The new logic has made
allowance for 64e, but I don't have a full environment that works well
to test it, and even booting in emulated qemu is not great for stress
testing. 64e shouldn't be too far off working with this, given a bit
more testing and auditing of the logic.
This is slightly faster on a POWER9 (page fault speed increases about
1.1%), probably due to reduced mtmsrd.
mpe: Includes fixes from Nick for _TIF_EMULATE_STACK_STORE
handling (including the fast_interrupt_return path), to remove
trace_hardirqs_on(), and fixes the interrupt-return part of the
MSR_VSX restore bug caught by tm-unavailable selftest.
mpe: Incorporate fix from Nick:
The return-to-kernel path has to replay any soft-pending interrupts if
it is returning to a context that had interrupts soft-enabled. It has
to do this carefully and avoid plain enabling interrupts if this is an
irq context, which can cause multiple nesting of interrupts on the
stack, and other unexpected issues.
The code which avoided this case got the soft-mask state wrong, and
marked interrupts as enabled before going around again to retry. This
seems to be mostly harmless except when PREEMPT=y, this calls
preempt_schedule_irq with irqs apparently enabled and runs into a BUG
in kernel/sched/core.c
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200225173541.1549955-29-npiggin@gmail.com
2020-02-26 01:35:37 +08:00
|
|
|
ret = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
|
2021-02-08 23:10:28 +08:00
|
|
|
if (!arch_irq_disabled_regs(regs)) {
|
2020-02-26 01:35:37 +08:00
|
|
|
/* Returning to a kernel context with local irqs enabled. */
|
|
|
|
WARN_ON_ONCE(!(regs->msr & MSR_EE));
|
|
|
|
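/* Interrupts that became soft-pending must be replayed before the final
 * return, so loop until the exit can complete with nothing pending. */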
again:
|
|
|
|
if (IS_ENABLED(CONFIG_PREEMPT)) {
|
|
|
|
/* Return to preemptible kernel context */
|
2021-02-10 16:44:09 +08:00
|
|
|
if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
|
2020-02-26 01:35:37 +08:00
|
|
|
if (preempt_count() == 0)
|
|
|
|
preempt_schedule_irq();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-30 21:08:11 +08:00
|
|
|
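/* An interrupt became pending while preparing to exit; go around and let it
 * be handled before returning. */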
if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags))))
|
2020-02-26 01:35:37 +08:00
|
|
|
goto again;
|
|
|
|
} else {
|
|
|
|
/* Returning to a kernel context with local irqs disabled. */
|
|
|
|
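/* Hard-disable MSR[EE] and clear MSR[RI] for the final return sequence. */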
__hard_EE_RI_disable();
|
2021-02-08 23:10:28 +08:00
|
|
|
#ifdef CONFIG_PPC64
|
2020-02-26 01:35:37 +08:00
|
|
|
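/* Returning with MSR_EE set will hard-enable interrupts, so the PACA must
 * not record them as still hard-disabled. */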
if (regs->msr & MSR_EE)
|
|
|
|
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
|
2021-02-08 23:10:28 +08:00
|
|
|
#endif
|
2020-02-26 01:35:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
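/* Stash the MSR being returned with; tm_scratch is likely a debugging aid
 * for transactional memory state. */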
local_paca->tm_scratch = regs->msr;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
2021-03-16 18:41:57 +08:00
|
|
|
* 64s does not want to mfspr(SPRN_AMR) here, because this comes after
|
|
|
|
* mtmsr, which would cause Read-After-Write stalls. Hence, take the
|
|
|
|
* AMR value from the check above.
|
2020-02-26 01:35:37 +08:00
|
|
|
*/
|
2021-03-12 20:50:48 +08:00
|
|
|
kuap_kernel_restore(regs, kuap);
|
2020-02-26 01:35:37 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|