arm64: kernel: Rework finisher callback out of __cpu_suspend_enter()
Hibernate could make use of the cpu_suspend() code to save/restore cpu
state, however it needs to be able to return '0' from the 'finisher'.

Rework cpu_suspend() so that the finisher is called from C code,
independently from the save/restore of cpu state. Space to save the
context in is allocated in the caller's stack frame, and passed into
__cpu_suspend_enter(). Hibernate's use of this API will look like a
copy of the cpu_suspend() function.

Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
commit adc9b2dfd0 (parent 67f6919766)
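Orientation only, not part of the patch: a sketch of how a caller of the
reworked API looks, paraphrasing the new cpu_suspend() in the suspend.c
hunks below. Irq, debug-state and graph-tracer handling are omitted, and
suspend_sketch is a hypothetical name.

#include <linux/errno.h>
#include <asm/suspend.h>

static int suspend_sketch(unsigned long arg, int (*fn)(unsigned long))
{
	struct sleep_stack_data state;	/* context lives in this frame */
	int ret = 0;

	if (__cpu_suspend_enter(&state)) {
		/* Save path: call the finisher; it should never return. */
		ret = fn(arg);
		if (!ret)
			ret = -EOPNOTSUPP;
	} else {
		/* Resume path: cpu_resume() restored state and returned 0. */
		__cpu_suspend_exit();
	}
	return ret;
}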
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -2,6 +2,7 @@
 #define __ASM_SUSPEND_H
 
 #define NR_CTX_REGS 11
+#define NR_CALLEE_SAVED_REGS 12
 
 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -21,6 +22,25 @@ struct sleep_save_sp {
 	phys_addr_t save_ptr_stash_phys;
 };
 
+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+	struct cpu_suspend_ctx	system_regs;
+	unsigned long		callee_saved_regs[NR_CALLEE_SAVED_REGS];
+};
+
 extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
 #endif
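An aside on sizing, not part of the patch: __cpu_suspend_enter() below
stores six register pairs (x29/lr and x19..x28) at byte offsets 0
through 80 of callee_saved_regs, i.e. 12 * 8 = 96 bytes, which is
exactly what NR_CALLEE_SAVED_REGS == 12 provides. A hypothetical
build-time check, assuming asm/suspend.h is includable:

#include <asm/suspend.h>

_Static_assert(NR_CALLEE_SAVED_REGS * sizeof(unsigned long) >= 96,
	       "callee_saved_regs cannot hold the six pairs sleep.S saves");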
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -122,6 +122,8 @@ int main(void)
   DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
   DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
   DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
+  DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS,	offsetof(struct sleep_stack_data, system_regs));
+  DEFINE(SLEEP_STACK_DATA_CALLEE_REGS,	offsetof(struct sleep_stack_data, callee_saved_regs));
 #endif
   DEFINE(ARM_SMCCC_RES_X0_OFFS,	offsetof(struct arm_smccc_res, a0));
   DEFINE(ARM_SMCCC_RES_X2_OFFS,	offsetof(struct arm_smccc_res, a2));
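For context, not part of the patch: asm-offsets.c is compiled to
assembly at build time and scraped to generate
include/generated/asm-offsets.h, which is what lets the assembly in
sleep.S name C struct offsets symbolically. The two new DEFINEs would
yield something like the following; the values are hypothetical,
derived from the struct layout above (system_regs first, struct
cpu_suspend_ctx holding twelve u64s):

/* include/generated/asm-offsets.h (illustrative values) */
#define SLEEP_STACK_DATA_SYSTEM_REGS 0	/* offsetof(..., system_regs) */
#define SLEEP_STACK_DATA_CALLEE_REGS 96	/* offsetof(..., callee_saved_regs) */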
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -49,37 +49,30 @@
 	orr	\dst, \dst, \mask		// dst|=(aff3>>rs3)
 	.endm
 /*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
  *
- * x0 = suspend finisher argument
- * x1 = suspend finisher function pointer
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ *  x0 = struct sleep_stack_data area
 */
 ENTRY(__cpu_suspend_enter)
-	stp	x29, lr, [sp, #-96]!
-	stp	x19, x20, [sp,#16]
-	stp	x21, x22, [sp,#32]
-	stp	x23, x24, [sp,#48]
-	stp	x25, x26, [sp,#64]
-	stp	x27, x28, [sp,#80]
-	/*
-	 * Stash suspend finisher and its argument in x20 and x19
-	 */
-	mov	x19, x0
-	mov	x20, x1
+	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+	stp	x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
+	stp	x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
+	stp	x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
+	stp	x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
+	stp	x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
+	/* save the sp in cpu_suspend_ctx */
 	mov	x2, sp
-	sub	sp, sp, #CPU_SUSPEND_SZ	// allocate cpu_suspend_ctx
-	mov	x0, sp
-	/*
-	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
-	 */
-	str	x2, [x0, #CPU_CTX_SP]
+	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+	/* find the mpidr_hash */
 	ldr	x1, =sleep_save_sp
 	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
 	mrs	x7, mpidr_el1
@@ -93,34 +86,11 @@ ENTRY(__cpu_suspend_enter)
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
+
+	stp	x29, lr, [sp, #-16]!
 	bl	__cpu_suspend_save
-	/*
-	 * Grab suspend finisher in x20 and its argument in x19
-	 */
-	mov	x0, x19
-	mov	x1, x20
-	/*
-	 * We are ready for power down, fire off the suspend finisher
-	 * in x1, with argument in x0
-	 */
-	blr	x1
-	/*
-	 * Never gets here, unless suspend finisher fails.
-	 * Successful cpu_suspend should return from cpu_resume, returning
-	 * through this code path is considered an error
-	 * If the return value is set to 0 force x0 = -EOPNOTSUPP
-	 * to make sure a proper error condition is propagated
-	 */
-	cmp	x0, #0
-	mov	x3, #-EOPNOTSUPP
-	csel	x0, x3, x0, eq
-	add	sp, sp, #CPU_SUSPEND_SZ	// rewind stack pointer
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
+	ldp	x29, lr, [sp], #16
+	mov	x0, #1
 	ret
 ENDPROC(__cpu_suspend_enter)
 	.ltorg
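A gloss, not part of the patch: the mov x0, #1 above, paired with
cpu_resume_after_mmu returning 0 in the next hunk, gives
__cpu_suspend_enter() setjmp()-like two-return behaviour with the
senses inverted: the initial, state-saving call returns non-zero, while
the return that comes back through cpu_resume() yields 0, exactly as
the rewritten comment block describes.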
@@ -150,12 +120,6 @@ cpu_resume_after_mmu:
 	bl	kasan_unpoison_remaining_stack
 #endif
 	mov	x0, #0			// return zero on success
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
 	ret
 ENDPROC(cpu_resume_after_mmu)
@@ -172,6 +136,8 @@ ENTRY(cpu_resume)
 	/* x7 contains hash index, let's use it to grab context pointer */
 	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
 	ldr	x0, [x0, x7, lsl #3]
+	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
 	/* load physical address of identity map page table in x1 */
@@ -185,5 +151,12 @@ ENTRY(cpu_resume)
 	 * pointer and x1 to contain physical address of 1:1 page tables
 	 */
 	bl	cpu_do_resume		// PC relative jump, MMU off
+	/* Can't access these by physical address once the MMU is on */
+	ldp	x19, x20, [x29, #16]
+	ldp	x21, x22, [x29, #32]
+	ldp	x23, x24, [x29, #48]
+	ldp	x25, x26, [x29, #64]
+	ldp	x27, x28, [x29, #80]
+	ldp	x29, lr, [x29]
 	b	cpu_resume_mmu		// Resume MMU, never returns
 ENDPROC(cpu_resume)
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -10,22 +10,22 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
 /*
  * This is called by __cpu_suspend_enter() to save the state, and do whatever
  * flushing is required to ensure that when the CPU goes to sleep we have
  * the necessary data available when the caches are not searched.
  *
- * ptr: CPU context virtual address
+ * ptr: sleep_stack_data containing cpu state virtual address.
  * save_ptr: address of the location where the context physical address
  *           must be saved
  */
-void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
+void notrace __cpu_suspend_save(struct sleep_stack_data *ptr,
 				phys_addr_t *save_ptr)
 {
 	*save_ptr = virt_to_phys(ptr);
 
-	cpu_do_suspend(ptr);
+	cpu_do_suspend(&ptr->system_regs);
 	/*
 	 * Only flush the context that must be retrieved with the MMU
 	 * off. VA primitives ensure the flush is applied to all
@@ -51,6 +51,30 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 	hw_breakpoint_restore = hw_bp_restore;
 }
 
+void notrace __cpu_suspend_exit(void)
+{
+	/*
+	 * We are resuming from reset with the idmap active in TTBR0_EL1.
+	 * We must uninstall the idmap and restore the expected MMU
+	 * state before we can possibly return to userspace.
+	 */
+	cpu_uninstall_idmap();
+
+	/*
+	 * Restore per-cpu offset before any kernel
+	 * subsystem relying on it has a chance to run.
+	 */
+	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
+	/*
+	 * Restore HW breakpoint registers to sane values
+	 * before debug exceptions are possibly reenabled
+	 * through local_dbg_restore.
+	 */
+	if (hw_breakpoint_restore)
+		hw_breakpoint_restore(NULL);
+}
+
 /*
  * cpu_suspend
  *
@@ -60,8 +84,9 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-	int ret;
+	int ret = 0;
 	unsigned long flags;
+	struct sleep_stack_data state;
 
 	/*
 	 * From this point debug exceptions are disabled to prevent
@@ -77,34 +102,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 */
 	pause_graph_tracing();
 
-	/*
-	 * mm context saved on the stack, it will be restored when
-	 * the cpu comes out of reset through the identity mapped
-	 * page tables, so that the thread address space is properly
-	 * set-up on function return.
-	 */
-	ret = __cpu_suspend_enter(arg, fn);
-	if (ret == 0) {
-		/*
-		 * We are resuming from reset with the idmap active in TTBR0_EL1.
-		 * We must uninstall the idmap and restore the expected MMU
-		 * state before we can possibly return to userspace.
-		 */
-		cpu_uninstall_idmap();
+	if (__cpu_suspend_enter(&state)) {
+		/* Call the suspend finisher */
+		ret = fn(arg);
 
 		/*
-		 * Restore per-cpu offset before any kernel
-		 * subsystem relying on it has a chance to run.
+		 * Never gets here, unless the suspend finisher fails.
+		 * Successful cpu_suspend() should return from cpu_resume(),
+		 * returning through this code path is considered an error
+		 * If the return value is set to 0 force ret = -EOPNOTSUPP
+		 * to make sure a proper error condition is propagated
 		 */
-		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
-		/*
-		 * Restore HW breakpoint registers to sane values
-		 * before debug exceptions are possibly reenabled
-		 * through local_dbg_restore.
-		 */
-		if (hw_breakpoint_restore)
-			hw_breakpoint_restore(NULL);
+		if (!ret)
+			ret = -EOPNOTSUPP;
+	} else {
+		__cpu_suspend_exit();
 	}
 
 	unpause_graph_tracing();