2018-09-05 14:25:14 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/version.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/sched/task_stack.h>
|
|
|
|
#include <linux/sched/debug.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/kallsyms.h>
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
#include <linux/ptrace.h>
|
|
|
|
|
|
|
|
#include <asm/elf.h>
|
|
|
|
#include <abi/reg_ops.h>
|
|
|
|
|
|
|
|
/* Per-CPU info records, one slot per possible CPU (indexed by CPU id). */
struct cpuinfo_csky cpu_data[NR_CPUS];
|
|
|
|
|
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
/*
 * Stack-protector canary referenced by compiler-generated prologue/
 * epilogue code when the kernel is built with -fstack-protector.
 * Exported so modules built with stack protection can link against it.
 */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
|
|
|
|
|
2018-09-05 14:25:14 +08:00
|
|
|
asmlinkage void ret_from_fork(void);
|
|
|
|
asmlinkage void ret_from_kernel_thread(void);
|
|
|
|
|
|
|
|
/*
 * Some archs flush debug and FPU info here.
 * Nothing to do on this architecture — intentionally empty.
 */
void flush_thread(void)
{
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return saved PC from a blocked thread
|
|
|
|
*/
|
|
|
|
unsigned long thread_saved_pc(struct task_struct *tsk)
|
|
|
|
{
|
2020-05-14 16:04:31 +08:00
|
|
|
struct switch_stack *sw = (struct switch_stack *)tsk->thread.sp;
|
2018-09-05 14:25:14 +08:00
|
|
|
|
|
|
|
return sw->r15;
|
|
|
|
}
|
|
|
|
|
2020-02-12 10:24:52 +08:00
|
|
|
/*
 * Set up thread state for a child task created by fork()/clone().
 *
 * @clone_flags: clone(2) flags; CLONE_SETTLS is honored here
 * @usp:         new user stack pointer; for kernel threads this is
 *               staged in r9 instead (presumably the thread function,
 *               consumed by ret_from_kernel_thread — TODO confirm)
 * @kthread_arg: kernel-thread argument, staged in r10
 * @p:           the new (child) task
 * @tls:         new TLS pointer when CLONE_SETTLS is set
 *
 * Returns 0 on success.
 */
int copy_thread_tls(unsigned long clone_flags,
		    unsigned long usp,
		    unsigned long kthread_arg,
		    struct task_struct *p,
		    unsigned long tls)
{
	struct switch_stack *childstack;
	struct pt_regs *childregs = task_pt_regs(p);

#ifdef CONFIG_CPU_HAS_FPU
	/* Snapshot the parent's FPU state into the child's thread struct. */
	save_to_user_fp(&p->thread.user_fp);
#endif

	/* The switch_stack frame sits immediately below pt_regs. */
	childstack = ((struct switch_stack *) childregs) - 1;
	memset(childstack, 0, sizeof(struct switch_stack));

	/* setup thread.sp for switch_to !!! */
	p->thread.sp = (unsigned long)childstack;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: no user register context to inherit. */
		memset(childregs, 0, sizeof(struct pt_regs));
		childstack->r15 = (unsigned long) ret_from_kernel_thread;
		childstack->r10 = kthread_arg;
		childstack->r9 = usp;
		childregs->sr = mfcr("psr");
	} else {
		/* User thread: start from a copy of the parent's registers. */
		*childregs = *(current_pt_regs());
		if (usp)
			childregs->usp = usp;
		if (clone_flags & CLONE_SETTLS)
			task_thread_info(p)->tp_value = childregs->tls
						      = tls;

		/* Child sees 0 as the return value of fork()/clone(). */
		childregs->a0 = 0;
		childstack->r15 = (unsigned long) ret_from_fork;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* Fill in the fpu structure for a core dump. */
|
|
|
|
int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
|
|
|
|
{
|
|
|
|
memcpy(fpu, ¤t->thread.user_fp, sizeof(*fpu));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dump_fpu);
|
|
|
|
/*
 * Copy the task's general-purpose registers into an ELF gregset for
 * core dumps / ptrace regset consumers.
 *
 * Returns 1 to indicate the register set was copied.
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	/* NOTE: usp is error value. */
	/* Macro supplies its own statement(s); no trailing semicolon. */
	ELF_CORE_COPY_REGS((*pr_regs), regs)

	return 1;
}
|
|
|
|
|
|
|
|
#ifndef CONFIG_CPU_PM_NONE
/*
 * Architecture idle hook: issue the low-power instruction selected by
 * Kconfig (exactly one of CPU_PM_WAIT / CPU_PM_DOZE / CPU_PM_STOP is
 * expected to be set — TODO confirm they are mutually exclusive), then
 * re-enable interrupts, since the generic idle loop calls this with
 * IRQs disabled.
 */
void arch_cpu_idle(void)
{
#ifdef CONFIG_CPU_PM_WAIT
	asm volatile("wait\n");
#endif

#ifdef CONFIG_CPU_PM_DOZE
	asm volatile("doze\n");
#endif

#ifdef CONFIG_CPU_PM_STOP
	asm volatile("stop\n");
#endif
	local_irq_enable();
}
#endif
|