RISC-V Patches for the 5.19 Merge Window, Part 1

* Support for the Svpbmt extension, which allows memory attributes to be
  encoded in pages.
* Support for the Allwinner D1's implementation of page-based memory
  attributes.
* Support for running rv32 binaries on rv64 systems, via the compat
  subsystem.
* Support for kexec_file().
* Support for the new generic ticket-based spinlocks, which allows us to
  also move to qrwlock. These should have already gone in through the
  asm-generic tree as well.
* A handful of cleanups and fixes, including some larger ones around
  atomics and XIP.

-----BEGIN PGP SIGNATURE-----

iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmKWOx8THHBhbG1lckBk
YWJiZWx0LmNvbQAKCRAuExnzX7sYieAiEADAUdP7ctoaSQwk5skd/fdA3b4KJuKn
1Zjl+Br32WP0DlbirYBYWRUQZnCCsvABbTiwSJMcG7NBpU5pyQ5XDtB3OA5kJswO
Fdp8Nd53//+GK1M5zdEM9OdgvT9fbfTZ3qTu8bKsROOQhGwnYL+Csc9KjFRqEmzN
oQii0jlb3n5PM4FL3GsbV4uMn9zzkP9mnVAPQktcock2EKFEK/Fy3uNYMQiO2KPi
n8O6bIDaeRdQ6SurzWOuOkt0cro0tEF85ilzT04mynQsOU0el5oGqCxnOhNH3VWg
ndqPT6Yafw12hZOtbKJeP+nF8IIR6aJLP3jOtRwEVgcfbXYAw4QwbAV8kQZISefN
ipn8JGY7GX9Y9TYU692OUGkcmAb3/dxb6c0WihBdvJ0M6YyLD5X+YKHNuG2onLgK
ss43C5Mxsu629rsjdu/PV91B1+pve3rG9siVmF+g4eo0x9rjMq6/JB0Kal/8SLI1
Je5T55d5ujV1a2XxhZLQOSD5owrK7J1M9owb0bloTnr9nVwFTWDrfEQEU82o3kP+
Xm+FfXktnz9ai55NjkMbbEur5D++dKJhBavwCTnBcTrJmMtEH0R45GTK9ZehP+WC
rNVrRXjIsS18wsTfJxnkZeFQA38as6VBKTzvwHvOgzTrrZU1/xk3lpkouYtAO6BG
gKacHshVilmUuA==
=Loi6
-----END PGP SIGNATURE-----

Merge tag 'riscv-for-linus-5.19-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:

 - Support for the Svpbmt extension, which allows memory attributes to
   be encoded in pages

 - Support for the Allwinner D1's implementation of page-based memory
   attributes

 - Support for running rv32 binaries on rv64 systems, via the compat
   subsystem

 - Support for kexec_file()

 - Support for the new generic ticket-based spinlocks, which allows us
   to also move to qrwlock. These should have already gone in through
   the asm-generic tree as well

 - A handful of cleanups and fixes, including some larger ones around
   atomics and XIP

* tag 'riscv-for-linus-5.19-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (51 commits)
  RISC-V: Prepare dropping week attribute from arch_kexec_apply_relocations[_add]
  riscv: compat: Using seperated vdso_maps for compat_vdso_info
  RISC-V: Fix the XIP build
  RISC-V: Split out the XIP fixups into their own file
  RISC-V: ignore xipImage
  RISC-V: Avoid empty create_*_mapping definitions
  riscv: Don't output a bogus mmu-type on a no MMU kernel
  riscv: atomic: Add custom conditional atomic operation implementation
  riscv: atomic: Optimize dec_if_positive functions
  riscv: atomic: Cleanup unnecessary definition
  RISC-V: Load purgatory in kexec_file
  RISC-V: Add purgatory
  RISC-V: Support for kexec_file on panic
  RISC-V: Add kexec_file support
  RISC-V: use memcpy for kexec_file mode
  kexec_file: Fix kexec_file.c build error for riscv platform
  riscv: compat: Add COMPAT Kbuild skeletal support
  riscv: compat: ptrace: Add compat_arch_ptrace implement
  riscv: compat: signal: Add rt_frame implementation
  riscv: add memory-type errata for T-Head
  ...
commit 35b51afd23
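The ticket-spinlock item above refers to the fairness scheme in which each CPU atomically takes a ticket and then waits until the lock's "owner" counter reaches that ticket. The stand-alone C sketch below only illustrates that idea; it is not the kernel's asm-generic/spinlock.h, and the names ticket_lock_acquire/ticket_lock_release are invented for this example. The real generic implementation packs both counters into a single 32-bit word so that taking a ticket is one atomic add, which maps directly onto RISC-V's AMO instructions.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative ticket spinlock (not the kernel implementation). */
struct ticket_lock {
	_Atomic uint16_t next;   /* next ticket to hand out */
	_Atomic uint16_t owner;  /* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *lock)
{
	/* Grab a ticket; the order of arrival fixes the order of entry. */
	uint16_t ticket = atomic_fetch_add_explicit(&lock->next, 1,
						    memory_order_relaxed);

	/* Spin until it is our turn. */
	while (atomic_load_explicit(&lock->owner,
				    memory_order_acquire) != ticket)
		; /* a real implementation would cpu_relax() here */
}

static void ticket_lock_release(struct ticket_lock *lock)
{
	/* Only the holder writes owner, so a plain increment is safe. */
	atomic_store_explicit(&lock->owner,
			      atomic_load_explicit(&lock->owner,
						   memory_order_relaxed) + 1,
			      memory_order_release);
}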
@@ -2157,10 +2157,6 @@ config DMI

endmenu # "Boot options"

config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC

menu "Power management options"

source "kernel/power/Kconfig"

@@ -8,6 +8,15 @@
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;

#define __compat_uid_t __compat_uid_t
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;

#define compat_ipc_pid_t compat_ipc_pid_t
typedef u16 compat_ipc_pid_t;

#define compat_statfs compat_statfs

#include <asm-generic/compat.h>

#ifdef CONFIG_COMPAT
@@ -19,21 +28,15 @@ typedef u16 compat_mode_t;
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__
#define COMPAT_UTS_MACHINE "armv8b\0\0"
#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
#endif

typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u16 __compat_uid16_t;
typedef u16 __compat_gid16_t;
typedef u32 compat_dev_t;
typedef s32 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef __kernel_fsid_t compat_fsid_t;

struct compat_stat {
#ifdef __AARCH64EB__

@@ -65,26 +68,6 @@ struct compat_stat {
compat_ulong_t __unused4[2];
};

struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};

#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14

struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};

struct compat_statfs {
int f_type;
int f_bsize;
@@ -107,64 +90,6 @@ struct compat_statfs {
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
#define COMPAT_MINSIGSTKSZ 2048

struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};

struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_ulong_t sem_otime;
compat_ulong_t sem_otime_high;
compat_ulong_t sem_ctime;
compat_ulong_t sem_ctime_high;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};

struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_ulong_t msg_stime;
compat_ulong_t msg_stime_high;
compat_ulong_t msg_rtime;
compat_ulong_t msg_rtime_high;
compat_ulong_t msg_ctime;
compat_ulong_t msg_ctime_high;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};

struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_ulong_t shm_atime;
compat_ulong_t shm_atime_high;
compat_ulong_t shm_dtime;
compat_ulong_t shm_dtime_high;
compat_ulong_t shm_ctime;
compat_ulong_t shm_ctime_high;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};

static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
@@ -3,6 +3,7 @@
* Copyright (C) 2012 ARM Ltd.
*/
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_COMPAT_STAT
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE

@@ -3198,16 +3198,12 @@ config MIPS32_COMPAT
config COMPAT
bool

config SYSVIPC_COMPAT
bool

config MIPS32_O32
bool "Kernel support for o32 binaries"
depends on 64BIT
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
help
Select this option if you want to run o32 binaries. These are pure
32-bit binaries as used by the 32-bit Linux/MIPS port. Most of

@@ -3221,7 +3217,6 @@ config MIPS32_N32
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
help
Select this option if you want to run n32 binaries. These are
64-bit binaries using 32-bit quantities for addressing and certain
@@ -9,28 +9,28 @@
#include <asm/page.h>
#include <asm/ptrace.h>

#define __compat_uid_t __compat_uid_t
typedef s32 __compat_uid_t;
typedef s32 __compat_gid_t;

typedef __compat_uid_t __compat_uid32_t;
typedef __compat_gid_t __compat_gid32_t;
#define __compat_uid32_t __compat_uid32_t
#define __compat_gid32_t __compat_gid32_t

#define compat_statfs compat_statfs
#define compat_ipc64_perm compat_ipc64_perm

#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;

#define COMPAT_RLIM_INFINITY 0x7fffffffUL

#include <asm-generic/compat.h>

#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "mips\0\0\0"

typedef u32 compat_dev_t;
typedef u32 compat_nlink_t;
typedef s32 compat_ipc_pid_t;
typedef struct {
s32 val[2];
} compat_fsid_t;

struct compat_stat {
compat_dev_t st_dev;

@@ -55,27 +55,8 @@ struct compat_stat {
s32 st_pad4[14];
};

struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
s32 l_sysid;
compat_pid_t l_pid;
s32 pad[4];
};

#define F_GETLK64 33
#define F_SETLK64 34
#define F_SETLKW64 35

struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};
#define __ARCH_COMPAT_FLOCK_EXTRA_SYSID s32 l_sysid;
#define __ARCH_COMPAT_FLOCK_PAD s32 pad[4];

struct compat_statfs {
int f_type;

@@ -92,10 +73,6 @@ struct compat_statfs {
int f_spare[5];
};

#define COMPAT_RLIM_INFINITY 0x7fffffffUL

#define COMPAT_OFF_T_MAX 0x7fffffff

struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
@@ -50,6 +50,8 @@
# ifdef CONFIG_32BIT
# define __ARCH_WANT_STAT64
# define __ARCH_WANT_SYS_TIME32
# else
# define __ARCH_WANT_COMPAT_STAT
# endif
# ifdef CONFIG_MIPS32_O32
# define __ARCH_WANT_SYS_TIME32

@@ -44,36 +44,16 @@
#define F_SETOWN 24 /* for sockets. */
#define F_GETOWN 23 /* for sockets. */

#ifndef __mips64
#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#define F_GETLK64 33 /* using 'struct flock64' */
#define F_SETLK64 34
#define F_SETLKW64 35
#endif

/*
* The flavours of struct flock. "struct flock" is the ABI compliant
* variant. Finally struct flock64 is the LFS variant of struct flock. As
* a historic accident and inconsistence with the ABI definition it doesn't
* contain all the same fields as struct flock.
*/
#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */

#if _MIPS_SIM != _MIPS_SIM_ABI64

#include <linux/types.h>

struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
long l_sysid;
__kernel_pid_t l_pid;
long pad[4];
};

#define HAVE_ARCH_STRUCT_FLOCK

#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __ARCH_FLOCK_EXTRA_SYSID long l_sysid;
#define __ARCH_FLOCK_PAD long pad[4];
#endif

#include <asm-generic/fcntl.h>
@@ -332,10 +332,6 @@ config COMPAT
def_bool y
depends on 64BIT

config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC

config AUDIT_ARCH
def_bool y

@@ -11,16 +11,16 @@
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;

#define compat_ipc_pid_t compat_ipc_pid_t
typedef u16 compat_ipc_pid_t;

#define compat_ipc64_perm compat_ipc64_perm

#include <asm-generic/compat.h>

#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "parisc\0\0"

typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
typedef u32 compat_dev_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;

struct compat_stat {
compat_dev_t st_dev; /* dev_t is 32 bits on parisc */

@@ -53,37 +53,6 @@ struct compat_stat {
u32 st_spare4[3];
};

struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};

struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};

struct compat_statfs {
s32 f_type;
s32 f_bsize;
s32 f_blocks;
s32 f_bfree;
s32 f_bavail;
s32 f_files;
s32 f_ffree;
__kernel_fsid_t f_fsid;
s32 f_namelen;
s32 f_frsize;
s32 f_flags;
s32 f_spare[4];
};

struct compat_sigcontext {
compat_int_t sc_flags;
compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */

@@ -93,10 +62,6 @@ struct compat_sigcontext {
compat_int_t sc_sar; /* cr11 */
};

#define COMPAT_RLIM_INFINITY 0xffffffff

#define COMPAT_OFF_T_MAX 0x7fffffff

struct compat_ipc64_perm {
compat_key_t key;
__compat_uid_t uid;
@@ -162,6 +162,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#define __ARCH_WANT_COMPAT_STAT

#ifdef CONFIG_64BIT
#define __ARCH_WANT_SYS_TIME

@@ -303,11 +303,6 @@ config COMPAT
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION

config SYSVIPC_COMPAT
bool
depends on COMPAT && SYSVIPC
default y

config SCHED_OMIT_FRAME_POINTER
bool
default y
@@ -8,21 +8,20 @@
#include <linux/types.h>
#include <linux/sched.h>

#define compat_ipc_pid_t compat_ipc_pid_t
typedef u16 compat_ipc_pid_t;

#define compat_ipc64_perm compat_ipc64_perm

#include <asm-generic/compat.h>

#define COMPAT_USER_HZ 100
#ifdef __BIG_ENDIAN__
#define COMPAT_UTS_MACHINE "ppc\0\0"
#else
#define COMPAT_UTS_MACHINE "ppcle\0\0"
#endif

typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
typedef u32 compat_dev_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef __kernel_fsid_t compat_fsid_t;

struct compat_stat {
compat_dev_t st_dev;

@@ -44,45 +43,6 @@ struct compat_stat {
u32 __unused4[2];
};

struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};

#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14

struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};

struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_flags;
int f_spare[4];
};

#define COMPAT_RLIM_INFINITY 0xffffffff

#define COMPAT_OFF_T_MAX 0x7fffffff

/*
* ipc64_perm is actually 32/64bit clean but since the compat layer refers to
* it we may as well define it.
@@ -44,6 +44,7 @@
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_COMPAT_STAT
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
#define __ARCH_WANT_SYS_FORK

@@ -3,5 +3,7 @@
obj-y += kernel/ mm/ net/
obj-$(CONFIG_BUILTIN_DTB) += boot/dts/

obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/

# for cleaning
subdir- += boot
@@ -78,6 +78,7 @@ config RISCV
select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU

@@ -129,12 +130,18 @@ config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8

config ARCH_MMAP_RND_COMPAT_BITS_MIN
default 8

# max bits determined by the following formula:
# VA_BITS - PAGE_SHIFT - 3
config ARCH_MMAP_RND_BITS_MAX
default 24 if 64BIT # SV39 based
default 17

config ARCH_MMAP_RND_COMPAT_BITS_MAX
default 17

# set if we run in machine mode, cleared if we run in supervisor mode
config RISCV_M_MODE
bool

@@ -326,6 +333,21 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.

config RISCV_ALTERNATIVE
bool
depends on !XIP_KERNEL
help
This Kconfig allows the kernel to automatically patch the
errata required by the execution platform at run time. The
code patching is performed once in the boot stages. It means
that the overhead from this mechanism is just taken once.

config RISCV_ALTERNATIVE_EARLY
bool
depends on RISCV_ALTERNATIVE
help
Allows early patching of the kernel for special errata

config RISCV_ISA_C
bool "Emit compressed instructions when building Linux"
default y

@@ -336,6 +358,19 @@ config RISCV_ISA_C

If you don't know what to do here, say Y.

config RISCV_ISA_SVPBMT
bool "SVPBMT extension support"
depends on 64BIT && MMU
select RISCV_ALTERNATIVE
default y
help
Adds support to dynamically detect the presence of the SVPBMT extension
(Supervisor-mode: page-based memory types) and enable its usage.

The SVPBMT extension is only available on 64Bit cpus.

If you don't know what to do here, say Y.

config FPU
bool "FPU support"
default y
@@ -385,6 +420,26 @@ config KEXEC

The name comes from the similarity to the exec system call.

config KEXEC_FILE
bool "kexec file based systmem call"
select KEXEC_CORE
select KEXEC_ELF
select HAVE_IMA_KEXEC if IMA
depends on 64BIT
help
This is new version of kexec system call. This system call is
file based and takes file descriptors as system call argument
for kernel and initramfs as opposed to list of segments as
accepted by previous system call.

If you don't know what to do here, say Y.

config ARCH_HAS_KEXEC_PURGATORY
def_bool KEXEC_FILE
select BUILD_BIN2C
depends on CRYPTO=y
depends on CRYPTO_SHA256=y

config CRASH_DUMP
bool "Build kdump crash kernel"
help

@@ -396,6 +451,18 @@ config CRASH_DUMP

For more details see Documentation/admin-guide/kdump/kdump.rst

config COMPAT
bool "Kernel support for 32-bit U-mode"
default 64BIT
depends on 64BIT && MMU
help
This option enables support for a 32-bit U-mode running under a 64-bit
kernel at S-mode. riscv32-specific components such as system calls,
the user helper functions (vdso), signal rt_frame functions and the
ptrace interface are handled appropriately by the kernel.

If you want to execute 32-bit userspace applications, say Y.

endmenu

menu "Boot options"
@@ -1,18 +1,9 @@
menu "CPU errata selection"

config RISCV_ERRATA_ALTERNATIVE
bool "RISC-V alternative scheme"
depends on !XIP_KERNEL
default y
help
This Kconfig allows the kernel to automatically patch the
errata required by the execution platform at run time. The
code patching is performed once in the boot stages. It means
that the overhead from this mechanism is just taken once.

config ERRATA_SIFIVE
bool "SiFive errata"
depends on RISCV_ERRATA_ALTERNATIVE
depends on !XIP_KERNEL
select RISCV_ALTERNATIVE
help
All SiFive errata Kconfig depend on this Kconfig. Disabling
this Kconfig will disable all SiFive errata. Please say "Y"

@@ -42,4 +33,25 @@ config ERRATA_SIFIVE_CIP_1200

If you don't know what to do here, say "Y".

config ERRATA_THEAD
bool "T-HEAD errata"
select RISCV_ALTERNATIVE
help
All T-HEAD errata Kconfig depend on this Kconfig. Disabling
this Kconfig will disable all T-HEAD errata. Please say "Y"
here if your platform uses T-HEAD CPU cores.

Otherwise, please say "N" here to avoid unnecessary overhead.

config ERRATA_THEAD_PBMT
bool "Apply T-Head memory type errata"
depends on ERRATA_THEAD && 64BIT
select RISCV_ALTERNATIVE_EARLY
default y
help
This will apply the memory type errata to handle the non-standard
memory type bits in page-table-entries on T-Head SoCs.

If you don't know what to do here, say "Y".

endmenu

@@ -14,7 +14,6 @@ config SOC_SIFIVE
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
select SIFIVE_PLIC
select RISCV_ERRATA_ALTERNATIVE if !XIP_KERNEL
select ERRATA_SIFIVE if !XIP_KERNEL
help
This enables support for SiFive SoC platform hardware.
@@ -103,7 +103,7 @@ endif

head-y := arch/riscv/kernel/head.o

core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/
core-y += arch/riscv/errata/
core-$(CONFIG_KVM) += arch/riscv/kvm/

libs-y += arch/riscv/lib/

@@ -112,12 +112,17 @@ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
$(build)=arch/riscv/kernel/compat_vdso $@)

ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
$(build)=arch/riscv/kernel/compat_vdso include/generated/compat_vdso-offsets.h)

endif
endif

@@ -153,3 +158,7 @@ PHONY += rv64_randconfig
rv64_randconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \
-f $(srctree)/Makefile randconfig

PHONY += rv32_defconfig
rv32_defconfig:
$(Q)$(MAKE) -f $(srctree)/Makefile defconfig 32-bit.config

@@ -4,3 +4,4 @@ Image.*
loader
loader.lds
loader.bin
xipImage
@@ -168,11 +168,12 @@ uart0: serial@10010000 {
status = "disabled";
};
dma: dma-controller@3000000 {
compatible = "sifive,fu540-c000-pdma";
compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic0>;
interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>,
<30>;
dma-channels = <4>;
#dma-cells = <1>;
};
uart1: serial@10011000 {

@@ -1,2 +1,2 @@
obj-y += alternative.o
obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
obj-$(CONFIG_ERRATA_THEAD) += thead/
@@ -1,75 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* alternative runtime patching
* inspired by the ARM64 and x86 version
*
* Copyright (C) 2021 Sifive.
*/

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>

static struct cpu_manufacturer_info_t {
unsigned long vendor_id;
unsigned long arch_id;
unsigned long imp_id;
} cpu_mfr_info;

static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid,
unsigned long impid) __initdata;

static inline void __init riscv_fill_cpu_mfr_info(void)
{
#ifdef CONFIG_RISCV_M_MODE
cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID);
cpu_mfr_info.arch_id = csr_read(CSR_MARCHID);
cpu_mfr_info.imp_id = csr_read(CSR_MIMPID);
#else
cpu_mfr_info.vendor_id = sbi_get_mvendorid();
cpu_mfr_info.arch_id = sbi_get_marchid();
cpu_mfr_info.imp_id = sbi_get_mimpid();
#endif
}

static void __init init_alternative(void)
{
riscv_fill_cpu_mfr_info();

switch (cpu_mfr_info.vendor_id) {
#ifdef CONFIG_ERRATA_SIFIVE
case SIFIVE_VENDOR_ID:
vendor_patch_func = sifive_errata_patch_func;
break;
#endif
default:
vendor_patch_func = NULL;
}
}

/*
* This is called very early in the boot process (directly after we run
* a feature detect on the boot CPU). No need to worry about other CPUs
* here.
*/
void __init apply_boot_alternatives(void)
{
/* If called on non-boot cpu things could go wrong */
WARN_ON(smp_processor_id() != 0);

init_alternative();

if (!vendor_patch_func)
return;

vendor_patch_func((struct alt_entry *)__alt_start,
(struct alt_entry *)__alt_end,
cpu_mfr_info.arch_id, cpu_mfr_info.imp_id);
}
@@ -4,6 +4,7 @@
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <asm/patch.h>

@@ -54,7 +55,8 @@ static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = {
},
};

static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid)
static u32 __init_or_module sifive_errata_probe(unsigned long archid,
unsigned long impid)
{
int idx;
u32 cpu_req_errata = 0;

@@ -66,7 +68,7 @@ static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid)
return cpu_req_errata;
}

static void __init warn_miss_errata(u32 miss_errata)
static void __init_or_module warn_miss_errata(u32 miss_errata)
{
int i;

@@ -79,14 +81,22 @@ static void __init warn_miss_errata(u32 miss_errata)
pr_warn("----------------------------------------------------------------\n");
}

void __init sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid)
void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
struct alt_entry *end,
unsigned long archid,
unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata = sifive_errata_probe(archid, impid);
u32 cpu_req_errata;
u32 cpu_apply_errata = 0;
u32 tmp;

if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return;

cpu_req_errata = sifive_errata_probe(archid, impid);

for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != SIFIVE_VENDOR_ID)
continue;
@@ -0,0 +1,11 @@
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
CFLAGS_errata.o := -mcmodel=medany
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_errata.o = $(CC_FLAGS_FTRACE)
endif
ifdef CONFIG_KASAN
KASAN_SANITIZE_errata.o := n
endif
endif

obj-y += errata.o
@@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Heiko Stuebner <heiko@sntech.de>
*/

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/errata_list.h>
#include <asm/patch.h>
#include <asm/vendorid_list.h>

struct errata_info {
char name[ERRATA_STRING_LENGTH_MAX];
bool (*check_func)(unsigned long arch_id, unsigned long impid);
unsigned int stage;
};

static bool errata_mt_check_func(unsigned long arch_id, unsigned long impid)
{
if (arch_id != 0 || impid != 0)
return false;
return true;
}

static const struct errata_info errata_list[ERRATA_THEAD_NUMBER] = {
{
.name = "memory-types",
.stage = RISCV_ALTERNATIVES_EARLY_BOOT,
.check_func = errata_mt_check_func
},
};

static u32 thead_errata_probe(unsigned int stage, unsigned long archid, unsigned long impid)
{
const struct errata_info *info;
u32 cpu_req_errata = 0;
int idx;

for (idx = 0; idx < ERRATA_THEAD_NUMBER; idx++) {
info = &errata_list[idx];

if ((stage == RISCV_ALTERNATIVES_MODULE ||
info->stage == stage) && info->check_func(archid, impid))
cpu_req_errata |= (1U << idx);
}

return cpu_req_errata;
}

void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
u32 tmp;

for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != THEAD_VENDOR_ID)
continue;
if (alt->errata_id >= ERRATA_THEAD_NUMBER)
continue;

tmp = (1U << alt->errata_id);
if (cpu_req_errata & tmp) {
/* On vm-alternatives, the mmu isn't running yet */
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
memcpy((void *)__pa_symbol(alt->old_ptr),
(void *)__pa_symbol(alt->alt_ptr), alt->alt_len);
else
patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
}
}

if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
local_flush_icache_all();
}
@@ -2,7 +2,7 @@
#ifndef __ASM_ALTERNATIVE_MACROS_H
#define __ASM_ALTERNATIVE_MACROS_H

#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
#ifdef CONFIG_RISCV_ALTERNATIVE

#ifdef __ASSEMBLY__

@@ -21,7 +21,11 @@
.popsection
.subsection 1
888 :
.option push
.option norvc
.option norelax
\new_c
.option pop
889 :
.previous
.org . - (889b - 888b) + (887b - 886b)

@@ -31,7 +35,11 @@

.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
886 :
.option push
.option norvc
.option norelax
\old_c
.option pop
887 :
ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
.endm
@@ -39,44 +47,97 @@
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)

.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
new_c_2, vendor_id_2, errata_id_2, enable_2
886 :
.option push
.option norvc
.option norelax
\old_c
.option pop
887 :
ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
.endm

#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
CONFIG_k_1, \
new_c_2, vendor_id_2, errata_id_2, \
CONFIG_k_2) \
__ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \
IS_ENABLED(CONFIG_k_1), \
new_c_2, vendor_id_2, errata_id_2, \
IS_ENABLED(CONFIG_k_2)

#else /* !__ASSEMBLY__ */

#include <asm/asm.h>
#include <linux/stringify.h>

#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
RISCV_PTR " " oldptr "\n" \
RISCV_PTR " " newptr "\n" \
REG_ASM " " vendor_id "\n" \
REG_ASM " " newlen "\n" \
#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
RISCV_PTR " " oldptr "\n" \
RISCV_PTR " " newptr "\n" \
REG_ASM " " vendor_id "\n" \
REG_ASM " " newlen "\n" \
".word " errata_id "\n"

#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
".if " __stringify(enable) " == 1\n" \
".pushsection .alternative, \"a\"\n" \
ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
".popsection\n" \
".subsection 1\n" \
"888 :\n" \
".option push\n" \
".option norvc\n" \
".option norelax\n" \
new_c "\n" \
".option pop\n" \
"889 :\n" \
".previous\n" \
".org . - (887b - 886b) + (889b - 888b)\n" \
".org . - (889b - 888b) + (887b - 886b)\n" \
".endif\n"

#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
"886 :\n" \
old_c "\n" \
"887 :\n" \
#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
"886 :\n" \
".option push\n" \
".option norvc\n" \
".option norelax\n" \
old_c "\n" \
".option pop\n" \
"887 :\n" \
ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)

#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))

#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
enable_1, \
new_c_2, vendor_id_2, errata_id_2, \
enable_2) \
"886 :\n" \
".option push\n" \
".option norvc\n" \
".option norelax\n" \
old_c "\n" \
".option pop\n" \
"887 :\n" \
ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \
ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2)

#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
CONFIG_k_1, \
new_c_2, vendor_id_2, errata_id_2, \
CONFIG_k_2) \
__ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
IS_ENABLED(CONFIG_k_1), \
new_c_2, vendor_id_2, errata_id_2, \
IS_ENABLED(CONFIG_k_2))

#endif /* __ASSEMBLY__ */

#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE*/
#else /* CONFIG_RISCV_ALTERNATIVE */
#ifdef __ASSEMBLY__

.macro __ALTERNATIVE_CFG old_c
@@ -86,6 +147,12 @@
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG old_c

#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
CONFIG_k_1, \
new_c_2, vendor_id_2, errata_id_2, \
CONFIG_k_2) \
__ALTERNATIVE_CFG old_c

#else /* !__ASSEMBLY__ */

#define __ALTERNATIVE_CFG(old_c) \

@@ -94,8 +161,15 @@
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c)

#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
CONFIG_k_1, \
new_c_2, vendor_id_2, errata_id_2, \
CONFIG_k_2) \
__ALTERNATIVE_CFG(old_c)

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */
#endif /* CONFIG_RISCV_ALTERNATIVE */

/*
* Usage:
* ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)
@@ -118,25 +192,14 @@
* this case, this vendor can create a new macro ALTERNATIVE_2() based
* on the following sample code and then replace ALTERNATIVE() with
* ALTERNATIVE_2() to append its customized content.
*
* .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
* new_c_2, vendor_id_2, errata_id_2, enable_2
* 886 :
* \old_c
* 887 :
* ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
* ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
* .endm
*
* #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
* new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
* __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \
* new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2) \
*
* #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
* new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
* _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
* new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)
*
*/
#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, \
errata_id_1, CONFIG_k_1, \
new_content_2, vendor_id_2, \
errata_id_2, CONFIG_k_2) \
_ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, \
errata_id_1, CONFIG_k_1, \
new_content_2, vendor_id_2, \
errata_id_2, CONFIG_k_2)

#endif
@@ -12,12 +12,20 @@

#ifndef __ASSEMBLY__

#ifdef CONFIG_RISCV_ALTERNATIVE

#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/hwcap.h>

#define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */
#define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */
#define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */

void __init apply_boot_alternatives(void);
void __init apply_early_boot_alternatives(void);
void apply_module_alternatives(void *start, size_t length);

struct alt_entry {
void *old_ptr; /* address of original instruciton or data */

@@ -33,7 +41,22 @@ struct errata_checkfunc_id {
};

void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid);
unsigned long archid, unsigned long impid,
unsigned int stage);
void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);

void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned int stage);

#else /* CONFIG_RISCV_ALTERNATIVE */

static inline void apply_boot_alternatives(void) { }
static inline void apply_early_boot_alternatives(void) { }
static inline void apply_module_alternatives(void *start, size_t length) { }

#endif /* CONFIG_RISCV_ALTERNATIVE */

#endif
#endif
@@ -67,30 +67,4 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif

#ifdef __ASSEMBLY__

/* Common assembly source macros */

#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
REG_L t0, _xip_fixup
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
la t1, __data_loc
REG_L t1, _xip_phys_offset
sub \reg, \reg, t1
add \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_ASM_H */
@@ -310,47 +310,129 @@ ATOMIC_OPS()
#undef ATOMIC_OPS
#undef ATOMIC_OP

static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;

__asm__ __volatile__ (
"0: lr.w %[p], %[c]\n"
" bltz %[p], 1f\n"
" addi %[rc], %[p], 1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
: "memory");
return !(prev < 0);
}

#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int prev, rc;

__asm__ __volatile__ (
"0: lr.w %[p], %[c]\n"
" bgtz %[p], 1f\n"
" addi %[rc], %[p], -1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
: "memory");
return !(prev > 0);
}

#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;

__asm__ __volatile__ (
"0: lr.w %[p], %[c]\n"
" sub %[rc], %[p], %[o]\n"
" addi %[rc], %[p], -1\n"
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [o]"r" (offset)
:
: "memory");
return prev - offset;
return prev - 1;
}

#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
s64 prev;
long rc;

__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"
" bltz %[p], 1f\n"
" addi %[rc], %[p], 1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
: "memory");
return !(prev < 0);
}

#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative

static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
s64 prev;
long rc;

__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"
" bgtz %[p], 1f\n"
" addi %[rc], %[p], -1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
: "memory");
return !(prev > 0);
}

#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 prev;
long rc;

__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"
" sub %[rc], %[p], %[o]\n"
" addi %[rc], %[p], -1\n"
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [o]"r" (offset)
:
: "memory");
return prev - offset;
return prev - 1;
}

#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif

#endif /* _ASM_RISCV_ATOMIC_H */
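For reference, the conditional operations introduced above (inc_unless_negative, dec_unless_positive, dec_if_positive) have roughly the following portable C11 equivalents. This is only a sketch of the semantics that the hand-written lr.w/sc.w retry loops implement; it is not kernel code, and the *_sketch names are invented for this illustration.

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement only if the result stays >= 0; return the would-be result. */
static int atomic_dec_if_positive_sketch(_Atomic int *v)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	while (old - 1 >= 0 &&
	       !atomic_compare_exchange_weak_explicit(v, &old, old - 1,
						      memory_order_acq_rel,
						      memory_order_relaxed))
		; /* CAS failure reloads 'old', so the condition is rechecked */

	return old - 1; /* negative means nothing was decremented */
}

/* Increment unless the value is negative; return true if it happened. */
static bool atomic_inc_unless_negative_sketch(_Atomic int *v)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	while (old >= 0 &&
	       !atomic_compare_exchange_weak_explicit(v, &old, old + 1,
						      memory_order_acq_rel,
						      memory_order_relaxed))
		;

	return old >= 0;
}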
@@ -348,18 +348,6 @@
#define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))

#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
arch_cmpxchg((ptr), (o), (n)); \
})

#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
arch_cmpxchg_relaxed((ptr), (o), (n)) \
})

#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
@@ -0,0 +1,129 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_COMPAT_H
#define __ASM_COMPAT_H

#define COMPAT_UTS_MACHINE "riscv\0\0"

/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm-generic/compat.h>

static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
}

struct compat_user_regs_struct {
compat_ulong_t pc;
compat_ulong_t ra;
compat_ulong_t sp;
compat_ulong_t gp;
compat_ulong_t tp;
compat_ulong_t t0;
compat_ulong_t t1;
compat_ulong_t t2;
compat_ulong_t s0;
compat_ulong_t s1;
compat_ulong_t a0;
compat_ulong_t a1;
compat_ulong_t a2;
compat_ulong_t a3;
compat_ulong_t a4;
compat_ulong_t a5;
compat_ulong_t a6;
compat_ulong_t a7;
compat_ulong_t s2;
compat_ulong_t s3;
compat_ulong_t s4;
compat_ulong_t s5;
compat_ulong_t s6;
compat_ulong_t s7;
compat_ulong_t s8;
compat_ulong_t s9;
compat_ulong_t s10;
compat_ulong_t s11;
compat_ulong_t t3;
compat_ulong_t t4;
compat_ulong_t t5;
compat_ulong_t t6;
};

static inline void regs_to_cregs(struct compat_user_regs_struct *cregs,
struct pt_regs *regs)
{
cregs->pc = (compat_ulong_t) regs->epc;
cregs->ra = (compat_ulong_t) regs->ra;
cregs->sp = (compat_ulong_t) regs->sp;
cregs->gp = (compat_ulong_t) regs->gp;
cregs->tp = (compat_ulong_t) regs->tp;
cregs->t0 = (compat_ulong_t) regs->t0;
cregs->t1 = (compat_ulong_t) regs->t1;
cregs->t2 = (compat_ulong_t) regs->t2;
cregs->s0 = (compat_ulong_t) regs->s0;
cregs->s1 = (compat_ulong_t) regs->s1;
cregs->a0 = (compat_ulong_t) regs->a0;
cregs->a1 = (compat_ulong_t) regs->a1;
cregs->a2 = (compat_ulong_t) regs->a2;
cregs->a3 = (compat_ulong_t) regs->a3;
cregs->a4 = (compat_ulong_t) regs->a4;
cregs->a5 = (compat_ulong_t) regs->a5;
cregs->a6 = (compat_ulong_t) regs->a6;
cregs->a7 = (compat_ulong_t) regs->a7;
cregs->s2 = (compat_ulong_t) regs->s2;
cregs->s3 = (compat_ulong_t) regs->s3;
cregs->s4 = (compat_ulong_t) regs->s4;
cregs->s5 = (compat_ulong_t) regs->s5;
cregs->s6 = (compat_ulong_t) regs->s6;
cregs->s7 = (compat_ulong_t) regs->s7;
cregs->s8 = (compat_ulong_t) regs->s8;
cregs->s9 = (compat_ulong_t) regs->s9;
cregs->s10 = (compat_ulong_t) regs->s10;
cregs->s11 = (compat_ulong_t) regs->s11;
cregs->t3 = (compat_ulong_t) regs->t3;
cregs->t4 = (compat_ulong_t) regs->t4;
cregs->t5 = (compat_ulong_t) regs->t5;
cregs->t6 = (compat_ulong_t) regs->t6;
};

static inline void cregs_to_regs(struct compat_user_regs_struct *cregs,
struct pt_regs *regs)
{
regs->epc = (unsigned long) cregs->pc;
regs->ra = (unsigned long) cregs->ra;
regs->sp = (unsigned long) cregs->sp;
regs->gp = (unsigned long) cregs->gp;
regs->tp = (unsigned long) cregs->tp;
regs->t0 = (unsigned long) cregs->t0;
regs->t1 = (unsigned long) cregs->t1;
regs->t2 = (unsigned long) cregs->t2;
regs->s0 = (unsigned long) cregs->s0;
regs->s1 = (unsigned long) cregs->s1;
regs->a0 = (unsigned long) cregs->a0;
regs->a1 = (unsigned long) cregs->a1;
regs->a2 = (unsigned long) cregs->a2;
regs->a3 = (unsigned long) cregs->a3;
regs->a4 = (unsigned long) cregs->a4;
regs->a5 = (unsigned long) cregs->a5;
regs->a6 = (unsigned long) cregs->a6;
regs->a7 = (unsigned long) cregs->a7;
regs->s2 = (unsigned long) cregs->s2;
regs->s3 = (unsigned long) cregs->s3;
regs->s4 = (unsigned long) cregs->s4;
regs->s5 = (unsigned long) cregs->s5;
regs->s6 = (unsigned long) cregs->s6;
regs->s7 = (unsigned long) cregs->s7;
regs->s8 = (unsigned long) cregs->s8;
regs->s9 = (unsigned long) cregs->s9;
regs->s10 = (unsigned long) cregs->s10;
regs->s11 = (unsigned long) cregs->s11;
regs->t3 = (unsigned long) cregs->t3;
regs->t4 = (unsigned long) cregs->t4;
regs->t5 = (unsigned long) cregs->t5;
regs->t6 = (unsigned long) cregs->t6;
};

#endif /* __ASM_COMPAT_H */
@@ -36,6 +36,13 @@
#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
#endif

#ifdef CONFIG_64BIT
#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */
#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
#define SR_UXL_SHIFT 32
#endif

/* SATP flags */
#ifndef CONFIG_64BIT
#define SATP_PPN _AC(0x003FFFFF, UL)
@@ -8,6 +8,8 @@
#ifndef _ASM_RISCV_ELF_H
#define _ASM_RISCV_ELF_H

#include <uapi/linux/elf.h>
#include <linux/compat.h>
#include <uapi/asm/elf.h>
#include <asm/auxvec.h>
#include <asm/byteorder.h>

@@ -18,18 +20,24 @@
*/
#define ELF_ARCH EM_RISCV

#ifndef ELF_CLASS
#ifdef CONFIG_64BIT
#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32
#endif
#endif

#define ELF_DATA ELFDATA2LSB

/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_RISCV)
#define elf_check_arch(x) (((x)->e_machine == EM_RISCV) && \
((x)->e_ident[EI_CLASS] == ELF_CLASS))

extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define compat_elf_check_arch compat_elf_check_arch

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE (PAGE_SIZE)

@@ -43,8 +51,14 @@
#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)

#ifdef CONFIG_64BIT
#ifdef CONFIG_COMPAT
#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
#else
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
#endif
/*
* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,

@@ -60,11 +74,19 @@ extern unsigned long elf_hwcap;
*/
#define ELF_PLATFORM (NULL)

#define COMPAT_ELF_PLATFORM (NULL)

#ifdef CONFIG_MMU
#define ARCH_DLINFO \
do { \
/* \
* Note that we add ulong after elf_addr_t because \
* casting current->mm->context.vdso triggers a cast \
* warning of cast from pointer to integer for \
* COMPAT ELFCLASS32. \
*/ \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(elf_addr_t)current->mm->context.vdso); \
(elf_addr_t)(ulong)current->mm->context.vdso); \
NEW_AUX_ENT(AT_L1I_CACHESIZE, \
get_cache_size(1, CACHE_TYPE_INST)); \
NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, \

@@ -90,4 +112,28 @@ do { \
*(struct user_regs_struct *)regs; \
} while (0);

#ifdef CONFIG_COMPAT

#define SET_PERSONALITY(ex) \
do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_32BIT); \
else \
clear_thread_flag(TIF_32BIT); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
} while (0)

#define COMPAT_ELF_ET_DYN_BASE ((TASK_SIZE_32 / 3) * 2)

/* rv32 registers */
typedef compat_ulong_t compat_elf_greg_t;
typedef compat_elf_greg_t compat_elf_gregset_t[ELF_NGREG];

extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages \
compat_arch_setup_additional_pages

#endif /* CONFIG_COMPAT */
#endif /* _ASM_RISCV_ELF_H */
@@ -14,6 +14,14 @@
#define ERRATA_SIFIVE_NUMBER 2
#endif

#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_PBMT 0
#define ERRATA_THEAD_NUMBER 1
#endif

#define CPUFEATURE_SVPBMT 0
#define CPUFEATURE_NUMBER 1

#ifdef __ASSEMBLY__

#define ALT_INSN_FAULT(x) \

@@ -34,6 +42,57 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
: : "r" (addr) : "memory")

/*
* _val is marked as "will be overwritten", so need to set it to 0
* in the default case.
*/
#define ALT_SVPBMT_SHIFT 61
#define ALT_THEAD_PBMT_SHIFT 59
#define ALT_SVPBMT(_val, prot) \
asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
"li %0, %1\t\nslli %0,%0,%3", 0, \
CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
"li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
: "=r"(_val) \
: "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
"I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \
"I"(ALT_SVPBMT_SHIFT), \
"I"(ALT_THEAD_PBMT_SHIFT))

#ifdef CONFIG_ERRATA_THEAD_PBMT
/*
* IO/NOCACHE memory types are handled together with svpbmt,
* so on T-Head chips, check if no other memory type is set,
* and set the non-0 PMA type if applicable.
*/
#define ALT_THEAD_PMA(_val) \
asm volatile(ALTERNATIVE( \
"nop\n\t" \
"nop\n\t" \
"nop\n\t" \
"nop\n\t" \
"nop\n\t" \
"nop\n\t" \
"nop", \
"li t3, %2\n\t" \
"slli t3, t3, %4\n\t" \
"and t3, %0, t3\n\t" \
"bne t3, zero, 2f\n\t" \
"li t3, %3\n\t" \
"slli t3, t3, %4\n\t" \
"or %0, %0, t3\n\t" \
"2:", THEAD_VENDOR_ID, \
ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
: "+r"(_val) \
: "0"(_val), \
"I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
"I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
"I"(ALT_THEAD_PBMT_SHIFT))
#else
#define ALT_THEAD_PMA(_val)
#endif

#endif /* __ASSEMBLY__ */

#endif
@@ -45,8 +45,6 @@ enum fixed_addresses {
__end_of_fixed_addresses
};

#define FIXMAP_PAGE_IO PAGE_KERNEL

#define __early_set_fixmap __set_fixmap

#define __late_set_fixmap __set_fixmap

@@ -52,6 +52,7 @@ extern unsigned long elf_hwcap;
*/
enum riscv_isa_ext_id {
RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE,
RISCV_ISA_EXT_SVPBMT,
RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
};

@@ -53,4 +53,15 @@ typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,

extern riscv_kexec_method riscv_kexec_norelocate;

#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops elf_kexec_ops;

struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
#endif

#endif

@@ -16,6 +16,7 @@ typedef struct {
atomic_long_t id;
#endif
void *vdso;
void *vdso_info;
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;

@@ -7,6 +7,7 @@
#define _ASM_RISCV_PGTABLE_32_H

#include <asm-generic/pgtable-nopmd.h>
#include <linux/bits.h>
#include <linux/const.h>

/* Size of region mapped by a page global directory */
@@ -16,4 +17,20 @@

#define MAX_POSSIBLE_PHYSMEM_BITS 34

/*
* rv32 PTE format:
* | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
* PFN reserved for SW D A G U X W R V
*/
#define _PAGE_PFN_MASK GENMASK(31, 10)

#define _PAGE_NOCACHE 0
#define _PAGE_IO 0
#define _PAGE_MTMASK 0

/* Set of bits to preserve across pte_modify() */
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL))

#endif /* _ASM_RISCV_PGTABLE_32_H */

@@ -6,7 +6,9 @@
#ifndef _ASM_RISCV_PGTABLE_64_H
#define _ASM_RISCV_PGTABLE_64_H

#include <linux/bits.h>
#include <linux/const.h>
#include <asm/errata_list.h>

extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;
@@ -65,6 +67,71 @@ typedef struct {

#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))

/*
* rv64 PTE format:
* | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
* N MT RSV PFN reserved for SW D A G U X W R V
*/
#define _PAGE_PFN_MASK GENMASK(53, 10)

/*
* [62:61] Svpbmt Memory Type definitions:
*
* 00 - PMA Normal Cacheable, No change to implied PMA memory type
* 01 - NC Non-cacheable, idempotent, weakly-ordered Main Memory
* 10 - IO Non-cacheable, non-idempotent, strongly-ordered I/O memory
* 11 - Rsvd Reserved for future standard use
*/
#define _PAGE_NOCACHE_SVPBMT (1UL << 61)
#define _PAGE_IO_SVPBMT (1UL << 62)
#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)

/*
* [63:59] T-Head Memory Type definitions:
*
* 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
* 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
* 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
*/
#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
#define _PAGE_NOCACHE_THEAD 0UL
#define _PAGE_IO_THEAD (1UL << 63)
#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))

static inline u64 riscv_page_mtmask(void)
{
u64 val;

ALT_SVPBMT(val, _PAGE_MTMASK);
return val;
}

static inline u64 riscv_page_nocache(void)
{
u64 val;

ALT_SVPBMT(val, _PAGE_NOCACHE);
return val;
}

static inline u64 riscv_page_io(void)
{
u64 val;

ALT_SVPBMT(val, _PAGE_IO);
return val;
}

#define _PAGE_NOCACHE riscv_page_nocache()
#define _PAGE_IO riscv_page_io()
#define _PAGE_MTMASK riscv_page_mtmask()

/* Set of bits to preserve across pte_modify() */
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL | \
_PAGE_MTMASK))

static inline int pud_present(pud_t pud)
{
return (pud_val(pud) & _PAGE_PRESENT);
@@ -113,12 +180,12 @@ static inline unsigned long _pud_pfn(pud_t pud)

static inline pmd_t *pud_pgtable(pud_t pud)
{
return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
}

static inline struct page *pud_page(pud_t pud)
{
return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
}

#define mm_p4d_folded mm_p4d_folded
@@ -143,12 +210,16 @@ static inline bool mm_pud_folded(struct mm_struct *mm)

static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
unsigned long prot_val = pgprot_val(prot);

ALT_THEAD_PMA(prot_val);

return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pmd_pfn(pmd_t pmd)
{
return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
return __page_val_to_pfn(pmd_val(pmd));
}

#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)

@@ -6,12 +6,6 @@
#ifndef _ASM_RISCV_PGTABLE_BITS_H
#define _ASM_RISCV_PGTABLE_BITS_H

/*
* PTE format:
* | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
* PFN reserved for SW D A G U X W R V
*/

#define _PAGE_ACCESSED_OFFSET 6

#define _PAGE_PRESENT (1 << 0)
@@ -35,10 +29,6 @@

#define _PAGE_PFN_SHIFT 10

/* Set of bits to preserve across pte_modify() */
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL))
/*
* when all of R/W/X are zero, the PTE is a pointer to the next level
* of the page table; otherwise, it is a leaf PTE.

@@ -108,6 +108,8 @@
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
@@ -179,11 +181,8 @@ extern struct pt_alloc_ops pt_ops __initdata;

#define PAGE_TABLE __pgprot(_PAGE_TABLE)

/*
* The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
* change the properties of memory regions.
*/
#define _PAGE_IOREMAP _PAGE_KERNEL
#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];

@@ -253,7 +252,11 @@ static inline void pmd_clear(pmd_t *pmdp)

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
unsigned long prot_val = pgprot_val(prot);

ALT_THEAD_PMA(prot_val);

return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
@@ -263,12 +266,12 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)

static inline struct page *pmd_page(pmd_t pmd)
{
return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
@@ -284,7 +287,7 @@ static inline pte_t pud_pte(pud_t pud)
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
return (pte_val(pte) >> _PAGE_PFN_SHIFT);
return __page_val_to_pfn(pte_val(pte));
}

#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -292,7 +295,11 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
unsigned long prot_val = pgprot_val(prot);

ALT_THEAD_PMA(prot_val);

return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
@@ -406,7 +413,11 @@ static inline int pmd_protnone(pmd_t pmd)
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
unsigned long newprot_val = pgprot_val(newprot);

ALT_THEAD_PMA(newprot_val);

return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
@@ -539,6 +550,28 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);

prot &= ~_PAGE_MTMASK;
prot |= _PAGE_IO;

return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);

prot &= ~_PAGE_MTMASK;
prot |= _PAGE_NOCACHE;

return __pgprot(prot);
}

/*
* THP functions
*/
@@ -761,8 +794,17 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
* 63–48 all equal to bit 47, or else a page-fault exception will occur."
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
#endif

#else
#define TASK_SIZE FIXADDR_START
#define TASK_SIZE_MIN TASK_SIZE

@@ -19,7 +19,11 @@
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)

#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
#ifdef CONFIG_64BIT
#define STACK_TOP_MAX TASK_SIZE_64
#else
#define STACK_TOP_MAX TASK_SIZE
#endif
#define STACK_ALIGN 16

#ifndef __ASSEMBLY__

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_SIGNAL32_H
#define __ASM_SIGNAL32_H

#if IS_ENABLED(CONFIG_COMPAT)
int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
#else
static inline
int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
return -1;
}
#endif

#endif

@@ -16,6 +16,7 @@

/* The array of function pointers for syscalls. */
extern void * const sys_call_table[];
extern void * const compat_sys_call_table[];

/*
* Only the low 32 bits of orig_r0 are meaningful, so we return int.

@@ -97,6 +97,7 @@ struct thread_info {
#define TIF_SECCOMP 8 /* syscall secure computing */
#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
#define TIF_32BIT 11 /* compat-mode 32bit process */

#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)

@@ -11,6 +11,17 @@
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_MEMFD_SECRET

#ifdef CONFIG_COMPAT
#define __ARCH_WANT_COMPAT_TRUNCATE64
#define __ARCH_WANT_COMPAT_FTRUNCATE64
#define __ARCH_WANT_COMPAT_FALLOCATE
#define __ARCH_WANT_COMPAT_PREAD64
#define __ARCH_WANT_COMPAT_PWRITE64
#define __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
#define __ARCH_WANT_COMPAT_READAHEAD
#define __ARCH_WANT_COMPAT_FADVISE64_64
#endif

#include <uapi/asm/unistd.h>

#define NR_syscalls (__NR_syscalls)

@@ -21,6 +21,15 @@

#define VDSO_SYMBOL(base, name) \
(void __user *)((unsigned long)(base) + __vdso_##name##_offset)

#ifdef CONFIG_COMPAT
#include <generated/compat_vdso-offsets.h>

#define COMPAT_VDSO_SYMBOL(base, name) \
(void __user *)((unsigned long)(base) + compat__vdso_##name##_offset)

#endif /* CONFIG_COMPAT */

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

@@ -6,5 +6,6 @@
#define ASM_VENDOR_LIST_H

#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7

#endif

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* XIP fixup macros, only useful in assembly.
*/
#ifndef _ASM_RISCV_XIP_FIXUP_H
#define _ASM_RISCV_XIP_FIXUP_H

#include <linux/pgtable.h>

#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
REG_L t0, _xip_fixup
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
la t1, __data_loc
REG_L t1, _xip_phys_offset
sub \reg, \reg, t1
add \reg, \reg, t0
.endm

_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

#endif

@@ -15,7 +15,7 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

#ifdef __LP64__
#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#endif /* __LP64__ */

@@ -14,10 +14,25 @@ ifdef CONFIG_KEXEC
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
endif

# cmodel=medany and notrace when patching early
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
CFLAGS_alternative.o := -mcmodel=medany
CFLAGS_cpufeature.o := -mcmodel=medany
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
endif
ifdef CONFIG_KASAN
KASAN_SANITIZE_alternative.o := n
KASAN_SANITIZE_cpufeature.o := n
endif
endif

extra-y += head.o
extra-y += vmlinux.lds

obj-y += soc.o
obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
obj-y += cpu.o
obj-y += cpufeature.o
obj-y += entry.o
@@ -64,8 +79,12 @@ endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o
obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o

obj-$(CONFIG_JUMP_LABEL) += jump_label.o

obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_COMPAT) += compat_syscall_table.o
obj-$(CONFIG_COMPAT) += compat_signal.o
obj-$(CONFIG_COMPAT) += compat_vdso/
@ -0,0 +1,118 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* alternative runtime patching
|
||||
* inspired by the ARM64 and x86 version
|
||||
*
|
||||
* Copyright (C) 2021 Sifive.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/vendorid_list.h>
|
||||
#include <asm/sbi.h>
|
||||
#include <asm/csr.h>
|
||||
|
||||
struct cpu_manufacturer_info_t {
|
||||
unsigned long vendor_id;
|
||||
unsigned long arch_id;
|
||||
unsigned long imp_id;
|
||||
void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
|
||||
unsigned long archid, unsigned long impid,
|
||||
unsigned int stage);
|
||||
};
|
||||
|
||||
static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
|
||||
{
|
||||
#ifdef CONFIG_RISCV_M_MODE
|
||||
cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
|
||||
cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
|
||||
cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
|
||||
#else
|
||||
cpu_mfr_info->vendor_id = sbi_get_mvendorid();
|
||||
cpu_mfr_info->arch_id = sbi_get_marchid();
|
||||
cpu_mfr_info->imp_id = sbi_get_mimpid();
|
||||
#endif
|
||||
|
||||
switch (cpu_mfr_info->vendor_id) {
|
||||
#ifdef CONFIG_ERRATA_SIFIVE
|
||||
case SIFIVE_VENDOR_ID:
|
||||
cpu_mfr_info->vendor_patch_func = sifive_errata_patch_func;
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_ERRATA_THEAD
|
||||
case THEAD_VENDOR_ID:
|
||||
cpu_mfr_info->vendor_patch_func = thead_errata_patch_func;
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
cpu_mfr_info->vendor_patch_func = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called very early in the boot process (directly after we run
|
||||
* a feature detect on the boot CPU). No need to worry about other CPUs
|
||||
* here.
|
||||
*/
|
||||
static void __init_or_module _apply_alternatives(struct alt_entry *begin,
|
||||
struct alt_entry *end,
|
||||
unsigned int stage)
|
||||
{
|
||||
struct cpu_manufacturer_info_t cpu_mfr_info;
|
||||
|
||||
riscv_fill_cpu_mfr_info(&cpu_mfr_info);
|
||||
|
||||
riscv_cpufeature_patch_func(begin, end, stage);
|
||||
|
||||
if (!cpu_mfr_info.vendor_patch_func)
|
||||
return;
|
||||
|
||||
cpu_mfr_info.vendor_patch_func(begin, end,
|
||||
cpu_mfr_info.arch_id,
|
||||
cpu_mfr_info.imp_id,
|
||||
stage);
|
||||
}
|
||||
|
||||
void __init apply_boot_alternatives(void)
|
||||
{
|
||||
/* If called on non-boot cpu things could go wrong */
|
||||
WARN_ON(smp_processor_id() != 0);
|
||||
|
||||
_apply_alternatives((struct alt_entry *)__alt_start,
|
||||
(struct alt_entry *)__alt_end,
|
||||
RISCV_ALTERNATIVES_BOOT);
|
||||
}
|
||||
|
||||
/*
|
||||
* apply_early_boot_alternatives() is called from setup_vm() with MMU-off.
|
||||
*
|
||||
* Following requirements should be honoured for it to work correctly:
|
||||
* 1) It should use PC-relative addressing for accessing kernel symbols.
|
||||
* To achieve this we always use GCC cmodel=medany.
|
||||
* 2) The compiler instrumentation for FTRACE will not work for setup_vm()
|
||||
* so disable compiler instrumentation when FTRACE is enabled.
|
||||
*
|
||||
* Currently, the above requirements are honoured by using custom CFLAGS
|
||||
* for alternative.o in kernel/Makefile.
|
||||
*/
|
||||
void __init apply_early_boot_alternatives(void)
|
||||
{
|
||||
#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
|
||||
_apply_alternatives((struct alt_entry *)__alt_start,
|
||||
(struct alt_entry *)__alt_end,
|
||||
RISCV_ALTERNATIVES_EARLY_BOOT);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
void apply_module_alternatives(void *start, size_t length)
|
||||
{
|
||||
_apply_alternatives((struct alt_entry *)start,
|
||||
(struct alt_entry *)(start + length),
|
||||
RISCV_ALTERNATIVES_MODULE);
|
||||
}
|
||||
#endif
|
|
@ -0,0 +1,243 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/csr.h>
|
||||
#include <asm/signal32.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/vdso.h>
|
||||
|
||||
#define COMPAT_DEBUG_SIG 0
|
||||
|
||||
struct compat_sigcontext {
|
||||
struct compat_user_regs_struct sc_regs;
|
||||
union __riscv_fp_state sc_fpregs;
|
||||
};
|
||||
|
||||
struct compat_ucontext {
|
||||
compat_ulong_t uc_flags;
|
||||
struct compat_ucontext *uc_link;
|
||||
compat_stack_t uc_stack;
|
||||
sigset_t uc_sigmask;
|
||||
/* There's some padding here to allow sigset_t to be expanded in the
|
||||
* future. Though this is unlikely, other architectures put uc_sigmask
|
||||
* at the end of this structure and explicitly state it can be
|
||||
* expanded, so we didn't want to box ourselves in here. */
|
||||
__u8 __unused[1024 / 8 - sizeof(sigset_t)];
|
||||
/* We can't put uc_sigmask at the end of this structure because we need
|
||||
* to be able to expand sigcontext in the future. For example, the
|
||||
* vector ISA extension will almost certainly add ISA state. We want
|
||||
* to ensure all user-visible ISA state can be saved and restored via a
|
||||
* ucontext, so we're putting this at the end in order to allow for
|
||||
* infinite extensibility. Since we know this will be extended and we
|
||||
* assume sigset_t won't be extended an extreme amount, we're
|
||||
* prioritizing this. */
|
||||
struct compat_sigcontext uc_mcontext;
|
||||
};
|
||||
|
||||
struct compat_rt_sigframe {
|
||||
struct compat_siginfo info;
|
||||
struct compat_ucontext uc;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_FPU
|
||||
static long compat_restore_fp_state(struct pt_regs *regs,
|
||||
union __riscv_fp_state __user *sc_fpregs)
|
||||
{
|
||||
long err;
|
||||
struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
|
||||
size_t i;
|
||||
|
||||
err = __copy_from_user(¤t->thread.fstate, state, sizeof(*state));
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
||||
fstate_restore(current, regs);
|
||||
|
||||
/* We support no other extension state at this time. */
|
||||
for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
|
||||
u32 value;
|
||||
|
||||
err = __get_user(value, &sc_fpregs->q.reserved[i]);
|
||||
if (unlikely(err))
|
||||
break;
|
||||
if (value != 0)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static long compat_save_fp_state(struct pt_regs *regs,
|
||||
union __riscv_fp_state __user *sc_fpregs)
|
||||
{
|
||||
long err;
|
||||
struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
|
||||
size_t i;
|
||||
|
||||
fstate_save(current, regs);
|
||||
err = __copy_to_user(state, ¤t->thread.fstate, sizeof(*state));
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
||||
/* We support no other extension state at this time. */
|
||||
for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
|
||||
err = __put_user(0, &sc_fpregs->q.reserved[i]);
|
||||
if (unlikely(err))
|
||||
break;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
#else
|
||||
#define compat_save_fp_state(task, regs) (0)
|
||||
#define compat_restore_fp_state(task, regs) (0)
|
||||
#endif
|
||||
|
||||
static long compat_restore_sigcontext(struct pt_regs *regs,
|
||||
struct compat_sigcontext __user *sc)
|
||||
{
|
||||
long err;
|
||||
struct compat_user_regs_struct cregs;
|
||||
|
||||
/* sc_regs is structured the same as the start of pt_regs */
|
||||
err = __copy_from_user(&cregs, &sc->sc_regs, sizeof(sc->sc_regs));
|
||||
|
||||
cregs_to_regs(&cregs, regs);
|
||||
|
||||
/* Restore the floating-point state. */
|
||||
if (has_fpu())
|
||||
err |= compat_restore_fp_state(regs, &sc->sc_fpregs);
|
||||
return err;
|
||||
}
|
||||
|
||||
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
|
||||
{
|
||||
struct pt_regs *regs = current_pt_regs();
|
||||
struct compat_rt_sigframe __user *frame;
|
||||
struct task_struct *task;
|
||||
sigset_t set;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
frame = (struct compat_rt_sigframe __user *)regs->sp;
|
||||
|
||||
if (!access_ok(frame, sizeof(*frame)))
|
||||
goto badframe;
|
||||
|
||||
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
|
||||
goto badframe;
|
||||
|
||||
set_current_blocked(&set);
|
||||
|
||||
if (compat_restore_sigcontext(regs, &frame->uc.uc_mcontext))
|
||||
goto badframe;
|
||||
|
||||
if (compat_restore_altstack(&frame->uc.uc_stack))
|
||||
goto badframe;
|
||||
|
||||
return regs->a0;
|
||||
|
||||
badframe:
|
||||
task = current;
|
||||
if (show_unhandled_signals) {
|
||||
pr_info_ratelimited(
|
||||
"%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
|
||||
task->comm, task_pid_nr(task), __func__,
|
||||
frame, (void *)regs->epc, (void *)regs->sp);
|
||||
}
|
||||
force_sig(SIGSEGV);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long compat_setup_sigcontext(struct compat_rt_sigframe __user *frame,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct compat_sigcontext __user *sc = &frame->uc.uc_mcontext;
|
||||
struct compat_user_regs_struct cregs;
|
||||
long err;
|
||||
|
||||
regs_to_cregs(&cregs, regs);
|
||||
|
||||
/* sc_regs is structured the same as the start of pt_regs */
|
||||
err = __copy_to_user(&sc->sc_regs, &cregs, sizeof(sc->sc_regs));
|
||||
/* Save the floating-point state. */
|
||||
if (has_fpu())
|
||||
err |= compat_save_fp_state(regs, &sc->sc_fpregs);
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline void __user *compat_get_sigframe(struct ksignal *ksig,
|
||||
struct pt_regs *regs, size_t framesize)
|
||||
{
|
||||
unsigned long sp;
|
||||
/* Default to using normal stack */
|
||||
sp = regs->sp;
|
||||
|
||||
/*
|
||||
* If we are on the alternate signal stack and would overflow it, don't.
|
||||
* Return an always-bogus address instead so we will die with SIGSEGV.
|
||||
*/
|
||||
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
|
||||
return (void __user __force *)(-1UL);
|
||||
|
||||
/* This is the X/Open sanctioned signal stack switching. */
|
||||
sp = sigsp(sp, ksig) - framesize;
|
||||
|
||||
/* Align the stack frame. */
|
||||
sp &= ~0xfUL;
|
||||
|
||||
return (void __user *)sp;
|
||||
}
|
||||
|
||||
int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct compat_rt_sigframe __user *frame;
|
||||
long err = 0;
|
||||
|
||||
frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
|
||||
if (!access_ok(frame, sizeof(*frame)))
|
||||
return -EFAULT;
|
||||
|
||||
err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
|
||||
|
||||
/* Create the ucontext. */
|
||||
err |= __put_user(0, &frame->uc.uc_flags);
|
||||
err |= __put_user(NULL, &frame->uc.uc_link);
|
||||
err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
|
||||
err |= compat_setup_sigcontext(frame, regs);
|
||||
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
regs->ra = (unsigned long)COMPAT_VDSO_SYMBOL(
|
||||
current->mm->context.vdso, rt_sigreturn);
|
||||
|
||||
/*
|
||||
* Set up registers for signal handler.
|
||||
* Registers that we don't modify keep the value they had from
|
||||
* user-space at the time we took the signal.
|
||||
* We always pass siginfo and mcontext, regardless of SA_SIGINFO,
|
||||
* since some things rely on this (e.g. glibc's debug/segfault.c).
|
||||
*/
|
||||
regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
|
||||
regs->sp = (unsigned long)frame;
|
||||
regs->a0 = ksig->sig; /* a0: signal number */
|
||||
regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
|
||||
regs->a2 = (unsigned long)(&frame->uc); /* a2: ucontext pointer */
|
||||
|
||||
#if COMPAT_DEBUG_SIG
|
||||
pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
|
||||
current->comm, task_pid_nr(current), ksig->sig,
|
||||
(void *)regs->epc, (void *)regs->ra, frame);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
#define __SYSCALL_COMPAT
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <asm-generic/mman-common.h>
|
||||
#include <asm-generic/syscalls.h>
|
||||
#include <asm/syscall.h>
|
||||
|
||||
#undef __SYSCALL
|
||||
#define __SYSCALL(nr, call) [nr] = (call),
|
||||
|
||||
asmlinkage long compat_sys_rt_sigreturn(void);
|
||||
|
||||
void * const compat_sys_call_table[__NR_syscalls] = {
|
||||
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
|
||||
#include <asm/unistd.h>
|
||||
};
|
|
@ -0,0 +1,2 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
compat_vdso.lds
|
|
@ -0,0 +1,78 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Makefile for compat_vdso
|
||||
#
|
||||
|
||||
# Symbols present in the compat_vdso
|
||||
compat_vdso-syms = rt_sigreturn
|
||||
compat_vdso-syms += getcpu
|
||||
compat_vdso-syms += flush_icache
|
||||
|
||||
COMPAT_CC := $(CC)
|
||||
COMPAT_LD := $(LD)
|
||||
|
||||
COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
|
||||
COMPAT_LD_FLAGS := -melf32lriscv
|
||||
|
||||
# Files to link into the compat_vdso
|
||||
obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o
|
||||
|
||||
# Build rules
|
||||
targets := $(obj-compat_vdso) compat_vdso.so compat_vdso.so.dbg compat_vdso.lds
|
||||
obj-compat_vdso := $(addprefix $(obj)/, $(obj-compat_vdso))
|
||||
|
||||
obj-y += compat_vdso.o
|
||||
CPPFLAGS_compat_vdso.lds += -P -C -U$(ARCH)
|
||||
|
||||
# Disable profiling and instrumentation for VDSO code
|
||||
GCOV_PROFILE := n
|
||||
KCOV_INSTRUMENT := n
|
||||
KASAN_SANITIZE := n
|
||||
UBSAN_SANITIZE := n
|
||||
|
||||
# Force dependency
|
||||
$(obj)/compat_vdso.o: $(obj)/compat_vdso.so
|
||||
|
||||
# link rule for the .so file, .lds has to be first
|
||||
$(obj)/compat_vdso.so.dbg: $(obj)/compat_vdso.lds $(obj-compat_vdso) FORCE
|
||||
$(call if_changed,compat_vdsold)
|
||||
LDFLAGS_compat_vdso.so.dbg = -shared -S -soname=linux-compat_vdso.so.1 \
|
||||
--build-id=sha1 --hash-style=both --eh-frame-hdr
|
||||
|
||||
$(obj-compat_vdso): %.o: %.S FORCE
|
||||
$(call if_changed_dep,compat_vdsoas)
|
||||
|
||||
# strip rule for the .so file
|
||||
$(obj)/%.so: OBJCOPYFLAGS := -S
|
||||
$(obj)/%.so: $(obj)/%.so.dbg FORCE
|
||||
$(call if_changed,objcopy)
|
||||
|
||||
# Generate VDSO offsets using helper script
|
||||
gen-compat_vdsosym := $(srctree)/$(src)/gen_compat_vdso_offsets.sh
|
||||
quiet_cmd_compat_vdsosym = VDSOSYM $@
|
||||
cmd_compat_vdsosym = $(NM) $< | $(gen-compat_vdsosym) | LC_ALL=C sort > $@
|
||||
|
||||
include/generated/compat_vdso-offsets.h: $(obj)/compat_vdso.so.dbg FORCE
|
||||
$(call if_changed,compat_vdsosym)
|
||||
|
||||
# actual build commands
|
||||
# The DSO images are built using a special linker script
|
||||
# Make sure only to export the intended __compat_vdso_xxx symbol offsets.
|
||||
quiet_cmd_compat_vdsold = VDSOLD $@
|
||||
cmd_compat_vdsold = $(COMPAT_LD) $(ld_flags) $(COMPAT_LD_FLAGS) -T $(filter-out FORCE,$^) -o $@.tmp && \
|
||||
$(OBJCOPY) $(patsubst %, -G __compat_vdso_%, $(compat_vdso-syms)) $@.tmp $@ && \
|
||||
rm $@.tmp
|
||||
|
||||
# actual build commands
|
||||
quiet_cmd_compat_vdsoas = VDSOAS $@
|
||||
cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
|
||||
|
||||
# install commands for the unstripped file
|
||||
quiet_cmd_compat_vdso_install = INSTALL $@
|
||||
cmd_compat_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/compat_vdso/$@
|
||||
|
||||
compat_vdso.so: $(obj)/compat_vdso.so.dbg
|
||||
@mkdir -p $(MODLIB)/compat_vdso
|
||||
$(call cmd,compat_vdso_install)
|
||||
|
||||
compat_vdso_install: compat_vdso.so
|
|
@ -0,0 +1,8 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#define vdso_start compat_vdso_start
|
||||
#define vdso_end compat_vdso_end
|
||||
|
||||
#define __VDSO_PATH "arch/riscv/kernel/compat_vdso/compat_vdso.so"
|
||||
|
||||
#include "../vdso/vdso.S"
|
|
@ -0,0 +1,3 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#include "../vdso/vdso.lds.S"
|
|
@ -0,0 +1,3 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#include "../vdso/flush_icache.S"
|
|
@ -0,0 +1,5 @@
|
|||
#!/bin/sh
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
LC_ALL=C
|
||||
sed -n -e 's/^[0]\+\(0[0-9a-fA-F]*\) . \(__vdso_[a-zA-Z0-9_]*\)$/\#define compat\2_offset\t0x\1/p'
|
|
@ -0,0 +1,3 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#include "../vdso/getcpu.S"
|
|
@ -0,0 +1,3 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#include "../vdso/note.S"
|
|
@ -0,0 +1,3 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#include "../vdso/rt_sigreturn.S"
|
|
@ -88,6 +88,7 @@ int riscv_of_parent_hartid(struct device_node *node)
|
|||
*/
|
||||
static struct riscv_isa_ext_data isa_ext_arr[] = {
|
||||
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
|
||||
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
|
||||
__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
|
||||
};
|
||||
|
||||
|
@ -138,6 +139,7 @@ static void print_mmu(struct seq_file *f)
|
|||
{
|
||||
char sv_type[16];
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#if defined(CONFIG_32BIT)
|
||||
strncpy(sv_type, "sv32", 5);
|
||||
#elif defined(CONFIG_64BIT)
|
||||
|
@ -148,6 +150,9 @@ static void print_mmu(struct seq_file *f)
|
|||
else
|
||||
strncpy(sv_type, "sv39", 5);
|
||||
#endif
|
||||
#else
|
||||
strncpy(sv_type, "none", 5);
|
||||
#endif /* CONFIG_MMU */
|
||||
seq_printf(f, "mmu\t\t: %s\n", sv_type);
|
||||
}
|
||||
|
||||
|
|
|
@ -8,9 +8,15 @@
|
|||
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/libfdt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/errata_list.h>
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/patch.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
|
@ -192,6 +198,7 @@ void __init riscv_fill_hwcap(void)
|
|||
set_bit(*ext - 'a', this_isa);
|
||||
} else {
|
||||
SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
|
||||
SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
|
||||
}
|
||||
#undef SET_ISA_EXT_MAP
|
||||
}
|
||||
|
@ -237,3 +244,74 @@ void __init riscv_fill_hwcap(void)
|
|||
static_branch_enable(&cpu_hwcap_fpu);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RISCV_ALTERNATIVE
|
||||
struct cpufeature_info {
|
||||
char name[ERRATA_STRING_LENGTH_MAX];
|
||||
bool (*check_func)(unsigned int stage);
|
||||
};
|
||||
|
||||
static bool __init_or_module cpufeature_svpbmt_check_func(unsigned int stage)
|
||||
{
|
||||
#ifdef CONFIG_RISCV_ISA_SVPBMT
|
||||
switch (stage) {
|
||||
case RISCV_ALTERNATIVES_EARLY_BOOT:
|
||||
return false;
|
||||
default:
|
||||
return riscv_isa_extension_available(NULL, SVPBMT);
|
||||
}
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static const struct cpufeature_info __initdata_or_module
|
||||
cpufeature_list[CPUFEATURE_NUMBER] = {
|
||||
{
|
||||
.name = "svpbmt",
|
||||
.check_func = cpufeature_svpbmt_check_func
|
||||
},
|
||||
};
|
||||
|
||||
static u32 __init_or_module cpufeature_probe(unsigned int stage)
|
||||
{
|
||||
const struct cpufeature_info *info;
|
||||
u32 cpu_req_feature = 0;
|
||||
int idx;
|
||||
|
||||
for (idx = 0; idx < CPUFEATURE_NUMBER; idx++) {
|
||||
info = &cpufeature_list[idx];
|
||||
|
||||
if (info->check_func(stage))
|
||||
cpu_req_feature |= (1U << idx);
|
||||
}
|
||||
|
||||
return cpu_req_feature;
|
||||
}
|
||||
|
||||
void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
|
||||
struct alt_entry *end,
|
||||
unsigned int stage)
|
||||
{
|
||||
u32 cpu_req_feature = cpufeature_probe(stage);
|
||||
u32 cpu_apply_feature = 0;
|
||||
struct alt_entry *alt;
|
||||
u32 tmp;
|
||||
|
||||
for (alt = begin; alt < end; alt++) {
|
||||
if (alt->vendor_id != 0)
|
||||
continue;
|
||||
if (alt->errata_id >= CPUFEATURE_NUMBER) {
|
||||
WARN(1, "This feature id:%d is not in kernel cpufeature list",
|
||||
alt->errata_id);
|
||||
continue;
|
||||
}
|
||||
|
||||
tmp = (1U << alt->errata_id);
|
||||
if (cpu_req_feature & tmp) {
|
||||
patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
|
||||
cpu_apply_feature |= tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,448 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Load ELF vmlinux file for the kexec_file_load syscall.
|
||||
*
|
||||
* Copyright (C) 2021 Huawei Technologies Co, Ltd.
|
||||
*
|
||||
* Author: Liao Chang (liaochang1@huawei.com)
|
||||
*
|
||||
* Based on kexec-tools' kexec-elf-riscv.c, heavily modified
|
||||
* for kernel.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "kexec_image: " fmt
|
||||
|
||||
#include <linux/elf.h>
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/libfdt.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
|
||||
struct kexec_elf_info *elf_info, unsigned long old_pbase,
|
||||
unsigned long new_pbase)
|
||||
{
|
||||
int i;
|
||||
int ret = 0;
|
||||
size_t size;
|
||||
struct kexec_buf kbuf;
|
||||
const struct elf_phdr *phdr;
|
||||
|
||||
kbuf.image = image;
|
||||
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &elf_info->proghdrs[i];
|
||||
if (phdr->p_type != PT_LOAD)
|
||||
continue;
|
||||
|
||||
size = phdr->p_filesz;
|
||||
if (size > phdr->p_memsz)
|
||||
size = phdr->p_memsz;
|
||||
|
||||
kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;
|
||||
kbuf.bufsz = size;
|
||||
kbuf.buf_align = phdr->p_align;
|
||||
kbuf.mem = phdr->p_paddr - old_pbase + new_pbase;
|
||||
kbuf.memsz = phdr->p_memsz;
|
||||
kbuf.top_down = false;
|
||||
ret = kexec_add_buffer(&kbuf);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Go through the available phsyical memory regions and find one that hold
|
||||
* an image of the specified size.
|
||||
*/
|
||||
static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
|
||||
struct elfhdr *ehdr, struct kexec_elf_info *elf_info,
|
||||
unsigned long *old_pbase, unsigned long *new_pbase)
|
||||
{
|
||||
int i;
|
||||
int ret;
|
||||
struct kexec_buf kbuf;
|
||||
const struct elf_phdr *phdr;
|
||||
unsigned long lowest_paddr = ULONG_MAX;
|
||||
unsigned long lowest_vaddr = ULONG_MAX;
|
||||
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &elf_info->proghdrs[i];
|
||||
if (phdr->p_type != PT_LOAD)
|
||||
continue;
|
||||
|
||||
if (lowest_paddr > phdr->p_paddr)
|
||||
lowest_paddr = phdr->p_paddr;
|
||||
|
||||
if (lowest_vaddr > phdr->p_vaddr)
|
||||
lowest_vaddr = phdr->p_vaddr;
|
||||
}
|
||||
|
||||
kbuf.image = image;
|
||||
kbuf.buf_min = lowest_paddr;
|
||||
kbuf.buf_max = ULONG_MAX;
|
||||
kbuf.buf_align = PAGE_SIZE;
|
||||
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
|
||||
kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
|
||||
kbuf.top_down = false;
|
||||
ret = arch_kexec_locate_mem_hole(&kbuf);
|
||||
if (!ret) {
|
||||
*old_pbase = lowest_paddr;
|
||||
*new_pbase = kbuf.mem;
|
||||
image->start = ehdr->e_entry - lowest_vaddr + kbuf.mem;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
|
||||
{
|
||||
unsigned int *nr_ranges = arg;
|
||||
|
||||
(*nr_ranges)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
|
||||
{
|
||||
struct crash_mem *cmem = arg;
|
||||
|
||||
cmem->ranges[cmem->nr_ranges].start = res->start;
|
||||
cmem->ranges[cmem->nr_ranges].end = res->end;
|
||||
cmem->nr_ranges++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int prepare_elf_headers(void **addr, unsigned long *sz)
|
||||
{
|
||||
struct crash_mem *cmem;
|
||||
unsigned int nr_ranges;
|
||||
int ret;
|
||||
|
||||
nr_ranges = 1; /* For exclusion of crashkernel region */
|
||||
walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
|
||||
|
||||
cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
|
||||
if (!cmem)
|
||||
return -ENOMEM;
|
||||
|
||||
cmem->max_nr_ranges = nr_ranges;
|
||||
cmem->nr_ranges = 0;
|
||||
ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* Exclude crashkernel region */
|
||||
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
|
||||
if (!ret)
|
||||
ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
|
||||
|
||||
out:
|
||||
kfree(cmem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
|
||||
unsigned long cmdline_len)
|
||||
{
|
||||
int elfcorehdr_strlen;
|
||||
char *cmdline_ptr;
|
||||
|
||||
cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
|
||||
if (!cmdline_ptr)
|
||||
return NULL;
|
||||
|
||||
elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ",
|
||||
image->elf_load_addr);
|
||||
|
||||
if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) {
|
||||
pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n");
|
||||
kfree(cmdline_ptr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len);
|
||||
/* Ensure it's nul terminated */
|
||||
cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0';
|
||||
return cmdline_ptr;
|
||||
}
|
||||
|
||||
static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
|
||||
unsigned long kernel_len, char *initrd,
|
||||
unsigned long initrd_len, char *cmdline,
|
||||
unsigned long cmdline_len)
|
||||
{
|
||||
int ret;
|
||||
unsigned long old_kernel_pbase = ULONG_MAX;
|
||||
unsigned long new_kernel_pbase = 0UL;
|
||||
unsigned long initrd_pbase = 0UL;
|
||||
unsigned long headers_sz;
|
||||
unsigned long kernel_start;
|
||||
void *fdt, *headers;
|
||||
struct elfhdr ehdr;
|
||||
struct kexec_buf kbuf;
|
||||
struct kexec_elf_info elf_info;
|
||||
char *modified_cmdline = NULL;
|
||||
|
||||
ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ret = elf_find_pbase(image, kernel_len, &ehdr, &elf_info,
|
||||
&old_kernel_pbase, &new_kernel_pbase);
|
||||
if (ret)
|
||||
goto out;
|
||||
kernel_start = image->start;
|
||||
pr_notice("The entry point of kernel at 0x%lx\n", image->start);
|
||||
|
||||
/* Add the kernel binary to the image */
|
||||
ret = riscv_kexec_elf_load(image, &ehdr, &elf_info,
|
||||
old_kernel_pbase, new_kernel_pbase);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
kbuf.image = image;
|
||||
kbuf.buf_min = new_kernel_pbase + kernel_len;
|
||||
kbuf.buf_max = ULONG_MAX;
|
||||
|
||||
/* Add elfcorehdr */
|
||||
if (image->type == KEXEC_TYPE_CRASH) {
|
||||
ret = prepare_elf_headers(&headers, &headers_sz);
|
||||
if (ret) {
|
||||
pr_err("Preparing elf core header failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
kbuf.buffer = headers;
|
||||
kbuf.bufsz = headers_sz;
|
||||
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
|
||||
kbuf.memsz = headers_sz;
|
||||
kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
|
||||
kbuf.top_down = true;
|
||||
|
||||
ret = kexec_add_buffer(&kbuf);
|
||||
if (ret) {
|
||||
vfree(headers);
|
||||
goto out;
|
||||
}
|
||||
image->elf_headers = headers;
|
||||
image->elf_load_addr = kbuf.mem;
|
||||
image->elf_headers_sz = headers_sz;
|
||||
|
||||
pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
|
||||
image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
|
||||
|
||||
/* Setup cmdline for kdump kernel case */
|
||||
modified_cmdline = setup_kdump_cmdline(image, cmdline,
|
||||
cmdline_len);
|
||||
if (!modified_cmdline) {
|
||||
pr_err("Setting up cmdline for kdump kernel failed\n");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
cmdline = modified_cmdline;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
|
||||
/* Add purgatory to the image */
|
||||
kbuf.top_down = true;
|
||||
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
|
||||
ret = kexec_load_purgatory(image, &kbuf);
|
||||
if (ret) {
|
||||
pr_err("Error loading purgatory ret=%d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry",
|
||||
&kernel_start,
|
||||
sizeof(kernel_start), 0);
|
||||
if (ret)
|
||||
pr_err("Error update purgatory ret=%d\n", ret);
|
||||
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
|
||||
|
||||
/* Add the initrd to the image */
|
||||
if (initrd != NULL) {
|
||||
kbuf.buffer = initrd;
|
||||
kbuf.bufsz = kbuf.memsz = initrd_len;
|
||||
kbuf.buf_align = PAGE_SIZE;
|
||||
kbuf.top_down = false;
|
||||
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
|
||||
ret = kexec_add_buffer(&kbuf);
|
||||
if (ret)
|
||||
goto out;
|
||||
initrd_pbase = kbuf.mem;
|
||||
pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase);
|
||||
}
|
||||
|
||||
/* Add the DTB to the image */
|
||||
fdt = of_kexec_alloc_and_setup_fdt(image, initrd_pbase,
|
||||
initrd_len, cmdline, 0);
|
||||
if (!fdt) {
|
||||
pr_err("Error setting up the new device tree.\n");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
fdt_pack(fdt);
|
||||
kbuf.buffer = fdt;
|
||||
kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
|
||||
kbuf.buf_align = PAGE_SIZE;
|
||||
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
|
||||
kbuf.top_down = true;
|
||||
ret = kexec_add_buffer(&kbuf);
|
||||
if (ret) {
|
||||
pr_err("Error add DTB kbuf ret=%d\n", ret);
|
||||
goto out_free_fdt;
|
||||
}
|
||||
pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem);
|
||||
goto out;
|
||||
|
||||
out_free_fdt:
|
||||
kvfree(fdt);
|
||||
out:
|
||||
kfree(modified_cmdline);
|
||||
kexec_free_elf_info(&elf_info);
|
||||
return ret ? ERR_PTR(ret) : NULL;
|
||||
}
|
||||
|
||||
#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
|
||||
#define RISCV_IMM_BITS 12
|
||||
#define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS)
|
||||
#define RISCV_CONST_HIGH_PART(x) \
|
||||
(((x) + (RISCV_IMM_REACH >> 1)) & ~(RISCV_IMM_REACH - 1))
|
||||
#define RISCV_CONST_LOW_PART(x) ((x) - RISCV_CONST_HIGH_PART(x))
|
||||
|
||||
#define ENCODE_ITYPE_IMM(x) \
|
||||
(RV_X(x, 0, 12) << 20)
|
||||
#define ENCODE_BTYPE_IMM(x) \
|
||||
((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \
|
||||
(RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
|
||||
#define ENCODE_UTYPE_IMM(x) \
|
||||
(RV_X(x, 12, 20) << 12)
|
||||
#define ENCODE_JTYPE_IMM(x) \
|
||||
((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | \
|
||||
(RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
|
||||
#define ENCODE_CBTYPE_IMM(x) \
|
||||
((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | \
|
||||
(RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12))
|
||||
#define ENCODE_CJTYPE_IMM(x) \
|
||||
((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | \
|
||||
(RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | \
|
||||
(RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12))
|
||||
#define ENCODE_UJTYPE_IMM(x) \
|
||||
(ENCODE_UTYPE_IMM(RISCV_CONST_HIGH_PART(x)) | \
|
||||
(ENCODE_ITYPE_IMM(RISCV_CONST_LOW_PART(x)) << 32))
|
||||
#define ENCODE_UITYPE_IMM(x) \
|
||||
(ENCODE_UTYPE_IMM(x) | (ENCODE_ITYPE_IMM(x) << 32))
|
||||
|
||||
#define CLEAN_IMM(type, x) \
|
||||
((~ENCODE_##type##_IMM((uint64_t)(-1))) & (x))
|
||||
|
||||
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
|
||||
Elf_Shdr *section,
|
||||
const Elf_Shdr *relsec,
|
||||
const Elf_Shdr *symtab)
|
||||
{
|
||||
const char *strtab, *name, *shstrtab;
|
||||
const Elf_Shdr *sechdrs;
|
||||
Elf_Rela *relas;
|
||||
int i, r_type;
|
||||
|
||||
/* String & section header string table */
|
||||
sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
|
||||
strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
|
||||
shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
|
||||
|
||||
relas = (void *)pi->ehdr + relsec->sh_offset;
|
||||
|
||||
for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
|
||||
const Elf_Sym *sym; /* symbol to relocate */
|
||||
unsigned long addr; /* final location after relocation */
|
||||
unsigned long val; /* relocated symbol value */
|
||||
unsigned long sec_base; /* relocated symbol value */
|
||||
void *loc; /* tmp location to modify */
|
||||
|
||||
sym = (void *)pi->ehdr + symtab->sh_offset;
|
||||
sym += ELF64_R_SYM(relas[i].r_info);
|
||||
|
||||
if (sym->st_name)
|
||||
name = strtab + sym->st_name;
|
||||
else
|
||||
name = shstrtab + sechdrs[sym->st_shndx].sh_name;
|
||||
|
||||
loc = pi->purgatory_buf;
|
||||
loc += section->sh_offset;
|
||||
loc += relas[i].r_offset;
|
||||
|
||||
if (sym->st_shndx == SHN_ABS)
|
||||
sec_base = 0;
|
||||
else if (sym->st_shndx >= pi->ehdr->e_shnum) {
|
||||
pr_err("Invalid section %d for symbol %s\n",
|
||||
sym->st_shndx, name);
|
||||
return -ENOEXEC;
|
||||
} else
|
||||
sec_base = pi->sechdrs[sym->st_shndx].sh_addr;
|
||||
|
||||
val = sym->st_value;
|
||||
val += sec_base;
|
||||
val += relas[i].r_addend;
|
||||
|
||||
addr = section->sh_addr + relas[i].r_offset;
|
||||
|
||||
r_type = ELF64_R_TYPE(relas[i].r_info);
|
||||
|
||||
switch (r_type) {
|
||||
case R_RISCV_BRANCH:
|
||||
*(u32 *)loc = CLEAN_IMM(BTYPE, *(u32 *)loc) |
|
||||
ENCODE_BTYPE_IMM(val - addr);
|
||||
break;
|
||||
case R_RISCV_JAL:
|
||||
*(u32 *)loc = CLEAN_IMM(JTYPE, *(u32 *)loc) |
|
||||
ENCODE_JTYPE_IMM(val - addr);
|
||||
break;
|
||||
/*
|
||||
* With no R_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_I
|
||||
* sym is expected to be next to R_RISCV_PCREL_HI20
|
||||
* in purgatory relsec. Handle it like R_RISCV_CALL
|
||||
* sym, instead of searching the whole relsec.
|
||||
*/
|
||||
case R_RISCV_PCREL_HI20:
|
||||
case R_RISCV_CALL:
|
||||
*(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
|
||||
ENCODE_UJTYPE_IMM(val - addr);
|
||||
break;
|
||||
case R_RISCV_RVC_BRANCH:
|
||||
*(u32 *)loc = CLEAN_IMM(CBTYPE, *(u32 *)loc) |
|
||||
ENCODE_CBTYPE_IMM(val - addr);
|
||||
break;
|
||||
case R_RISCV_RVC_JUMP:
|
||||
*(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
|
||||
ENCODE_CJTYPE_IMM(val - addr);
|
||||
break;
|
||||
case R_RISCV_ADD32:
|
||||
*(u32 *)loc += val;
|
||||
break;
|
||||
case R_RISCV_SUB32:
|
||||
*(u32 *)loc -= val;
|
||||
break;
|
||||
/* It has been applied by R_RISCV_PCREL_HI20 sym */
|
||||
case R_RISCV_PCREL_LO12_I:
|
||||
case R_RISCV_ALIGN:
|
||||
case R_RISCV_RELAX:
|
||||
break;
|
||||
default:
|
||||
pr_err("Unknown rela relocation: %d\n", r_type);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct kexec_file_ops elf_kexec_ops = {
|
||||
.probe = kexec_elf_probe,
|
||||
.load = elf_kexec_load,
|
||||
};
|
|
@ -207,13 +207,27 @@ check_syscall_nr:
|
|||
* Syscall number held in a7.
|
||||
* If syscall number is above allowed value, redirect to ni_syscall.
|
||||
*/
|
||||
bgeu a7, t0, 1f
|
||||
bgeu a7, t0, 3f
|
||||
#ifdef CONFIG_COMPAT
|
||||
REG_L s0, PT_STATUS(sp)
|
||||
srli s0, s0, SR_UXL_SHIFT
|
||||
andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
|
||||
li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
|
||||
sub t0, s0, t0
|
||||
bnez t0, 1f
|
||||
|
||||
/* Call compat_syscall */
|
||||
la s0, compat_sys_call_table
|
||||
j 2f
|
||||
1:
|
||||
#endif
|
||||
/* Call syscall */
|
||||
la s0, sys_call_table
|
||||
2:
|
||||
slli t0, a7, RISCV_LGPTR
|
||||
add s0, s0, t0
|
||||
REG_L s0, 0(s0)
|
||||
1:
|
||||
3:
|
||||
jalr s0
|
||||
|
||||
ret_from_syscall:
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <asm/cpu_ops_sbi.h>
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/image.h>
|
||||
#include <asm/xip_fixup.h>
|
||||
#include "efi-header.S"
|
||||
|
||||
__HEAD
|
||||
|
|
|
@ -65,7 +65,9 @@ machine_kexec_prepare(struct kimage *image)
|
|||
if (image->segment[i].memsz <= sizeof(fdt))
|
||||
continue;
|
||||
|
||||
if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
|
||||
if (image->file_mode)
|
||||
memcpy(&fdt, image->segment[i].buf, sizeof(fdt));
|
||||
else if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
|
||||
continue;
|
||||
|
||||
if (fdt_check_header(&fdt))
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* kexec_file for riscv, use vmlinux as the dump-capture kernel image.
|
||||
*
|
||||
* Copyright (C) 2021 Huawei Technologies Co, Ltd.
|
||||
*
|
||||
* Author: Liao Chang (liaochang1@huawei.com)
|
||||
*/
|
||||
#include <linux/kexec.h>
|
||||
|
||||
const struct kexec_file_ops * const kexec_file_loaders[] = {
|
||||
&elf_kexec_ops,
|
||||
NULL
|
||||
};
|
|
@ -11,6 +11,7 @@
|
|||
#include <linux/vmalloc.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
/*
|
||||
|
@ -427,3 +428,31 @@ void *module_alloc(unsigned long size)
|
|||
__builtin_return_address(0));
|
||||
}
|
||||
#endif
|
||||
|
||||
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
|
||||
const Elf_Shdr *sechdrs,
|
||||
const char *name)
|
||||
{
|
||||
const Elf_Shdr *s, *se;
|
||||
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
|
||||
|
||||
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
|
||||
if (strcmp(name, secstrs + s->sh_name) == 0)
|
||||
return s;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int module_finalize(const Elf_Ehdr *hdr,
|
||||
const Elf_Shdr *sechdrs,
|
||||
struct module *me)
|
||||
{
|
||||
const Elf_Shdr *s;
|
||||
|
||||
s = find_section(hdr, sechdrs, ".alternative");
|
||||
if (s)
|
||||
apply_module_alternatives((void *)s->sh_addr, s->sh_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -84,6 +84,34 @@ void show_regs(struct pt_regs *regs)
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;

bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}

static int __init compat_mode_detect(void)
{
	unsigned long tmp = csr_read(CSR_STATUS);

	csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
	compat_mode_supported =
			(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;

	csr_write(CSR_STATUS, tmp);

	pr_info("riscv: ELF compat mode %s",
			compat_mode_supported ? "supported" : "failed");

	return 0;
}
early_initcall(compat_mode_detect);
#endif

void start_thread(struct pt_regs *regs, unsigned long pc,
		  unsigned long sp)
{

@@ -98,6 +126,15 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
	}
	regs->epc = pc;
	regs->sp = sp;

#ifdef CONFIG_64BIT
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}

void flush_thread(void)
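The compat_elf_check_arch() hook above only admits 32-bit (ELFCLASS32) ELF objects built for EM_RISCV, and only after the SR_UXL probe has confirmed the hart can run UXL=32 userspace. As a rough userspace illustration of the same header test (a sketch, not code from this series; it assumes <elf.h> provides EM_RISCV plus the usual ELFMAG/EI_CLASS constants):

/* Illustrative sketch only: check whether a file looks like an rv32 ELF binary. */
#include <elf.h>
#include <stdio.h>
#include <string.h>

static int looks_like_rv32_binary(const char *path)
{
	Elf32_Ehdr hdr;
	FILE *f = fopen(path, "rb");

	if (!f)
		return 0;
	if (fread(&hdr, sizeof(hdr), 1, f) != 1) {
		fclose(f);
		return 0;
	}
	fclose(f);

	/* Same conditions the kernel applies, plus the ELF magic. */
	return memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0 &&
	       hdr.e_ident[EI_CLASS] == ELFCLASS32 &&
	       hdr.e_machine == EM_RISCV;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		printf("%s: %s\n", argv[1],
		       looks_like_rv32_binary(argv[1]) ? "rv32 ELF" : "not rv32 ELF");
	return 0;
}

Built natively, running it against a binary simply reports whether the header matches what the kernel's check looks for; the kernel additionally requires the runtime UXL probe to have succeeded.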
@@ -12,6 +12,7 @@
#include <asm/thread_info.h>
#include <asm/switch_to.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/regset.h>

@@ -110,11 +111,6 @@ static const struct user_regset_view riscv_user_native_view = {
	.n = ARRAY_SIZE(riscv_user_regset),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &riscv_user_native_view;
}

struct pt_regs_offset {
	const char *name;
	int offset;

@@ -272,3 +268,84 @@ __visible void do_syscall_trace_exit(struct pt_regs *regs)
		trace_sys_exit(regs, regs_return_value(regs));
#endif
}

#ifdef CONFIG_COMPAT
static int compat_riscv_gpr_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct compat_user_regs_struct cregs;

	regs_to_cregs(&cregs, task_pt_regs(target));

	return membuf_write(&to, &cregs,
			    sizeof(struct compat_user_regs_struct));
}

static int compat_riscv_gpr_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct compat_user_regs_struct cregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &cregs, 0, -1);

	cregs_to_regs(&cregs, task_pt_regs(target));

	return ret;
}

static const struct user_regset compat_riscv_user_regset[] = {
	[REGSET_X] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_riscv_gpr_get,
		.set = compat_riscv_gpr_set,
	},
#ifdef CONFIG_FPU
	[REGSET_F] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = riscv_fpr_get,
		.set = riscv_fpr_set,
	},
#endif
};

static const struct user_regset_view compat_riscv_user_native_view = {
	.name = "riscv",
	.e_machine = EM_RISCV,
	.regsets = compat_riscv_user_regset,
	.n = ARRAY_SIZE(compat_riscv_user_regset),
};

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	long ret = -EIO;

	switch (request) {
	default:
		ret = compat_ptrace_request(child, request, caddr, cdata);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &compat_riscv_user_native_view;
	else
#endif
		return &riscv_user_native_view;
}
@@ -21,6 +21,7 @@
#include <linux/efi.h>
#include <linux/crash_dump.h>

#include <asm/alternative.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>

@@ -295,6 +296,7 @@ void __init setup_arch(char **cmdline_p)
#endif

	riscv_fill_hwcap();
	apply_boot_alternatives();
}

static int __init topology_init(void)
@@ -6,6 +6,7 @@
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>

@@ -14,6 +15,7 @@

#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/signal32.h>
#include <asm/switch_to.h>
#include <asm/csr.h>

@@ -261,7 +263,10 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
		rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	ret = setup_rt_frame(ksig, oldset, regs);
	if (is_compat_task())
		ret = compat_setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_rt_frame(ksig, oldset, regs);

	signal_setup_done(ret, ksig, 0);
}
@@ -32,7 +32,6 @@
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/alternative.h>

#include "head.h"

@@ -41,9 +40,6 @@ static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
	init_cpu_topology();
#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
	apply_boot_alternatives();
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>
#include <asm/xip_fixup.h>

	.text
	.altmacro
@@ -33,7 +33,9 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
{
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#else
#endif

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, off_t, offset)

@@ -44,7 +46,7 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif /* !CONFIG_64BIT */
#endif

/*
 * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
	}
}

#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __section(".xip.traps")
#else
#define __trap_section
@@ -23,6 +23,9 @@ struct vdso_data {
#endif

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char compat_vdso_start[], compat_vdso_end[];
#endif

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,

@@ -30,6 +33,11 @@ enum vvar_pages {
	VVAR_NR_PAGES,
};

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*

@@ -52,12 +60,6 @@ struct __vdso_info {
	struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
};

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{

@@ -66,37 +68,33 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
	return 0;
}

static int __init __vdso_init(void)
static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info.vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}
	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info.vdso_pages = (
		vdso_info.vdso_code_end -
		vdso_info.vdso_code_start) >>
	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info.vdso_pages,
	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info.vdso_code_start);
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info.vdso_pages; i++)
	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info.cm->pages = vdso_pagelist;

	return 0;
	vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS

@@ -116,13 +114,14 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	struct __vdso_info *vdso_info = mm->context.vdso_info;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info.dm))
		if (vma_is_special_mapping(vma, vdso_info->dm))
			zap_page_range(vma, vma->vm_start, size);
	}

@@ -187,11 +186,6 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
@@ -203,25 +197,57 @@ static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

static int __init vdso_init(void)
{
	vdso_info.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR];
	vdso_info.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO];
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return __vdso_init();
	return 0;
}
arch_initcall(vdso_init);

static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info.vdso_pages << PAGE_SHIFT;
	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

@@ -232,16 +258,18 @@ static int __setup_additional_pages(struct mm_struct *mm,
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info.dm);
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;
	mm->context.vdso_info = (void *)vdso_info;

	ret =
	   _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info.cm);
		vdso_info->cm);

	if (IS_ERR(ret))
		goto up_fail;

@@ -253,6 +281,24 @@ static int __setup_additional_pages(struct mm_struct *mm,
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
				       &compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;

@@ -261,7 +307,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp);
	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
@@ -7,12 +7,16 @@
#include <linux/linkage.h>
#include <asm/page.h>

#ifndef __VDSO_PATH
#define __VDSO_PATH "arch/riscv/kernel/vdso/vdso.so"
#endif

	__PAGE_ALIGNED_DATA

	.globl vdso_start, vdso_end
	.balign PAGE_SIZE
vdso_start:
	.incbin "arch/riscv/kernel/vdso/vdso.so"
	.incbin __VDSO_PATH
	.balign PAGE_SIZE
vdso_end:
@@ -102,9 +102,9 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pud_t *pud_k;
	p4d_t *p4d_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

@@ -132,14 +132,12 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);

@@ -150,13 +148,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
@@ -578,9 +578,9 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		((uintptr_t)fixmap_pte)
#define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot)
#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#endif /* __PAGETABLE_PMD_FOLDED */

void __init create_pgd_mapping(pgd_t *pgdp,

@@ -671,7 +671,7 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#ifdef CONFIG_64BIT
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
static void __init disable_pgtable_l5(void)
{
	pgtable_l5_enabled = false;

@@ -947,6 +947,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif

	apply_early_boot_alternatives();
	pt_ops_set_early();

	/* Setup early PGD for fixmap */
@@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
purgatory.chk
purgatory.ro
kexec-purgatory.c
@@ -0,0 +1,95 @@
# SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD := y

purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o

targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))

$(obj)/string.o: $(srctree)/lib/string.c FORCE
	$(call if_changed_rule,cc_o_c)

$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
	$(call if_changed_rule,cc_o_c)

$(obj)/memcpy.o: $(srctree)/arch/riscv/lib/memcpy.S FORCE
	$(call if_changed_rule,as_o_S)

$(obj)/memset.o: $(srctree)/arch/riscv/lib/memset.S FORCE
	$(call if_changed_rule,as_o_S)

$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
	$(call if_changed_rule,cc_o_c)

CFLAGS_sha256.o := -D__DISABLE_EXPORTS
CFLAGS_string.o := -D__DISABLE_EXPORTS
CFLAGS_ctype.o := -D__DISABLE_EXPORTS

# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
targets += purgatory.ro purgatory.chk

# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
GCOV_PROFILE	:= n
KASAN_SANITIZE	:= n
UBSAN_SANITIZE	:= n
KCSAN_SANITIZE	:= n
KCOV_INSTRUMENT := n

# These are adjustments to the compiler flags used for objects that
# make up the standalone purgatory.ro

PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=medany -ffreestanding -fno-zero-initialized-in-bss
PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
PURGATORY_CFLAGS += -fno-stack-protector -g0

# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
# sure how to relocate those.
ifdef CONFIG_FUNCTION_TRACER
PURGATORY_CFLAGS_REMOVE		+= $(CC_FLAGS_FTRACE)
endif

ifdef CONFIG_STACKPROTECTOR
PURGATORY_CFLAGS_REMOVE		+= -fstack-protector
endif

ifdef CONFIG_STACKPROTECTOR_STRONG
PURGATORY_CFLAGS_REMOVE		+= -fstack-protector-strong
endif

CFLAGS_REMOVE_purgatory.o	+= $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_purgatory.o		+= $(PURGATORY_CFLAGS)

CFLAGS_REMOVE_sha256.o		+= $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_sha256.o			+= $(PURGATORY_CFLAGS)

CFLAGS_REMOVE_string.o		+= $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_string.o			+= $(PURGATORY_CFLAGS)

CFLAGS_REMOVE_ctype.o		+= $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_ctype.o			+= $(PURGATORY_CFLAGS)

AFLAGS_REMOVE_entry.o		+= -Wa,-gdwarf-2
AFLAGS_REMOVE_memcpy.o		+= -Wa,-gdwarf-2
AFLAGS_REMOVE_memset.o		+= -Wa,-gdwarf-2

$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
	$(call if_changed,ld)

$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
	$(call if_changed,ld)

targets += kexec-purgatory.c

quiet_cmd_bin2c = BIN2C   $@
      cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@

$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
	$(call if_changed,bin2c)

obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * purgatory: Runs between two kernels
 *
 * Copyright (C) 2022 Huawei Technologies Co, Ltd.
 *
 * Author: Li Zhengyu (lizhengyu3@huawei.com)
 *
 */

.macro	size, sym:req
	.size \sym, . - \sym
.endm

.text

.globl purgatory_start
purgatory_start:

	lla	sp, .Lstack
	mv	s0, a0	/* The hartid of the current hart */
	mv	s1, a1	/* Phys address of the FDT image */

	jal	purgatory

	/* Start new image. */
	mv	a0, s0
	mv	a1, s1
	ld	a2, riscv_kernel_entry
	jr	a2

size purgatory_start

.align 4
	.rept	256
	.quad	0
	.endr
.Lstack:

.data

.globl riscv_kernel_entry
riscv_kernel_entry:
	.quad	0
size riscv_kernel_entry

.end
@@ -0,0 +1,45 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * purgatory: Runs between two kernels
 *
 * Copyright (C) 2022 Huawei Technologies Co, Ltd.
 *
 * Author: Li Zhengyu (lizhengyu3@huawei.com)
 *
 */

#include <linux/purgatory.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/string.h>

u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory");

struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kexec-purgatory");

static int verify_sha256_digest(void)
{
	struct kexec_sha_region *ptr, *end;
	struct sha256_state ss;
	u8 digest[SHA256_DIGEST_SIZE];

	sha256_init(&ss);
	end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
	for (ptr = purgatory_sha_regions; ptr < end; ptr++)
		sha256_update(&ss, (uint8_t *)(ptr->start), ptr->len);
	sha256_final(&ss, digest);
	if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)) != 0)
		return 1;
	return 0;
}

/* workaround for a warning with -Wmissing-prototypes */
void purgatory(void);

void purgatory(void)
{
	if (verify_sha256_digest())
		for (;;)
			/* loop forever */
			;
}
@@ -418,9 +418,6 @@ config COMPAT
	  (and some other stuff like libraries and such) is needed for
	  executing 31 bit applications.  It is safe to say "Y".

config SYSVIPC_COMPAT
	def_bool y if COMPAT && SYSVIPC

config SMP
	def_bool y
@@ -13,6 +13,18 @@
#define compat_mode_t	compat_mode_t
typedef u16		compat_mode_t;

#define __compat_uid_t	__compat_uid_t
typedef u16		__compat_uid_t;
typedef u16		__compat_gid_t;

#define compat_dev_t	compat_dev_t
typedef u16		compat_dev_t;

#define compat_ipc_pid_t compat_ipc_pid_t
typedef u16		 compat_ipc_pid_t;

#define compat_statfs	compat_statfs

#include <asm-generic/compat.h>

#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \

@@ -30,15 +42,9 @@ typedef u16 compat_mode_t;
				 PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
				 PSW32_ASC_PRIMARY)

#define COMPAT_USER_HZ		100
#define COMPAT_UTS_MACHINE	"s390\0\0\0\0"

typedef u16		__compat_uid_t;
typedef u16		__compat_gid_t;
typedef u16		compat_dev_t;
typedef u16		compat_nlink_t;
typedef u16		compat_ipc_pid_t;
typedef __kernel_fsid_t	compat_fsid_t;

typedef struct {
	u32 mask;

@@ -79,26 +85,6 @@ struct compat_stat {
	u32		__unused5;
};

struct compat_flock {
	short		l_type;
	short		l_whence;
	compat_off_t	l_start;
	compat_off_t	l_len;
	compat_pid_t	l_pid;
};

#define F_GETLK64       12
#define F_SETLK64       13
#define F_SETLKW64      14

struct compat_flock64 {
	short		l_type;
	short		l_whence;
	compat_loff_t	l_start;
	compat_loff_t	l_len;
	compat_pid_t	l_pid;
};

struct compat_statfs {
	u32		f_type;
	u32		f_bsize;

@@ -129,10 +115,6 @@ struct compat_statfs64 {
	u32		f_spare[4];
};

#define COMPAT_RLIM_INFINITY		0xffffffff

#define COMPAT_OFF_T_MAX	0x7fffffff

/*
 * A pointer passed in from user mode. This should not
 * be used for syscall parameters, just declare them

@@ -155,61 +137,4 @@ static inline int is_compat_task(void)

#endif

struct compat_ipc64_perm {
	compat_key_t key;
	__compat_uid32_t uid;
	__compat_gid32_t gid;
	__compat_uid32_t cuid;
	__compat_gid32_t cgid;
	compat_mode_t mode;
	unsigned short __pad1;
	unsigned short seq;
	unsigned short __pad2;
	unsigned int __unused1;
	unsigned int __unused2;
};

struct compat_semid64_ds {
	struct compat_ipc64_perm sem_perm;
	compat_ulong_t sem_otime;
	compat_ulong_t sem_otime_high;
	compat_ulong_t sem_ctime;
	compat_ulong_t sem_ctime_high;
	compat_ulong_t sem_nsems;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
};

struct compat_msqid64_ds {
	struct compat_ipc64_perm msg_perm;
	compat_ulong_t msg_stime;
	compat_ulong_t msg_stime_high;
	compat_ulong_t msg_rtime;
	compat_ulong_t msg_rtime_high;
	compat_ulong_t msg_ctime;
	compat_ulong_t msg_ctime_high;
	compat_ulong_t msg_cbytes;
	compat_ulong_t msg_qnum;
	compat_ulong_t msg_qbytes;
	compat_pid_t   msg_lspid;
	compat_pid_t   msg_lrpid;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
};

struct compat_shmid64_ds {
	struct compat_ipc64_perm shm_perm;
	compat_size_t  shm_segsz;
	compat_ulong_t shm_atime;
	compat_ulong_t shm_atime_high;
	compat_ulong_t shm_dtime;
	compat_ulong_t shm_dtime_high;
	compat_ulong_t shm_ctime;
	compat_ulong_t shm_ctime_high;
	compat_pid_t   shm_cpid;
	compat_pid_t   shm_lpid;
	compat_ulong_t shm_nattch;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
};
#endif /* _ASM_S390X_COMPAT_H */
@@ -28,6 +28,7 @@
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
# ifdef CONFIG_COMPAT
#   define __ARCH_WANT_COMPAT_STAT
#   define __ARCH_WANT_SYS_TIME32
#   define __ARCH_WANT_SYS_UTIME32
# endif
@@ -489,9 +489,4 @@ config COMPAT
	select ARCH_WANT_OLD_COMPAT_IPC
	select COMPAT_OLD_SIGACTION

config SYSVIPC_COMPAT
	bool
	depends on COMPAT && SYSVIPC
	default y

source "drivers/sbus/char/Kconfig"
@@ -9,17 +9,25 @@
#define compat_mode_t	compat_mode_t
typedef u16		compat_mode_t;

#include <asm-generic/compat.h>

#define COMPAT_USER_HZ		100
#define COMPAT_UTS_MACHINE	"sparc\0\0"

#define __compat_uid_t	__compat_uid_t
typedef u16		__compat_uid_t;
typedef u16		__compat_gid_t;

#define compat_dev_t	compat_dev_t
typedef u16		compat_dev_t;

#define compat_ipc_pid_t compat_ipc_pid_t
typedef u16		 compat_ipc_pid_t;

#define compat_ipc64_perm compat_ipc64_perm

#define COMPAT_RLIM_INFINITY 0x7fffffff

#include <asm-generic/compat.h>

#define COMPAT_UTS_MACHINE	"sparc\0\0"

typedef s16		compat_nlink_t;
typedef u16		compat_ipc_pid_t;
typedef __kernel_fsid_t	compat_fsid_t;

struct compat_stat {
	compat_dev_t	st_dev;

@@ -75,46 +83,7 @@ struct compat_stat64 {
	unsigned int	__unused5;
};

struct compat_flock {
	short		l_type;
	short		l_whence;
	compat_off_t	l_start;
	compat_off_t	l_len;
	compat_pid_t	l_pid;
	short		__unused;
};

#define F_GETLK64	12
#define F_SETLK64	13
#define F_SETLKW64	14

struct compat_flock64 {
	short		l_type;
	short		l_whence;
	compat_loff_t	l_start;
	compat_loff_t	l_len;
	compat_pid_t	l_pid;
	short		__unused;
};

struct compat_statfs {
	int		f_type;
	int		f_bsize;
	int		f_blocks;
	int		f_bfree;
	int		f_bavail;
	int		f_files;
	int		f_ffree;
	compat_fsid_t	f_fsid;
	int		f_namelen;	/* SunOS ignores this field. */
	int		f_frsize;
	int		f_flags;
	int		f_spare[4];
};

#define COMPAT_RLIM_INFINITY 0x7fffffff

#define COMPAT_OFF_T_MAX	0x7fffffff
#define __ARCH_COMPAT_FLOCK_PAD	short __unused;

struct compat_ipc64_perm {
	compat_key_t key;
@@ -46,6 +46,7 @@
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#define __ARCH_WANT_COMPAT_STAT
#endif

#ifdef __32bit_syscall_numbers__
@@ -2870,10 +2870,6 @@ config COMPAT
if COMPAT
config COMPAT_FOR_U64_ALIGNMENT
	def_bool y

config SYSVIPC_COMPAT
	def_bool y
	depends on SYSVIPC
endif

endmenu
@@ -15,17 +15,23 @@
#define compat_mode_t	compat_mode_t
typedef u16		compat_mode_t;

#include <asm-generic/compat.h>

#define COMPAT_USER_HZ		100
#define COMPAT_UTS_MACHINE	"i686\0\0"

#define __compat_uid_t	__compat_uid_t
typedef u16		__compat_uid_t;
typedef u16		__compat_gid_t;

#define compat_dev_t	compat_dev_t
typedef u16		compat_dev_t;

#define compat_ipc_pid_t compat_ipc_pid_t
typedef u16		 compat_ipc_pid_t;

#define compat_statfs	compat_statfs

#include <asm-generic/compat.h>

#define COMPAT_UTS_MACHINE	"i686\0\0"

typedef u16		compat_nlink_t;
typedef u16		compat_ipc_pid_t;
typedef __kernel_fsid_t	compat_fsid_t;

struct compat_stat {
	u32		st_dev;

@@ -48,29 +54,11 @@ struct compat_stat {
	u32		__unused5;
};

struct compat_flock {
	short		l_type;
	short		l_whence;
	compat_off_t	l_start;
	compat_off_t	l_len;
	compat_pid_t	l_pid;
};

#define F_GETLK64	12	/*  using 'struct flock64' */
#define F_SETLK64	13
#define F_SETLKW64	14

/*
 * IA32 uses 4 byte alignment for 64 bit quantities,
 * so we need to pack this structure.
 * IA32 uses 4 byte alignment for 64 bit quantities, so we need to pack the
 * compat flock64 structure.
 */
struct compat_flock64 {
	short		l_type;
	short		l_whence;
	compat_loff_t	l_start;
	compat_loff_t	l_len;
	compat_pid_t	l_pid;
} __attribute__((packed));
#define __ARCH_NEED_COMPAT_FLOCK64_PACKED

struct compat_statfs {
	int		f_type;
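The rewritten comment above is the whole story behind __ARCH_NEED_COMPAT_FLOCK64_PACKED: the i386 ABI aligns 64-bit members to 4 bytes, so the 64-bit kernel must pack its mirror of the structure or the two layouts diverge. A standalone sketch of the effect (illustrative only, not from this series; the printed sizes assume a typical x86-64 compiler):

/* Illustrative only: show the padding a 64-bit compiler would otherwise add. */
#include <stdint.h>
#include <stdio.h>

struct flock64_packed {
	short   l_type;
	short   l_whence;
	int64_t l_start;
	int64_t l_len;
	int32_t l_pid;
} __attribute__((packed));

struct flock64_natural {
	short   l_type;
	short   l_whence;
	int64_t l_start;	/* 4 bytes of padding would precede this */
	int64_t l_len;
	int32_t l_pid;		/* and 4 more bytes of tail padding follow */
};

int main(void)
{
	/* Typically prints 24 vs. 32 bytes on x86-64. */
	printf("packed: %zu bytes, natural: %zu bytes\n",
	       sizeof(struct flock64_packed), sizeof(struct flock64_natural));
	return 0;
}

Only the packed variant matches what a 32-bit process actually hands to fcntl64(), which is why the attribute (and the marker define) exist.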
@@ -87,68 +75,6 @@ struct compat_statfs {
	int		f_spare[4];
};

#define COMPAT_RLIM_INFINITY		0xffffffff

#define COMPAT_OFF_T_MAX	0x7fffffff

struct compat_ipc64_perm {
	compat_key_t key;
	__compat_uid32_t uid;
	__compat_gid32_t gid;
	__compat_uid32_t cuid;
	__compat_gid32_t cgid;
	unsigned short mode;
	unsigned short __pad1;
	unsigned short seq;
	unsigned short __pad2;
	compat_ulong_t unused1;
	compat_ulong_t unused2;
};

struct compat_semid64_ds {
	struct compat_ipc64_perm sem_perm;
	compat_ulong_t sem_otime;
	compat_ulong_t sem_otime_high;
	compat_ulong_t sem_ctime;
	compat_ulong_t sem_ctime_high;
	compat_ulong_t sem_nsems;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};

struct compat_msqid64_ds {
	struct compat_ipc64_perm msg_perm;
	compat_ulong_t msg_stime;
	compat_ulong_t msg_stime_high;
	compat_ulong_t msg_rtime;
	compat_ulong_t msg_rtime_high;
	compat_ulong_t msg_ctime;
	compat_ulong_t msg_ctime_high;
	compat_ulong_t msg_cbytes;
	compat_ulong_t msg_qnum;
	compat_ulong_t msg_qbytes;
	compat_pid_t   msg_lspid;
	compat_pid_t   msg_lrpid;
	compat_ulong_t __unused4;
	compat_ulong_t __unused5;
};

struct compat_shmid64_ds {
	struct compat_ipc64_perm shm_perm;
	compat_size_t  shm_segsz;
	compat_ulong_t shm_atime;
	compat_ulong_t shm_atime_high;
	compat_ulong_t shm_dtime;
	compat_ulong_t shm_dtime_high;
	compat_ulong_t shm_ctime;
	compat_ulong_t shm_ctime_high;
	compat_pid_t   shm_cpid;
	compat_pid_t   shm_lpid;
	compat_ulong_t shm_nattch;
	compat_ulong_t __unused4;
	compat_ulong_t __unused5;
};

#ifdef CONFIG_X86_X32_ABI
#define COMPAT_USE_64BIT_TIME \
	(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
@@ -22,6 +22,7 @@
# include <asm/unistd_32_ia32.h>
# define __ARCH_WANT_SYS_TIME
# define __ARCH_WANT_SYS_UTIME
# define __ARCH_WANT_COMPAT_STAT
# define __ARCH_WANT_COMPAT_SYS_PREADV64
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
fs/open.c
@@ -224,6 +224,21 @@ SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
}
#endif /* BITS_PER_LONG == 32 */

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_TRUNCATE64)
COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, pathname,
		       compat_arg_u64_dual(length))
{
	return ksys_truncate(pathname, compat_arg_u64_glue(length));
}
#endif

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FTRUNCATE64)
COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd,
		       compat_arg_u64_dual(length))
{
	return ksys_ftruncate(fd, compat_arg_u64_glue(length));
}
#endif

int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
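The new compat entry points above lean on the compat_arg_u64_dual()/compat_arg_u64_glue() pattern: on a 32-bit compat ABI a 64-bit argument such as length arrives as two 32-bit halves and has to be recombined before the shared ksys_*() helper is called. A minimal sketch of that recombination (illustrative only, not the kernel's macro definitions; which half arrives first depends on the architecture's endianness and calling convention):

/* Illustrative only: rebuild a 64-bit syscall argument from two 32-bit halves. */
#include <stdint.h>
#include <stdio.h>

static uint64_t glue_u64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* A 5 GiB length split into halves, as a 32-bit task would pass it. */
	uint32_t lo = 0x40000000u;	/* low 32 bits  */
	uint32_t hi = 0x00000001u;	/* high 32 bits */

	printf("recombined length = %llu\n",
	       (unsigned long long)glue_u64(lo, hi));	/* 5368709120 */
	return 0;
}

Wrapping the pair/recombine step in the handler keeps the core implementation (ksys_truncate(), ksys_ftruncate() and friends) free of any 32-bit argument-passing details.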
@@ -339,6 +354,15 @@ SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
	return ksys_fallocate(fd, mode, offset, len);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FALLOCATE)
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, compat_arg_u64_dual(offset),
		       compat_arg_u64_dual(len))
{
	return ksys_fallocate(fd, mode, compat_arg_u64_glue(offset),
			      compat_arg_u64_glue(len));
}
#endif

/*
 * access() needs to use the real uid/gid, not the effective uid/gid.
 * We do this by temporarily clearing all FS-related capabilities and
@@ -682,6 +682,14 @@ SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
	return ksys_pread64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PREAD64)
COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pread64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif

ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
		      size_t count, loff_t pos)
{

@@ -708,6 +716,14 @@ SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
	return ksys_pwrite64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PWRITE64)
COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, const char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pwrite64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif

static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
		loff_t *ppos, int type, rwf_t flags)
{
|
@ -659,7 +659,7 @@ SYSCALL_DEFINE5(statx,
|
|||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
|
||||
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
|
||||
{
|
||||
struct compat_stat tmp;
|
||||
|
|