powerpc updates for 5.13

- Enable KFENCE for 32-bit.
- Implement EBPF for 32-bit.
- Convert 32-bit to do interrupt entry/exit in C.
- Convert 64-bit BookE to do interrupt entry/exit in C.
- Changes to our signal handling code to use user_access_begin/end() more extensively.
- Add support for time namespaces (CONFIG_TIME_NS)
- A series of fixes that allow us to reenable STRICT_KERNEL_RWX.
- Other smaller features, fixes & cleanups.

Thanks to: Alexey Kardashevskiy, Andreas Schwab, Andrew Donnellan, Aneesh Kumar K.V, Athira Rajeev, Bhaskar Chowdhury, Bixuan Cui, Cédric Le Goater, Chen Huang, Chris Packham, Christophe Leroy, Christopher M. Riedl, Colin Ian King, Dan Carpenter, Daniel Axtens, Daniel Henrique Barboza, David Gibson, Davidlohr Bueso, Denis Efremov, dingsenjie, Dmitry Safonov, Dominic DeMarco, Fabiano Rosas, Ganesh Goudar, Geert Uytterhoeven, Geetika Moolchandani, Greg Kurz, Guenter Roeck, Haren Myneni, He Ying, Jiapeng Chong, Jordan Niethe, Laurent Dufour, Lee Jones, Leonardo Bras, Li Huafei, Madhavan Srinivasan, Mahesh Salgaonkar, Masahiro Yamada, Nathan Chancellor, Nathan Lynch, Nicholas Piggin, Oliver O'Halloran, Paul Menzel, Pu Lehui, Randy Dunlap, Ravi Bangoria, Rosen Penev, Russell Currey, Santosh Sivaraj, Sebastian Andrzej Siewior, Segher Boessenkool, Shivaprasad G Bhat, Srikar Dronamraju, Stephen Rothwell, Thadeu Lima de Souza Cascardo, Thomas Gleixner, Tony Ambardar, Tyrel Datwyler, Vaibhav Jain, Vincenzo Frascino, Xiongwei Song, Yang Li, Yu Kuai, Zhang Yunkai.

-----BEGIN PGP SIGNATURE-----

iQJHBAABCAAxFiEEJFGtCPCthwEv2Y/bUevqPMjhpYAFAmCLV1kTHG1wZUBlbGxl
cm1hbi5pZC5hdQAKCRBR6+o8yOGlgLUyD/4jrTolG4sVec211hYO+0VuJzoqN4Cf
j2CA2Ju39butnSMiq4LJUPRB7QRZY1OofkoNFpZeDQspjfZXPz2ulpYAz+SxHWE2
ReHPmWH1rOABlUPXFboePF4OLwmAs9eR5mN2z9HpKXbT3k78HaToLqiONyB4fVCr
Q5TkJeRn/Y7ZJLdyPLTpczHHleQ8KoM6kT7ncXnTm6p97JOBJSrGaJ5N/8X5a4+e
6jtgB7Pvw8jNDShSr8BDLBgBZZcmoTiuG8KfgwRZ+m+mKB1yI2X8S/a54w/lDi9g
UcSv3jQcFLJuW+T/pYe4R330uWDYa0cwjJOtMmsJ98S4EYOevoe9fZuL97qNshme
xtBr4q1i03G1icYOJJ8dXtvabG2rUzj8t1SCDpwYfrynzTWVRikiQYTXUBhRSFoK
nsoklvKd2IZa485XYJ2ljSyClMy8S4yJJ9RuzZ94DTXDSJUesKuyRWGnso4mhkcl
wvl4wwMTJvnCMKVo6dsJyV24QWfd6dABxzm04uPA94CKhG33UwK8252jXVeaohSb
WSO7qWBONgDXQLJ0mXRcEYa9NHvFS4Jnp6APbxnHr1gS+K+PNkD4gPBf34FoyN0E
9s27kvEYk5vr8APUclETF6+FkbGUD5bFbusjt3hYloFpAoHQ/k5pFVDsOZNPA8sW
fDIRp05KunDojw==
=dfKL
-----END PGP SIGNATURE-----

Merge tag 'powerpc-5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

- Enable KFENCE for 32-bit.
- Implement EBPF for 32-bit.
- Convert 32-bit to do interrupt entry/exit in C.
- Convert 64-bit BookE to do interrupt entry/exit in C.
- Changes to our signal handling code to use user_access_begin/end() more extensively.
- Add support for time namespaces (CONFIG_TIME_NS)
- A series of fixes that allow us to reenable STRICT_KERNEL_RWX.
- Other smaller features, fixes & cleanups.

Thanks to Alexey Kardashevskiy, Andreas Schwab, Andrew Donnellan, Aneesh Kumar K.V, Athira Rajeev, Bhaskar Chowdhury, Bixuan Cui, Cédric Le Goater, Chen Huang, Chris Packham, Christophe Leroy, Christopher M. Riedl, Colin Ian King, Dan Carpenter, Daniel Axtens, Daniel Henrique Barboza, David Gibson, Davidlohr Bueso, Denis Efremov, dingsenjie, Dmitry Safonov, Dominic DeMarco, Fabiano Rosas, Ganesh Goudar, Geert Uytterhoeven, Geetika Moolchandani, Greg Kurz, Guenter Roeck, Haren Myneni, He Ying, Jiapeng Chong, Jordan Niethe, Laurent Dufour, Lee Jones, Leonardo Bras, Li Huafei, Madhavan Srinivasan, Mahesh Salgaonkar, Masahiro Yamada, Nathan Chancellor, Nathan Lynch, Nicholas Piggin, Oliver O'Halloran, Paul Menzel, Pu Lehui, Randy Dunlap, Ravi Bangoria, Rosen Penev, Russell Currey, Santosh Sivaraj, Sebastian Andrzej Siewior, Segher Boessenkool, Shivaprasad G Bhat, Srikar Dronamraju, Stephen Rothwell, Thadeu Lima de Souza Cascardo, Thomas Gleixner, Tony Ambardar, Tyrel Datwyler, Vaibhav Jain, Vincenzo Frascino, Xiongwei Song, Yang Li, Yu Kuai, and Zhang Yunkai.

* tag 'powerpc-5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (302 commits)
  powerpc/signal32: Fix erroneous SIGSEGV on RT signal return
  powerpc: Avoid clang uninitialized warning in __get_user_size_allowed
  powerpc/papr_scm: Mark nvdimm as unarmed if needed during probe
  powerpc/kvm: Fix build error when PPC_MEM_KEYS/PPC_PSERIES=n
  powerpc/kasan: Fix shadow start address with modules
  powerpc/kernel/iommu: Use largepool as a last resort when !largealloc
  powerpc/kernel/iommu: Align size for IOMMU_PAGE_SIZE() to save TCEs
  powerpc/44x: fix spelling mistake in Kconfig "varients" -> "variants"
  powerpc/iommu: Annotate nested lock for lockdep
  powerpc/iommu: Do not immediately panic when failed IOMMU table allocation
  powerpc/iommu: Allocate it_map by vmalloc
  selftests/powerpc: remove unneeded semicolon
  powerpc/64s: remove unneeded semicolon
  powerpc/eeh: remove unneeded semicolon
  powerpc/selftests: Add selftest to test concurrent perf/ptrace events
  powerpc/selftests/perf-hwbreak: Add testcases for 2nd DAWR
  powerpc/selftests/perf-hwbreak: Coalesce event creation code
  powerpc/selftests/ptrace-hwbreak: Add testcases for 2nd DAWR
  powerpc/configs: Add IBMVNIC to some 64-bit configs
  selftests/powerpc: Add uaccess flush test
  ...
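As background for the signal and futex changes in this merge, the following is a minimal sketch of the user_access_begin()/user_access_end() pattern the pull message refers to. It is illustrative only, not code from the commit: the helper name and error label are hypothetical, while user_access_begin(), unsafe_get_user() and user_access_end() are the existing kernel uaccess API (the futex.h hunk further down shows the same conversion in the real code).

#include <linux/uaccess.h>

/* Hypothetical helper: open one uaccess window, do an unchecked read inside it. */
static int read_u32_from_user(u32 __user *uaddr, u32 *val)
{
	u32 tmp;

	if (!user_access_begin(uaddr, sizeof(*uaddr)))
		return -EFAULT;
	/* unsafe_get_user() skips the per-access checks while the window is open */
	unsafe_get_user(tmp, uaddr, efault);
	user_access_end();
	*val = tmp;
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}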
commit c70a4be130

@@ -64,6 +64,7 @@ two flavors of JITs, the newer eBPF JIT currently supported on:
- arm64
- arm32
- ppc64
- ppc32
- sparc64
- mips64
- s390x

@@ -73,7 +74,6 @@ two flavors of JITs, the newer eBPF JIT currently supported on:
And the older cBPF JIT supported on the following archs:

- mips
- ppc
- sparc

eBPF JITs are a superset of cBPF JITs, meaning the kernel will

@@ -21,7 +21,7 @@
| nios2: | TODO |
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | TODO |
| powerpc: | ok |
| riscv: | ok |
| s390: | ok |
| sh: | TODO |

@@ -275,6 +275,20 @@ Health Bitmap Flags:
Given a DRC Index collect the performance statistics for NVDIMM and copy them
to the resultBuffer.

**H_SCM_FLUSH**

| Input: *drcIndex, continue-token*
| Out: *continue-token*
| Return Value: *H_SUCCESS, H_Parameter, H_P2, H_BUSY*

Given a DRC Index Flush the data to backend NVDIMM device.

The hcall returns H_BUSY when the flush takes longer time and the hcall needs
to be issued multiple times in order to be completely serviced. The
*continue-token* from the output to be passed in the argument list of
subsequent hcalls to the hypervisor until the hcall is completely serviced
at which point H_SUCCESS or other error is returned by the hypervisor.

References
==========
.. [1] "Power Architecture Platform Reference"

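A hedged sketch of the H_BUSY/continue-token retry loop the H_SCM_FLUSH description above implies a caller must implement. plpar_hcall(), H_IS_LONG_BUSY() and get_longbusy_msecs() are the existing pseries hcall helpers; the function name and drc_index parameter are hypothetical for illustration.

/* Sketch: re-issue H_SCM_FLUSH with the continue-token until serviced. */
static int papr_scm_flush_sketch(uint32_t drc_index)
{
	unsigned long ret_buf[PLPAR_HCALL_BUFSIZE];
	uint64_t token = 0;
	long rc;

	do {
		rc = plpar_hcall(H_SCM_FLUSH, ret_buf, drc_index, token);
		token = ret_buf[0];	/* continue-token for the next attempt */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		}
	} while (rc == H_BUSY);

	return rc == H_SUCCESS ? 0 : -EIO;
}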
@@ -254,7 +254,7 @@ using this window. the signal will be issued to the thread group leader
signals.

NX-GZIP User's Manual:
https://github.com/libnxz/power-gzip/blob/master/power_nx_gzip_um.pdf
https://github.com/libnxz/power-gzip/blob/master/doc/power_nx_gzip_um.pdf

Simple example
==============

@@ -301,5 +301,5 @@ Simple example
close(fd) or window can be closed upon process exit
}

Refer https://github.com/abalib/power-gzip for tests or more
Refer https://github.com/libnxz/power-gzip for tests or more
use cases.

@@ -155,7 +155,8 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
}

#ifdef CONFIG_TIME_NS
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
const struct vdso_data *ret;

@@ -96,7 +96,7 @@ const struct vdso_data *__arch_get_vdso_data(void)

#ifdef CONFIG_TIME_NS
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(void)
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
return _timens_data;
}

@@ -119,6 +119,7 @@ config PPC
#
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE

@@ -135,7 +136,7 @@ config PPC
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION)
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_COPY_MC if PPC64

@@ -145,6 +146,7 @@ config PPC
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64
select ARCH_USE_BUILTIN_BSWAP

@@ -171,6 +173,7 @@ config PPC
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
select GENERIC_EARLY_IOREMAP
select GENERIC_GETTIMEOFDAY
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP if PCI

@@ -178,13 +181,15 @@ config PPC
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
select GENERIC_VDSO_TIME_NS
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KGDB
select HAVE_ARCH_KFENCE if PPC32
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_NVRAM_OPS

@@ -192,7 +197,6 @@ config PPC
select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
select HAVE_C_RECORDMCOUNT
select HAVE_CBPF_JIT if !PPC64
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_CONTEXT_TRACKING if PPC64

@@ -200,7 +204,7 @@ config PPC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
select HAVE_EBPF_JIT if PPC64
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD

@@ -224,8 +228,8 @@ config PPC
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
select HAVE_OPTPROBES if PPC64
select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC64 && PPC_BOOK3S && SMP
select HAVE_OPTPROBES
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH

@@ -234,7 +238,7 @@ config PPC
select MMU_GATHER_RCU_TABLE_FREE
select MMU_GATHER_PAGE_SIZE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
select HAVE_RELIABLE_STACKTRACE
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING

@@ -786,7 +790,7 @@ config THREAD_SHIFT
config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
help
This option allows you to set the kernel data alignment. When

@@ -798,13 +802,13 @@ config DATA_SHIFT_BOOL
config DATA_SHIFT
int "Data shift" if DATA_SHIFT_BOOL
default 24 if STRICT_KERNEL_RWX && PPC64
range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32
range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx
range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA
default 19 if DEBUG_PAGEALLOC && PPC_8xx
default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.

@@ -1217,7 +1221,7 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000" if PPC_8xx
default "0xb0000000" if PPC_BOOK3S_32 && STRICT_KERNEL_RWX
default "0xb0000000" if PPC_BOOK3S_32
default "0xc0000000"
endmenu

@@ -353,6 +353,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
config FAIL_IOMMU
bool "Fault-injection capability for IOMMU"
depends on FAULT_INJECTION
depends on PCI || IBMVIO
help
Provide fault-injection capability for IOMMU. Each device can
be selectively enabled via the fail_iommu property.

@@ -181,12 +181,6 @@ CC_FLAGS_FTRACE := -pg
ifdef CONFIG_MPROFILE_KERNEL
CC_FLAGS_FTRACE += -mprofile-kernel
endif
# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
ifndef CONFIG_CC_IS_CLANG
CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
endif
endif

CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))

@@ -444,12 +438,15 @@ endif
endif

ifdef CONFIG_SMP
ifdef CONFIG_PPC32
prepare: task_cpu_prepare

PHONY += task_cpu_prepare
task_cpu_prepare: prepare0
$(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h))
endif

endif # CONFIG_PPC32
endif # CONFIG_SMP

PHONY += checkbin
# Check toolchain versions:

@@ -50,6 +50,7 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_FA_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y

@@ -177,6 +178,7 @@ CONFIG_CHELSIO_T1=m
CONFIG_BE2NET=m
CONFIG_IBMVETH=m
CONFIG_EHEA=m
CONFIG_IBMVNIC=m
CONFIG_E100=y
CONFIG_E1000=y
CONFIG_E1000E=y

@@ -41,6 +41,7 @@ CONFIG_DTL=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
CONFIG_PAPR_SCM=m
CONFIG_PPC_SVM=y
# CONFIG_PPC_PMAC is not set
CONFIG_RTAS_FLASH=m

@@ -159,6 +160,7 @@ CONFIG_BE2NET=m
CONFIG_S2IO=m
CONFIG_IBMVETH=y
CONFIG_EHEA=y
CONFIG_IBMVNIC=y
CONFIG_E100=y
CONFIG_E1000=y
CONFIG_E1000E=y

@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += export.h
generic-y += kvm_types.h

@@ -77,8 +77,6 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
long sys_switch_endian(void);
notrace unsigned int __check_irq_replay(void);
void notrace restore_interrupts(void);

/* prom_init (OpenFirmware) */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,

@@ -80,22 +80,6 @@ do { \
___p1; \
})

#ifdef CONFIG_PPC64
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
__unqual_scalar_typeof(*ptr) VAL; \
VAL = READ_ONCE(*__PTR); \
if (unlikely(!(cond_expr))) { \
spin_begin(); \
do { \
VAL = READ_ONCE(*__PTR); \
} while (!(cond_expr)); \
spin_end(); \
} \
(typeof(*ptr))VAL; \
})
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT nop
#elif defined(CONFIG_PPC_FSL_BOOK3E)

@@ -5,86 +5,7 @@
#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>

#ifdef __ASSEMBLY__

.macro kuep_update_sr gpr1, gpr2 /* NEVER use r0 as gpr2 due to addis */
101: mtsrin \gpr1, \gpr2
addi \gpr1, \gpr1, 0x111 /* next VSID */
rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
addis \gpr2, \gpr2, 0x1000 /* address of next segment */
bdnz 101b
isync
.endm

.macro kuep_lock gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
li \gpr1, NUM_USER_SEGMENTS
li \gpr2, 0
mtctr \gpr1
mfsrin \gpr1, \gpr2
oris \gpr1, \gpr1, SR_NX@h /* set Nx */
kuep_update_sr \gpr1, \gpr2
#endif
.endm

.macro kuep_unlock gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
li \gpr1, NUM_USER_SEGMENTS
li \gpr2, 0
mtctr \gpr1
mfsrin \gpr1, \gpr2
rlwinm \gpr1, \gpr1, 0, ~SR_NX /* Clear Nx */
kuep_update_sr \gpr1, \gpr2
#endif
.endm

#ifdef CONFIG_PPC_KUAP

.macro kuap_update_sr gpr1, gpr2, gpr3 /* NEVER use r0 as gpr2 due to addis */
101: mtsrin \gpr1, \gpr2
addi \gpr1, \gpr1, 0x111 /* next VSID */
rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
addis \gpr2, \gpr2, 0x1000 /* address of next segment */
cmplw \gpr2, \gpr3
blt- 101b
isync
.endm

.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
lwz \gpr2, KUAP(\thread)
rlwinm. \gpr3, \gpr2, 28, 0xf0000000
stw \gpr2, STACK_REGS_KUAP(\sp)
beq+ 102f
li \gpr1, 0
stw \gpr1, KUAP(\thread)
mfsrin \gpr1, \gpr2
oris \gpr1, \gpr1, SR_KS@h /* set Ks */
kuap_update_sr \gpr1, \gpr2, \gpr3
102:
.endm

.macro kuap_restore sp, current, gpr1, gpr2, gpr3
lwz \gpr2, STACK_REGS_KUAP(\sp)
rlwinm. \gpr3, \gpr2, 28, 0xf0000000
stw \gpr2, THREAD + KUAP(\current)
beq+ 102f
mfsrin \gpr1, \gpr2
rlwinm \gpr1, \gpr1, 0, ~SR_KS /* Clear Ks */
kuap_update_sr \gpr1, \gpr2, \gpr3
102:
.endm

.macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
lwz \gpr, THREAD + KUAP(\current)
999: twnei \gpr, 0
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm

#endif /* CONFIG_PPC_KUAP */

#else /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_KUAP

@@ -103,6 +24,51 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
isync(); /* Context sync required after mtsr() */
}

static inline void kuap_save_and_lock(struct pt_regs *regs)
{
unsigned long kuap = current->thread.kuap;
u32 addr = kuap & 0xf0000000;
u32 end = kuap << 28;

regs->kuap = kuap;
if (unlikely(!kuap))
return;

current->thread.kuap = 0;
kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* Set Ks */
}

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
u32 addr = regs->kuap & 0xf0000000;
u32 end = regs->kuap << 28;

current->thread.kuap = regs->kuap;

if (unlikely(regs->kuap == kuap))
return;

kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
}

static inline unsigned long kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;

WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);

return kuap;
}

static inline void kuap_assert_locked(void)
{
kuap_get_and_assert_locked();
}

static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{

@@ -194,10 +194,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#define VMALLOC_END ioremap_bot
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR (MODULES_END - SZ_256M)
#endif

#ifndef __ASSEMBLY__
#include <linux/sched.h>

@@ -79,4 +79,4 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
flush_tlb_mm(mm);
}

#endif /* _ASM_POWERPC_TLBFLUSH_H */
#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */

@@ -287,7 +287,7 @@ static inline void kuap_kernel_restore(struct pt_regs *regs,
*/
}

static inline unsigned long kuap_get_and_check_amr(void)
static inline unsigned long kuap_get_and_assert_locked(void)
{
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
unsigned long amr = mfspr(SPRN_AMR);

@@ -298,27 +298,7 @@ static inline unsigned long kuap_get_and_check_amr(void)
return 0;
}

#else /* CONFIG_PPC_PKEY */

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
}

static inline unsigned long kuap_get_and_check_amr(void)
{
return 0;
}

#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_check_amr(void)
static inline void kuap_assert_locked(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);

@@ -18,7 +18,6 @@
* complete pgtable.h but only a portion of it.
*/
#include <asm/book3s/64/pgtable.h>
#include <asm/bug.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>

@@ -7,6 +7,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
#include <linux/sizes.h>
#endif

/*

@@ -116,6 +117,7 @@
*/
#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
_PAGE_RW | _PAGE_EXEC)
/*

@@ -323,7 +325,8 @@ extern unsigned long pci_io_base;
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_END)
#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

@@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
* from ptesync, it should probably go into update_mmu_cache, rather
* than set_pte_at (which is used to set ptes unrelated to faults).
*
* Spurious faults to vmalloc region are not tolerated, so there is
* a ptesync in flush_cache_vmap.
* Spurious faults from the kernel memory are not tolerated, so there
* is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
* the pte update sequence from ISA Book III 6.10 Translation Table
* Update Synchronization Requirements.
*/
}

@@ -111,11 +111,8 @@
#ifndef __ASSEMBLY__

struct pt_regs;
long do_page_fault(struct pt_regs *);
long hash__do_page_fault(struct pt_regs *);
void hash__do_page_fault(struct pt_regs *);
void bad_page_fault(struct pt_regs *, int);
void __bad_page_fault(struct pt_regs *regs, int sig);
void do_bad_page_fault_segv(struct pt_regs *regs);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _exception_pkey(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);

@@ -30,7 +30,19 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
#endif /* CONFIG_PPC_BOOK3S_64 */

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
/*
* This is called when a page has been modified by the kernel.
* It just marks the page as not i-cache clean. We do the i-cache
* flush later when the page is given to a user process, if necessary.
*/
static inline void flush_dcache_page(struct page *page)
{
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}

void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range

@@ -40,7 +52,6 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
#define flush_icache_user_page flush_icache_user_page

void flush_dcache_icache_page(struct page *page);
void __flush_dcache_icache(void *page);

/**
* flush_dcache_range(): Write any modified data cache blocks out to memory and

@@ -594,7 +594,7 @@ typedef struct fcc_enet {
uint fen_p256c; /* Total packets 256 < bytes <= 511 */
uint fen_p512c; /* Total packets 512 < bytes <= 1023 */
uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */
uint fen_cambuf; /* Internal CAM buffer poiner */
uint fen_cambuf; /* Internal CAM buffer pointer */
ushort fen_rfthr; /* Received frames threshold */
ushort fen_rfcnt; /* Received frames count */
} fcc_enet_t;

@@ -23,12 +23,17 @@
#include <asm/kmap_size.h>
#endif

#ifdef CONFIG_PPC64
#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
#else
#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
#endif

/*
* Here we define all the compile-time 'special' virtual

@@ -50,6 +55,7 @@
*/
enum fixed_addresses {
FIX_HOLE,
#ifdef CONFIG_PPC32
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,

@@ -72,6 +78,7 @@ enum fixed_addresses {
FIX_IMMR_SIZE,
#endif
/* FIX_PCIE_MCFG, */
#endif /* CONFIG_PPC32 */
__end_of_permanent_fixed_addresses,

#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)

@@ -98,6 +105,8 @@ enum fixed_addresses {
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);

if (__builtin_constant_p(idx))
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses))

@@ -33,9 +33,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;

if (!access_ok(uaddr, sizeof(u32)))
if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));

switch (op) {
case FUTEX_OP_SET:

@@ -56,10 +55,10 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
default:
ret = -ENOSYS;
}
user_access_end();

*oval = oldval;

prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}

@@ -70,11 +69,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int ret = 0;
u32 prev;

if (!access_ok(uaddr, sizeof(u32)))
if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;

allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));

__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\

@@ -93,8 +90,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory");

user_access_end();

*uval = prev;
prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));

return ret;
}

@@ -315,7 +315,8 @@
#define H_SCM_HEALTH 0x400
#define H_SCM_PERFORMANCE_STATS 0x418
#define H_RPT_INVALIDATE 0x448
#define MAX_HCALL_OPCODE H_RPT_INVALIDATE
#define H_SCM_FLUSH 0x44C
#define MAX_HCALL_OPCODE H_SCM_FLUSH

/* Scope args for H_SCM_UNBIND_ALL */
#define H_UNBIND_SCOPE_ALL (0x1)

@@ -389,6 +390,7 @@
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6

@@ -24,5 +24,8 @@
extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);

/* Provided by HVC VIO */
void hvc_vio_init_early(void);

#endif /* __KERNEL__ */
#endif /* _PPC64_HVCONSOLE_H */

@@ -94,8 +94,6 @@ extern volatile struct Hydra __iomem *Hydra;
#define HYDRA_INT_EXT7 18 /* Power Off Request */
#define HYDRA_INT_SPARE 19

extern int hydra_init(void);

#endif /* __KERNEL__ */

#endif /* _ASMPPC_HYDRA_H */

@@ -4,6 +4,40 @@

#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64

#define ___get_user_instr(gu_op, dest, ptr) \
({ \
long __gui_ret = 0; \
unsigned long __gui_ptr = (unsigned long)ptr; \
struct ppc_inst __gui_inst; \
unsigned int __prefix, __suffix; \
__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
if (__gui_ret == 0) { \
if ((__prefix >> 26) == OP_PREFIX) { \
__gui_ret = gu_op(__suffix, \
(unsigned int __user *)__gui_ptr + 1); \
__gui_inst = ppc_inst_prefix(__prefix, \
__suffix); \
} else { \
__gui_inst = ppc_inst(__prefix); \
} \
if (__gui_ret == 0) \
(dest) = __gui_inst; \
} \
__gui_ret; \
})
#else /* !CONFIG_PPC64 */
#define ___get_user_instr(gu_op, dest, ptr) \
gu_op((dest).val, (u32 __user *)(ptr))
#endif /* CONFIG_PPC64 */

#define get_user_instr(x, ptr) \
___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
___get_user_instr(__get_user, x, ptr)

/*
* Instruction data type for POWER
*/

@@ -68,6 +102,8 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)

#define ppc_inst(x) ((struct ppc_inst){ .val = x })

#define ppc_inst_prefix(x, y) ppc_inst(x)

static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
return false;

@@ -113,13 +149,14 @@ static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *va
return location + ppc_inst_len(tmp);
}

static inline u64 ppc_inst_as_u64(struct ppc_inst x)
static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
{
#ifdef CONFIG_CPU_LITTLE_ENDIAN
return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
#else
return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
#endif
if (IS_ENABLED(CONFIG_PPC32))
return ppc_inst_val(x);
else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
else
return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
}

#define PPC_INST_STR_LEN sizeof("00000000 00000000")

@@ -141,10 +178,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})

int probe_user_read_inst(struct ppc_inst *inst,
struct ppc_inst __user *nip);

int probe_kernel_read_inst(struct ppc_inst *inst,
struct ppc_inst *src);
int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src);

#endif /* _ASM_POWERPC_INST_H */

@@ -2,6 +2,70 @@
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT 0x100

/* BookE */
#define INTERRUPT_DEBUG 0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON 0x260
#define INTERRUPT_DOORBELL 0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK 0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET 0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT 0x380
#define INTERRUPT_INST_SEGMENT 0x480
#define INTERRUPT_TRACE 0xd00
#define INTERRUPT_H_DATA_STORAGE 0xe00
#define INTERRUPT_HMI 0xe60
#define INTERRUPT_H_FAC_UNAVAIL 0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL 0xa00
#define INTERRUPT_PERFMON 0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE 0x300
#define INTERRUPT_INST_STORAGE 0x400
#define INTERRUPT_EXTERNAL 0x500
#define INTERRUPT_ALIGNMENT 0x600
#define INTERRUPT_PROGRAM 0x700
#define INTERRUPT_SYSCALL 0xc00
#define INTERRUPT_TRACE 0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL 0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER 0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON 0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx 0x1000
#define INTERRUPT_INST_TLB_MISS_8xx 0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx 0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx 0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx 0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx 0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx 0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603 0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603 0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>

@@ -9,10 +73,18 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>

struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
enum ctx_state ctx_state;
static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
/* Can avoid a test-and-clear because NMIs do not call this */
clear_thread_local_flags(_TLF_NAPPING);
regs->nip = (unsigned long)power4_idle_nap_return;
}
#endif
}

struct interrupt_state {
};

static inline void booke_restore_dbcr0(void)

@@ -29,10 +101,19 @@ static inline void booke_restore_dbcr0(void)

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
/*
* Book3E reconciles irq soft mask in asm
*/
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC32
if (!arch_irq_disabled_regs(regs))
trace_hardirqs_off();

if (user_mode(regs)) {
kuep_lock();
account_cpu_user_entry();
} else {
kuap_save_and_lock(regs);
}
#endif

#ifdef CONFIG_PPC64
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_hardirqs_off();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

@@ -48,16 +129,12 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
if (TRAP(regs) != 0x700)
if (TRAP(regs) != INTERRUPT_PROGRAM)
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
state->ctx_state = exception_enter();
if (user_mode(regs))
account_cpu_user_entry();
#endif
booke_restore_dbcr0();
}

/*

@@ -76,23 +153,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
*/
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
exception_exit(state->ctx_state);
#endif

/*
* Book3S exits to user via interrupt_exit_user_prepare(), which does
* context tracking, which is a cleaner way to handle PREEMPT=y
* and avoid context entry/exit in e.g., preempt_schedule_irq()),
* which is likely to be where the core code wants to end up.
*
* The above comment explains why we can't do the
*
* if (user_mode(regs))
* user_exit_irqoff();
*
* sequence here.
*/
if (user_mode(regs))
kuep_unlock();
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in

@@ -109,24 +171,46 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
/*
* Adjust at exit so the main handler sees the true NIA. This must
* come before irq_exit() because irq_exit can enable interrupts, and
* if another interrupt is taken before nap_adjust_return has run
* here, then that interrupt would return directly to idle nap return.
*/
nap_adjust_return(regs);

irq_exit();
interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
u8 irq_soft_mask;
u8 irq_happened;
#endif
u8 ftrace_enabled;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
/* Allow DEC and PMI to be traced when they are soft-NMI */
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
if (TRAP(regs) == INTERRUPT_DECREMENTER)
return false;
if (TRAP(regs) == INTERRUPT_PERFMON)
return false;
}
if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
if (TRAP(regs) == INTERRUPT_PERFMON)
return false;
}

return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;

@@ -139,9 +223,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

/* Don't do any per-CPU operations until interrupt state is fixed */
#endif
/* Allow DEC and PMI to be traced when they are soft-NMI */
if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {

if (nmi_disables_ftrace(regs)) {
state->ftrace_enabled = this_cpu_get_ftrace_enabled();
this_cpu_set_ftrace_enabled(0);
}

@@ -164,17 +247,20 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
radix_enabled() || (mfmsr() & MSR_DR))
nmi_exit();

/*
* nmi does not call nap_adjust_return because nmi should not create
* new work to do (must use irq_work for that).
*/

#ifdef CONFIG_PPC64
if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);

#ifdef CONFIG_PPC_BOOK3S_64
/* Check we didn't change the pending interrupt mask. */
WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
local_paca->irq_happened = state->irq_happened;
local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
#endif
}

/*

@@ -387,6 +473,7 @@ DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);

@@ -410,7 +497,7 @@ DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */

@@ -421,7 +508,7 @@ DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */

@@ -436,7 +523,7 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void unrecoverable_exception(struct pt_regs *regs);
void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

@@ -447,4 +534,6 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
local_irq_enable();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */

@@ -53,8 +53,6 @@ extern void *mcheckirq_ctx[NR_CPUS];
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];

void call_do_softirq(void *sp);
void call_do_irq(struct pt_regs *regs, void *sp);
extern void do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
extern void __do_irq(struct pt_regs *regs);

@@ -20,7 +20,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
asm_volatile_goto("1:\n\t"
"nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".long 1b - ., %l[l_yes] - .\n\t"
JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);

@@ -34,7 +35,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
asm_volatile_goto("1:\n\t"
"b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".long 1b - ., %l[l_yes] - .\n\t"
JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);

@@ -43,23 +45,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
return true;
}

#ifdef CONFIG_PPC64
typedef u64 jump_label_t;
#else
typedef u32 jump_label_t;
#endif

struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
};

#else
#define ARCH_STATIC_BRANCH(LABEL, KEY) \
1098: nop; \
.pushsection __jump_table, "aw"; \
FTR_ENTRY_LONG 1098b, LABEL, KEY; \
.long 1098b - ., LABEL - .; \
FTR_ENTRY_LONG KEY; \
.popsection
#endif

@@ -19,7 +19,7 @@

#define KASAN_SHADOW_SCALE_SHIFT 3

#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX)
#ifdef CONFIG_MODULES
#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
#else
#define KASAN_KERN_START PAGE_OFFSET

@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* powerpc KFENCE support.
*
* Copyright (C) 2020 CS GROUP France
*/

#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H

#include <linux/mm.h>
#include <asm/pgtable.h>

static inline bool arch_kfence_init_pool(void)
{
return true;
}

static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
pte_t *kpte = virt_to_kpte(addr);

if (protect) {
pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
} else {
pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
}

return true;
}

#endif /* __ASM_POWERPC_KFENCE_H */

@@ -28,15 +28,6 @@

#ifdef __ASSEMBLY__
#ifndef CONFIG_PPC_KUAP
.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
.endm

.macro kuap_restore sp, current, gpr1, gpr2, gpr3
.endm

.macro kuap_check current, gpr
.endm

.macro kuap_check_amr gpr1, gpr2
.endm

@@ -55,6 +46,14 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
void kuep_lock(void);
void kuep_unlock(void);
#else
static inline void kuep_lock(void) { }
static inline void kuep_unlock(void) { }
#endif

#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
#else

@@ -66,7 +65,15 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
return false;
}

static inline void kuap_check_amr(void) { }
static inline void kuap_assert_locked(void) { }
static inline void kuap_save_and_lock(struct pt_regs *regs) { }
static inline void kuap_user_restore(struct pt_regs *regs) { }
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }

static inline unsigned long kuap_get_and_assert_locked(void)
{
return 0;
}

/*
* book3s/64/kup-radix.h defines these functions for the !KUAP case to flush

@@ -258,6 +258,8 @@ extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
struct kvm_memory_slot *memslot,
unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

@@ -767,8 +767,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn,
unsigned long va);
unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,

@@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
unsigned long vdso_base = (unsigned long)mm->context.vdso;

if (start <= vdso_base && vdso_base < end)
mm->context.vdso = NULL;

@@ -7,33 +7,41 @@

#ifdef CONFIG_PPC_KUAP

#ifdef __ASSEMBLY__

.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */
mfspr \gpr1, SPRN_MD_AP
mtspr SPRN_MD_AP, \gpr2
stw \gpr1, STACK_REGS_KUAP(\sp)
.endm

.macro kuap_restore sp, current, gpr1, gpr2, gpr3
lwz \gpr1, STACK_REGS_KUAP(\sp)
mtspr SPRN_MD_AP, \gpr1
.endm

.macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
mfspr \gpr, SPRN_MD_AP
rlwinm \gpr, \gpr, 16, 0xffff
999: twnei \gpr, MD_APG_KUAP@h
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm

#else /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__

#include <asm/reg.h>

static inline void kuap_save_and_lock(struct pt_regs *regs)
{
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
mtspr(SPRN_MD_AP, regs->kuap);
}

static inline unsigned long kuap_get_and_assert_locked(void)
{
unsigned long kuap = mfspr(SPRN_MD_AP);

if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);

return kuap;
}

static inline void kuap_assert_locked(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
kuap_get_and_assert_locked();
}

static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{

@@ -172,6 +172,9 @@

#define mmu_linear_psize MMU_PAGE_8M

#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
#define MODULES_END PAGE_OFFSET

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>

@@ -6,6 +6,8 @@
* the ppc64 non-hashed page table.
*/

#include <linux/sizes.h>

#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

@@ -54,7 +56,8 @@
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M

/*

@@ -307,7 +307,7 @@ int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,

s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr);
s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, __be64 *addr);

s64 opal_signal_system_reset(s32 cpu);
s64 opal_quiesce(u64 shutdown_type, s32 cpu);

@@ -43,7 +43,7 @@ struct power_pmu {
u64 alt[]);
void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
u32 flags, struct pt_regs *regs);
void (*get_mem_weight)(u64 *weight);
void (*get_mem_weight)(u64 *weight, u64 type);
unsigned long group_constraint_mask;
unsigned long group_constraint_val;
u64 (*bhrb_filter_map)(u64 branch_sample_type);

@@ -67,6 +67,12 @@ struct power_pmu {
* the pmu supports extended perf regs capability
*/
int capabilities;
/*
* Function to check event code for values which are
* reserved. Function takes struct perf_event as input,
* since event code could be spread in attr.config*
*/
int (*check_attr_config)(struct perf_event *ev);
};

/*

@@ -41,8 +41,6 @@ struct mm_struct;

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

@@ -265,6 +265,7 @@
#define PPC_INST_ORI			0x60000000
#define PPC_INST_ORIS			0x64000000
#define PPC_INST_BRANCH			0x48000000
#define PPC_INST_BL			0x48000001
#define PPC_INST_BRANCH_COND		0x40800000

/* Prefixes */

@@ -437,6 +438,9 @@
#define PPC_RAW_STFDX(s, a, b)		(0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_LVX(t, a, b)		(0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STVX(s, a, b)		(0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDE(t, a, b)		(0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDZE(t, a)		(0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a))
#define PPC_RAW_ADDME(t, a)		(0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a))
#define PPC_RAW_ADD(t, a, b)		(PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADD_DOT(t, a, b)	(PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b)		(0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))

@@ -445,11 +449,14 @@
#define PPC_RAW_BLR()			(PPC_INST_BLR)
#define PPC_RAW_BLRL()			(0x4e800021)
#define PPC_RAW_MTLR(r)			(0x7c0803a6 | ___PPC_RT(r))
#define PPC_RAW_MFLR(t)			(PPC_INST_MFLR | ___PPC_RT(t))
#define PPC_RAW_BCTR()			(PPC_INST_BCTR)
#define PPC_RAW_MTCTR(r)		(PPC_INST_MTCTR | ___PPC_RT(r))
#define PPC_RAW_ADDI(d, a, i)		(PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LI(r, i)		PPC_RAW_ADDI(r, 0, i)
#define PPC_RAW_ADDIS(d, a, i)		(PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC(d, a, i)		(0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC_DOT(d, a, i)	(0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LIS(r, i)		PPC_RAW_ADDIS(r, 0, i)
#define PPC_RAW_STDX(r, base, b)	(0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_STDU(r, base, i)	(0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc))

@@ -472,6 +479,10 @@
#define PPC_RAW_CMPLW(a, b)		(0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_CMPLD(a, b)		(0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_SUB(d, a, b)		(0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b))
#define PPC_RAW_SUBFC(d, a, b)		(0x7c000010 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_SUBFE(d, a, b)		(0x7c000110 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_SUBFIC(d, a, i)		(0x20000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_SUBFZE(d, a)		(0x7c000190 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MULD(d, a, b)		(0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULW(d, a, b)		(0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULHWU(d, a, b)		(0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))

@@ -484,11 +495,13 @@
#define PPC_RAW_DIVDEU_DOT(t, a, b)	(0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_AND(d, a, b)		(0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_ANDI(d, a, i)		(0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_ANDIS(d, a, i)		(0x74000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_AND_DOT(d, a, b)	(0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_OR(d, a, b)		(0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_MR(d, a)		PPC_RAW_OR(d, a, a)
#define PPC_RAW_ORI(d, a, i)		(PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_ORIS(d, a, i)		(PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_NOR(d, a, b)		(0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XOR(d, a, b)		(0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i)		(0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_XORIS(d, a, i)		(0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
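
A worked example (mine, not part of the patch) of how these raw encodings
compose, assuming the usual field helpers (___PPC_RT(t) == ((t) & 0x1f) << 21
and PPC_INST_ADDI == 0x38000000):

	/* PPC_RAW_LI(3, 1) expands to PPC_RAW_ADDI(3, 0, 1):         */
	/*   0x38000000 | (3 << 21) | 1 == 0x38600001, i.e. "li r3,1" */
	u32 insn = PPC_RAW_LI(3, 1);

This is what the 32-bit eBPF JIT added in this merge builds its output from,
one u32 instruction word at a time.
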
@@ -15,36 +15,6 @@

#define SZL			(BITS_PER_LONG/8)

/*
 * Stuff for accurate CPU time accounting.
 * These macros handle transitions between user and system state
 * in exception entry and exit and accumulate time to the
 * user_time and system_time fields in the paca.
 */

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
#else
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
	MFTB(ra);			/* get timebase */ \
	PPC_LL	rb, ACCOUNT_STARTTIME_USER(ptr); \
	PPC_STL	ra, ACCOUNT_STARTTIME(ptr); \
	subf	rb,rb,ra;		/* subtract start value */ \
	PPC_LL	ra, ACCOUNT_USER_TIME(ptr); \
	add	ra,ra,rb;		/* add on to user time */ \
	PPC_STL	ra, ACCOUNT_USER_TIME(ptr); \

#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \
	MFTB(ra);			/* get timebase */ \
	PPC_LL	rb, ACCOUNT_STARTTIME(ptr); \
	PPC_STL	ra, ACCOUNT_STARTTIME_USER(ptr); \
	subf	rb,rb,ra;		/* subtract start value */ \
	PPC_LL	ra, ACCOUNT_SYSTEM_TIME(ptr); \
	add	ra,ra,rb;		/* add on to system time */ \
	PPC_STL	ra, ACCOUNT_SYSTEM_TIME(ptr)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Macros for storing registers into and loading registers from
 * exception frames.
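
For readability, a hedged C restatement (mine; the accounting field names
match the OFFSET() entries in asm-offsets.c further down) of what
ACCOUNT_CPU_USER_ENTRY does; it stays in asm because it runs on exception
entry before a full C environment exists:

	static inline void account_user_entry_sketch(struct cpu_accounting_data *acct)
	{
		unsigned long now = mftb();	/* read the timebase */

		acct->utime += now - acct->starttime_user;
		acct->starttime = now;
	}
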
@@ -144,15 +144,12 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC32
	void		*pgdir;		/* root of page-table tree */
	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
#ifdef CONFIG_PPC_RTAS
	unsigned long	rtas_sp;	/* stack pointer for when in RTAS */
#endif
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	unsigned long	kuap;		/* opened segments for user access */
#endif
#ifdef CONFIG_VMAP_STACK
	unsigned long	srr0;
	unsigned long	srr1;
	unsigned long	dar;

@@ -161,7 +158,7 @@ struct thread_struct {
	unsigned long	r0, r3, r4, r5, r6, r8, r9, r11;
	unsigned long	lr, ctr;
#endif
#endif
#endif /* CONFIG_PPC32 */
	/* Debug Registers */
	struct debug_reg debug;
#ifdef CONFIG_PPC_FPU_REGS

@@ -282,7 +279,6 @@ struct thread_struct {
#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
	SPEFSCR_INIT \

@@ -393,6 +389,7 @@ extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
#ifdef CONFIG_PPC_970_NAP
extern void power4_idle_nap(void);
void power4_idle_nap_return(void);
#endif

extern unsigned long cpuidle_disable;

@@ -417,6 +414,8 @@ extern int fix_alignment(struct pt_regs *);
#define NET_IP_ALIGN	0
#endif

int do_mathemu(struct pt_regs *regs);

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */
@@ -185,44 +185,27 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
#define current_pt_regs() \
	((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)

/*
 * The 4 low bits (0xf) are available as flags to overload the trap word,
 * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK
 * must cover the bits used as flags, including bit 0 which is used as the
 * "norestart" bit.
 */
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3S
#define TRAP_FLAGS_MASK		0x10
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		true
#define SET_FULL_REGS(regs)	do { } while (0)
#else
#define TRAP_FLAGS_MASK		0x11
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
#endif
#define CHECK_FULL_REGS(regs)	BUG_ON(!FULL_REGS(regs))
#define NV_REG_POISON		0xdeadbeefdeadbeefUL
#define TRAP_FLAGS_MASK		0x1
#else
/*
 * We use the least-significant bit of the trap field to indicate
 * whether we have saved the full set of registers, or only a
 * partial set.  A 1 there means the partial set.
 * On 4xx we use the next bit to indicate whether the exception
 * On 4xx we use bit 1 in the trap word to indicate whether the exception
 * is a critical exception (1 means it is).
 */
#define TRAP_FLAGS_MASK		0x1F
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
#define TRAP_FLAGS_MASK		0xf
#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
#define NV_REG_POISON		0xdeadbeef
#define CHECK_FULL_REGS(regs) \
do { \
	if ((regs)->trap & 1) \
		printk(KERN_CRIT "%s: partial register set\n", __func__); \
} while (0)
#endif /* __powerpc64__ */
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)

static inline void set_trap(struct pt_regs *regs, unsigned long val)
static __always_inline void set_trap(struct pt_regs *regs, unsigned long val)
{
	regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
}

@@ -244,12 +227,12 @@ static inline bool trap_is_syscall(struct pt_regs *regs)

static inline bool trap_norestart(struct pt_regs *regs)
{
	return regs->trap & 0x10;
	return regs->trap & 0x1;
}

static inline void set_trap_norestart(struct pt_regs *regs)
static __always_inline void set_trap_norestart(struct pt_regs *regs)
{
	regs->trap |= 0x10;
	regs->trap |= 0x1;
}

#define arch_has_single_step()	(1)
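
An illustration (not from the patch) of how the flag bits coexist with the
trap number; trap_is_syscall() is real, the wrapper is hypothetical:

	static inline void example_mark_norestart(struct pt_regs *regs)
	{
		if (trap_is_syscall(regs))	/* TRAP(regs) == 0xc00, flags masked off */
			set_trap_norestart(regs);	/* sets a flag bit, trap number unchanged */
	}
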
@@ -44,20 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
}
#define queued_spin_lock queued_spin_lock

#define smp_mb__after_spinlock()	smp_mb()

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * This barrier was added to simple spinlocks by commit 51d7d5205d338,
	 * but it should now be possible to remove it, asm arm64 has done with
	 * commit c6f5d02b6a0f.
	 */
	smp_mb();
	return atomic_read(&lock->val);
}
#define queued_spin_is_locked queued_spin_is_locked

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */

@@ -86,6 +72,13 @@ static inline void pv_spinlocks_init(void)

#endif

/*
 * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
 * which was found to have performance problems if implemented with
 * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
 * generic version instead.
 */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */
@@ -124,7 +124,7 @@
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
#else
#define MSR_TM_ACTIVE(x) 0
#define MSR_TM_ACTIVE(x) ((void)(x), 0)
#endif

#if defined(CONFIG_PPC_BOOK3S_64)
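
An aside (my example, not from the patch) on why the stub becomes
((void)(x), 0): the comma expression still "uses" its argument, so a
CONFIG_PPC_TRANSACTIONAL_MEM=n build does not warn about set-but-unused
variables:

	void example(struct pt_regs *regs)
	{
		unsigned long msr = regs->msr;	/* only consumed by the macro */

		if (MSR_TM_ACTIVE(msr))		/* ((void)(msr), 0) when TM is off */
			pr_info("transaction active\n");
	}
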
@@ -441,6 +441,7 @@
#define   LPCR_VRMA_LP1		ASM_CONST(0x0000800000000000)
#define   LPCR_RMLS		0x1C000000	/* Implementation dependent RMO limit sel */
#define   LPCR_RMLS_SH		26
#define   LPCR_HAIL		ASM_CONST(0x0000000004000000)	/* HV AIL (ISAv3.1) */
#define   LPCR_ILE		ASM_CONST(0x0000000002000000)	/* !HV irqs set MSR:LE */
#define   LPCR_AIL		ASM_CONST(0x0000000001800000)	/* Alternate interrupt location */
#define   LPCR_AIL_0		ASM_CONST(0x0000000000000000)	/* MMU off exception offset 0x0 */

@@ -1393,8 +1394,7 @@ static inline void mtmsr_isync(unsigned long val)
				     : "r" ((unsigned long)(v)) \
				     : "memory")
#endif
#define wrtspr(rn)	asm volatile("mtspr " __stringify(rn) ",0" : \
				     : : "memory")
#define wrtspr(rn)	asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")

static inline void wrtee(unsigned long val)
{
@@ -19,8 +19,8 @@
#define RTAS_UNKNOWN_SERVICE		(-1)
#define RTAS_INSTANTIATE_MAX		(1ULL<<30) /* Don't instantiate rtas at/above this value */

/* Buffer size for ppc_rtas system call. */
#define RTAS_RMOBUF_MAX			(64 * 1024)
/* Memory set aside for sys_rtas to use with calls that need a work area. */
#define RTAS_USER_REGION_SIZE		(64 * 1024)

/* RTAS return status codes */
#define RTAS_BUSY			-2	/* RTAS Busy */

@@ -357,7 +357,7 @@ extern void rtas_take_timebase(void);
static inline int page_is_rtas_user_buf(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);
	if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
	if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_USER_REGION_SIZE))
		return 1;
	return 0;
}
@ -38,8 +38,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
|
|||
|
||||
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
|
||||
{
|
||||
smp_mb();
|
||||
return !arch_spin_value_unlocked(*lock);
|
||||
return !arch_spin_value_unlocked(READ_ONCE(*lock));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -282,7 +281,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
|
|||
#define arch_read_relax(lock) rw_yield(lock)
|
||||
#define arch_write_relax(lock) rw_yield(lock)
|
||||
|
||||
/* See include/linux/spinlock.h */
|
||||
#define smp_mb__after_spinlock() smp_mb()
|
||||
|
||||
#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
|
||||
|
|
|
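
A short note with a sketch (mine) on the READ_ONCE change above: the lock
word is now sampled exactly once, so the compiler can neither tear nor
re-load it; the barrier removal itself follows the arm64 precedent
referenced in the qspinlock comment earlier in this diff:

	static inline int example_is_locked(arch_spinlock_t *lock)
	{
		arch_spinlock_t val = READ_ONCE(*lock);	/* one load, one snapshot */

		return !arch_spin_value_unlocked(val);
	}
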
@@ -31,6 +31,7 @@ extern u32 *cpu_to_phys_id;
extern bool coregroup_enabled;

extern int cpu_to_chip_id(int cpu);
extern int *chip_id_lookup_table;

#ifdef CONFIG_SMP

@@ -121,6 +122,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
	return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
	return per_cpu(cpu_core_map, cpu);
}

static inline struct cpumask *cpu_l2_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
@@ -10,6 +10,9 @@
#include <asm/simple_spinlock.h>
#endif

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void pv_spinlocks_init(void) { }
#endif
@@ -38,7 +38,6 @@
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/accounting.h>

#define SLB_PRELOAD_NR	16U

@@ -152,6 +151,12 @@ void arch_setup_new_exec(void);

#ifndef __ASSEMBLY__

static inline void clear_thread_local_flags(unsigned int flags)
{
	struct thread_info *ti = current_thread_info();
	ti->local_flags &= ~flags;
}

static inline bool test_thread_local_flags(unsigned int flags)
{
	struct thread_info *ti = current_thread_info();
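
A hypothetical usage sketch (the flag name is real, the function is mine)
for the new helper pair:

	static void example_wakeup_from_nap(void)
	{
		if (test_thread_local_flags(_TLF_NAPPING))
			clear_thread_local_flags(_TLF_NAPPING);
	}
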
@@ -126,7 +126,7 @@ static inline int cpu_to_coregroup_id(int cpu)
#define topology_physical_package_id(cpu)	(cpu_to_chip_id(cpu))

#define topology_sibling_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu)	(cpu_cpu_mask(cpu))
#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu)		(cpu_to_core_id(cpu))

#endif
@@ -43,129 +43,39 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#ifdef CONFIG_PPC64

#define ___get_user_instr(gu_op, dest, ptr) \
({ \
	long __gui_ret = 0; \
	unsigned long __gui_ptr = (unsigned long)ptr; \
	struct ppc_inst __gui_inst; \
	unsigned int __prefix, __suffix; \
	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
	if (__gui_ret == 0) { \
		if ((__prefix >> 26) == OP_PREFIX) { \
			__gui_ret = gu_op(__suffix, \
					  (unsigned int __user *)__gui_ptr + 1); \
			__gui_inst = ppc_inst_prefix(__prefix, \
						     __suffix); \
		} else { \
			__gui_inst = ppc_inst(__prefix); \
		} \
		if (__gui_ret == 0) \
			(dest) = __gui_inst; \
	} \
	__gui_ret; \
})

#define get_user_instr(x, ptr) \
	___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
	___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
	___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
#define get_user_instr(x, ptr) \
	get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */

extern long __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval) \
do { \
	__label__ __pu_failed; \
#define __put_user(x, ptr) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
	__typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
	\
	retval = 0; \
	allow_write_to_user(ptr, size); \
	__put_user_size_goto(x, ptr, size, __pu_failed); \
	prevent_write_to_user(ptr, size); \
	break; \
	might_fault(); \
	do { \
		__label__ __pu_failed; \
		\
		allow_write_to_user(__pu_addr, __pu_size); \
		__put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
		prevent_write_to_user(__pu_addr, __pu_size); \
		__pu_err = 0; \
		break; \
		\
__pu_failed: \
	retval = -EFAULT; \
	prevent_write_to_user(ptr, size); \
} while (0)

#define __put_user_nocheck(x, ptr, size) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__typeof__(size) __pu_size = (size); \
	\
	if (!is_kernel_addr((unsigned long)__pu_addr)) \
		might_fault(); \
	__chk_user_ptr(__pu_addr); \
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
		prevent_write_to_user(__pu_addr, __pu_size); \
		__pu_err = -EFAULT; \
	} while (0); \
	\
	__pu_err; \
})

#define __put_user_check(x, ptr, size) \
#define put_user(x, ptr) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__typeof__(size) __pu_size = (size); \
	__typeof__(*(ptr)) __user *_pu_addr = (ptr); \
	\
	might_fault(); \
	if (access_ok(__pu_addr, __pu_size)) \
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	\
	__pu_err; \
	access_ok(_pu_addr, sizeof(*(ptr))) ? \
		__put_user(x, _pu_addr) : -EFAULT; \
})

#define __put_user_nosleep(x, ptr, size) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__typeof__(size) __pu_size = (size); \
	\
	__chk_user_ptr(__pu_addr); \
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	\
	__pu_err; \
})

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there

@@ -198,25 +108,17 @@ __pu_failed: \

#define __put_user_size_goto(x, ptr, size, label) \
do { \
	__typeof__(*(ptr)) __user *__pus_addr = (ptr); \
	\
	switch (size) { \
	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break; \
	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break; \
	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break; \
	case 8: __put_user_asm2_goto(x, ptr, label); break; \
	default: __put_user_bad(); \
	case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
	case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
	default: BUILD_BUG(); \
	} \
} while (0)

#define __unsafe_put_user_goto(x, ptr, size, label) \
do { \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__chk_user_ptr(ptr); \
	__put_user_size_goto((x), __pu_addr, (size), label); \
} while (0)

extern long __get_user_bad(void);

/*
 * This does an atomic 128 byte aligned load from userspace.
 * Upto caller to do enable_kernel_vmx() before calling!

@@ -234,6 +136,59 @@ extern long __get_user_bad(void);
	: "=r" (err) \
	: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __get_user_asm_goto(x, addr, label, op) \
	asm_volatile_goto( \
		"1:	"op"%U1%X1 %0, %1	# get_user\n" \
		EX_TABLE(1b, %l2) \
		: "=r" (x) \
		: "m"UPD_CONSTR (*addr) \
		: \
		: label)

#ifdef __powerpc64__
#define __get_user_asm2_goto(x, addr, label) \
	__get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
	asm_volatile_goto( \
		"1:	lwz%X1 %0, %1\n" \
		"2:	lwz%X1 %L0, %L1\n" \
		EX_TABLE(1b, %l2) \
		EX_TABLE(2b, %l2) \
		: "=r" (x) \
		: "m" (*addr) \
		: \
		: label)
#endif /* __powerpc64__ */

#define __get_user_size_goto(x, ptr, size, label) \
do { \
	BUILD_BUG_ON(size > sizeof(x)); \
	switch (size) { \
	case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
	case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
	case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
	case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
	default: x = 0; BUILD_BUG(); \
	} \
} while (0)

#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
	__label__ __gus_failed; \
	\
	__get_user_size_goto(x, ptr, size, __gus_failed); \
	retval = 0; \
	break; \
__gus_failed: \
	x = 0; \
	retval = -EFAULT; \
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"1:	"op"%U2%X2 %1, %2	# get_user\n" \

@@ -271,25 +226,27 @@ extern long __get_user_bad(void);
#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	if (size > sizeof(x)) \
		(x) = __get_user_bad(); \
	BUILD_BUG_ON(size > sizeof(x)); \
	switch (size) { \
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
	default: (x) = __get_user_bad(); \
	default: x = 0; BUILD_BUG(); \
	} \
} while (0)

#define __get_user_size(x, ptr, size, retval) \
#define __get_user_size_goto(x, ptr, size, label) \
do { \
	allow_read_from_user(ptr, size); \
	__get_user_size_allowed(x, ptr, size, retval); \
	prevent_read_from_user(ptr, size); \
	long __gus_retval; \
	\
	__get_user_size_allowed(x, ptr, size, __gus_retval); \
	if (__gus_retval) \
		goto label; \
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
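
A hedged usage sketch (mine, not from the patch) of __get_user_size_goto():
it assumes the user-access window is already open (KUAP allowed) and jumps
to the given label on a fault:

	static long sketch_read_u32(u32 *dst, const u32 __user *src)
	{
		u32 val;

		__get_user_size_goto(val, src, sizeof(val), fault);
		*dst = val;
		return 0;
	fault:
		return -EFAULT;
	}
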
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.

@@ -297,86 +254,36 @@ do { \
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

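A quick illustration (mine) of what __long_type() selects: for an expression
no wider than a long, sizeof(x) > sizeof(0UL) is false and the chosen type
is unsigned long; for a u64 expression on 32-bit it is true and the type
becomes unsigned long long. Either way the __gu_val temporaries below are
wide enough for the access:

	__long_type(*(u8 __user *)p)	/* -> unsigned long */
	__long_type(*(u64 __user *)p)	/* -> unsigned long long on 32-bit */
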
#define __get_user_nocheck(x, ptr, size, do_allow) \
#define __get_user(x, ptr) \
({ \
	long __gu_err; \
	__long_type(*(ptr)) __gu_val; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(size) __gu_size = (size); \
	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
	\
	__chk_user_ptr(__gu_addr); \
	if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
		might_fault(); \
	if (do_allow) \
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	else \
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	might_fault(); \
	allow_read_from_user(__gu_addr, __gu_size); \
	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	prevent_read_from_user(__gu_addr, __gu_size); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	\
	__gu_err; \
})

#define __get_user_check(x, ptr, size) \
#define get_user(x, ptr) \
({ \
	long __gu_err = -EFAULT; \
	__long_type(*(ptr)) __gu_val = 0; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(size) __gu_size = (size); \
	__typeof__(*(ptr)) __user *_gu_addr = (ptr); \
	\
	might_fault(); \
	if (access_ok(__gu_addr, __gu_size)) \
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	\
	__gu_err; \
	access_ok(_gu_addr, sizeof(*(ptr))) ? \
		__get_user(x, _gu_addr) : \
		((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \
})

#define __get_user_nosleep(x, ptr, size) \
({ \
	long __gu_err; \
	__long_type(*(ptr)) __gu_val; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(size) __gu_size = (size); \
	\
	__chk_user_ptr(__gu_addr); \
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	\
	__gu_err; \
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)

@@ -414,26 +321,51 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	unsigned long ret;

	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	allow_write_to_user(addr, size);
	ret = __arch_clear_user(addr, size);
	prevent_write_to_user(addr, size);
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
	return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}
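
A usage note with a sketch (mine): after the swap above, clear_user() is the
checked entry point (it performs access_ok() itself) and __clear_user() is
the "access already validated" variant:

	static int example_zero_tail(void __user *buf, size_t len)
	{
		return clear_user(buf, len) ? -EFAULT : 0;
	}
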

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,

@@ -482,10 +414,37 @@ user_write_access_begin(const void __user *ptr, size_t len)
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end		prevent_current_write_to_user

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_get_user(x, p, e) do { \
	__long_type(*(p)) __gu_val; \
	__typeof__(*(p)) __user *__gu_addr = (p); \
	\
	__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
	(x) = (__typeof__(*(p)))__gu_val; \
} while (0)

#define unsafe_put_user(x, p, e) \
	__unsafe_put_user_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)

#define unsafe_copy_from_user(d, s, l, e) \
do { \
	u8 *_dst = (u8 *)(d); \
	const u8 __user *_src = (const u8 __user *)(s); \
	size_t _len = (l); \
	int _i; \
	\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
		unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
	if (_len & 4) { \
		unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
		_i += 4; \
	} \
	if (_len & 2) { \
		unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
		_i += 2; \
	} \
	if (_len & 1) \
		unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
} while (0)

#define unsafe_copy_to_user(d, s, l, e) \
do { \

@@ -494,9 +453,9 @@ do { \
	size_t _len = (l); \
	int _i; \
	\
	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \
		unsafe_put_user(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \
	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
		unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
	if (_len & 4) { \
		unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4; \
	} \

@@ -511,14 +470,8 @@ do { \
#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
	int __kr_err; \
	\
	__get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), __kr_err); \
	if (unlikely(__kr_err)) \
		goto err_label; \
} while (0)
	__get_user_size_goto(*((type *)(dst)), \
		(__force type __user *)(src), sizeof(type), err_label)

#define __put_kernel_nofault(dst, src, type, err_label) \
	__put_user_size_goto(*((type *)(src)), \
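
These unsafe_* macros are the building blocks of the user_access_begin/end
conversions highlighted in this merge. A hedged sketch (mine) of the
canonical pattern, mirroring the emulate_spe() conversion further down:

	static int sketch_store_pair(u32 __user *p, u32 a, u32 b)
	{
		if (!user_write_access_begin(p, 2 * sizeof(u32)))
			return -EFAULT;
		unsafe_put_user(a, p, Efault);		/* faults jump to the label */
		unsafe_put_user(b, p + 1, Efault);
		user_write_access_end();
		return 0;

	Efault:
		user_write_access_end();
		return -EFAULT;
	}
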
@@ -40,6 +40,7 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#ifdef CONFIG_PPC32
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_SYS_OLD_SELECT
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_SYS_TIME

@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H

#include <asm/page.h>

#ifdef __ASSEMBLY__

#include <asm/ppc_asm.h>

@@ -154,6 +156,14 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,

const struct vdso_data *__arch_get_vdso_data(void);

#ifdef CONFIG_TIME_NS
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
	return (void *)vd + PAGE_SIZE;
}
#endif

static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return true;
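
A layout note (my reading of the accessor above, not stated explicitly in
the patch): with CONFIG_TIME_NS, the time-namespace variant of the vdso data
is expected in the page immediately following the regular one, so the lookup
is pure pointer arithmetic:

	vd                ... +0
	timens vdso data  ... +PAGE_SIZE

which is also why <asm/page.h> is now included at the top of this header.
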
@@ -107,9 +107,7 @@ extern struct vdso_arch_data *vdso_data;
	bcl	20, 31, .+4
999:
	mflr	\ptr
#if CONFIG_PPC_PAGE_SHIFT > 14
	addis	\ptr, \ptr, (_vdso_datapage - 999b)@ha
#endif
	addi	\ptr, \ptr, (_vdso_datapage - 999b)@l
.endm

@@ -114,6 +114,7 @@ struct vio_driver {
	const struct vio_device_id *id_table;
	int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
	void (*remove)(struct vio_dev *dev);
	void (*shutdown)(struct vio_dev *dev);
	/* A driver must have a get_desired_dma() function to
	 * be loaded in a CMO environment if it uses DMA.
	 */
@@ -102,6 +102,7 @@ void xive_flush_interrupt(void);
/* xmon hook */
void xmon_xive_do_dump(int cpu);
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
void xmon_xive_get_irq_all(void);

/* APIs used by KVM */
u32 xive_native_default_eq_shift(void);
@@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_ERRNO_H
#define _ASM_POWERPC_ERRNO_H

#undef	EDEADLOCK
#include <asm-generic/errno.h>

#undef	EDEADLOCK
@@ -12,11 +12,6 @@
typedef unsigned long	__kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
#else
typedef unsigned int	__kernel_size_t;
typedef int		__kernel_ssize_t;
typedef long		__kernel_ptrdiff_t;
#define __kernel_size_t __kernel_size_t

typedef short		__kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
#endif
@@ -107,7 +107,6 @@ static struct aligninfo spe_aligninfo[32] = {
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       struct ppc_inst ppc_instr)
{
	int ret;
	union {
		u64 ll;
		u32 w[2];

@@ -127,11 +126,6 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok(addr, nb)))
		return -EFAULT;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

@@ -169,26 +163,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		}
	} else {
		temp.ll = data.ll = 0;
		ret = 0;
		p = addr;

		if (!user_read_access_begin(addr, nb))
			return -EFAULT;

		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(temp.v[0], p++);
			ret |= __get_user_inatomic(temp.v[1], p++);
			ret |= __get_user_inatomic(temp.v[2], p++);
			ret |= __get_user_inatomic(temp.v[3], p++);
			unsafe_get_user(temp.v[0], p++, Efault_read);
			unsafe_get_user(temp.v[1], p++, Efault_read);
			unsafe_get_user(temp.v[2], p++, Efault_read);
			unsafe_get_user(temp.v[3], p++, Efault_read);
			fallthrough;
		case 4:
			ret |= __get_user_inatomic(temp.v[4], p++);
			ret |= __get_user_inatomic(temp.v[5], p++);
			unsafe_get_user(temp.v[4], p++, Efault_read);
			unsafe_get_user(temp.v[5], p++, Efault_read);
			fallthrough;
		case 2:
			ret |= __get_user_inatomic(temp.v[6], p++);
			ret |= __get_user_inatomic(temp.v[7], p++);
			if (unlikely(ret))
				return -EFAULT;
			unsafe_get_user(temp.v[6], p++, Efault_read);
			unsafe_get_user(temp.v[7], p++, Efault_read);
		}
		user_read_access_end();

		switch (instr) {
		case EVLDD:

@@ -255,31 +250,41 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = addr;

		if (!user_write_access_begin(addr, nb))
			return -EFAULT;

		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], p++);
			ret |= __put_user_inatomic(data.v[1], p++);
			ret |= __put_user_inatomic(data.v[2], p++);
			ret |= __put_user_inatomic(data.v[3], p++);
			unsafe_put_user(data.v[0], p++, Efault_write);
			unsafe_put_user(data.v[1], p++, Efault_write);
			unsafe_put_user(data.v[2], p++, Efault_write);
			unsafe_put_user(data.v[3], p++, Efault_write);
			fallthrough;
		case 4:
			ret |= __put_user_inatomic(data.v[4], p++);
			ret |= __put_user_inatomic(data.v[5], p++);
			unsafe_put_user(data.v[4], p++, Efault_write);
			unsafe_put_user(data.v[5], p++, Efault_write);
			fallthrough;
		case 2:
			ret |= __put_user_inatomic(data.v[6], p++);
			ret |= __put_user_inatomic(data.v[7], p++);
			unsafe_put_user(data.v[6], p++, Efault_write);
			unsafe_put_user(data.v[7], p++, Efault_write);
		}
		if (unlikely(ret))
			return -EFAULT;
		user_write_access_end();
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;

Efault_read:
	user_read_access_end();
	return -EFAULT;

Efault_write:
	user_write_access_end();
	return -EFAULT;
}
#endif /* CONFIG_SPE */

@@ -299,13 +304,12 @@ int fix_alignment(struct pt_regs *regs)
	struct instruction_op op;
	int r, type;

	/*
	 * We require a complete register set, if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);
	if (is_kernel_addr(regs->nip))
		r = copy_inst_from_kernel_nofault(&instr, (void *)regs->nip);
	else
		r = __get_user_instr(instr, (void __user *)regs->nip);

	if (unlikely(__get_user_instr(instr, (void __user *)regs->nip)))
	if (unlikely(r))
		return -EFAULT;
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		/* We don't handle PPC little-endian any more... */
@@ -91,7 +91,6 @@ int main(void)
	DEFINE(SIGSEGV, SIGSEGV);
	DEFINE(NMI_MASK, NMI_MASK);
#else
	OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
#ifdef CONFIG_PPC_RTAS
	OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif

@@ -132,7 +131,6 @@ int main(void)
	OFFSET(KSP_VSID, thread_struct, ksp_vsid);
#else /* CONFIG_PPC64 */
	OFFSET(PGDIR, thread_struct, pgdir);
#ifdef CONFIG_VMAP_STACK
	OFFSET(SRR0, thread_struct, srr0);
	OFFSET(SRR1, thread_struct, srr1);
	OFFSET(DAR, thread_struct, dar);

@@ -149,7 +147,6 @@ int main(void)
	OFFSET(THLR, thread_struct, lr);
	OFFSET(THCTR, thread_struct, ctr);
#endif
#endif
#ifdef CONFIG_SPE
	OFFSET(THREAD_EVR0, thread_struct, evr[0]);
	OFFSET(THREAD_ACC, thread_struct, acc);

@@ -285,21 +282,11 @@ int main(void)
	OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
	OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
	OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
	OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
	OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
#ifdef CONFIG_PPC_BOOK3E
	OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
	OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime);
	OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime);
#endif
#endif /* CONFIG_PPC64 */

	/* RTAS */

@@ -323,9 +310,6 @@ int main(void)
	STACK_PT_REGS_OFFSET(GPR11, gpr[11]);
	STACK_PT_REGS_OFFSET(GPR12, gpr[12]);
	STACK_PT_REGS_OFFSET(GPR13, gpr[13]);
#ifndef CONFIG_PPC64
	STACK_PT_REGS_OFFSET(GPR14, gpr[14]);
#endif /* CONFIG_PPC64 */
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names

@@ -381,7 +365,6 @@ int main(void)
	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
	DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif

@@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
	pa = pte_pfn(*ptep);

	/* On radix we can do hugepage mappings for io, so handle that */
	if (hugepage_shift) {
		pa <<= hugepage_shift;
		pa |= token & ((1ul << hugepage_shift) - 1);
	} else {
		pa <<= PAGE_SHIFT;
		pa |= token & (PAGE_SIZE - 1);
	}
	if (!hugepage_shift)
		hugepage_shift = PAGE_SHIFT;

	pa <<= PAGE_SHIFT;
	pa |= token & ((1ul << hugepage_shift) - 1);
	return pa;
}

@@ -779,7 +776,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
	default:
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
		return -EINVAL;
	};
	}

	return 0;
}

@@ -1568,6 +1565,7 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
}
EXPORT_SYMBOL_GPL(eeh_pe_inject_err);

#ifdef CONFIG_PROC_FS
static int proc_eeh_show(struct seq_file *m, void *v)
{
	if (!eeh_enabled()) {

@@ -1594,6 +1592,7 @@ static int proc_eeh_show(struct seq_file *m, void *v)

	return 0;
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_FS

@ -48,195 +48,16 @@
|
|||
*/
|
||||
.align 12
|
||||
|
||||
#ifdef CONFIG_BOOKE
|
||||
.globl mcheck_transfer_to_handler
|
||||
mcheck_transfer_to_handler:
|
||||
mfspr r0,SPRN_DSRR0
|
||||
stw r0,_DSRR0(r11)
|
||||
mfspr r0,SPRN_DSRR1
|
||||
stw r0,_DSRR1(r11)
|
||||
/* fall through */
|
||||
_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
|
||||
|
||||
.globl debug_transfer_to_handler
|
||||
debug_transfer_to_handler:
|
||||
mfspr r0,SPRN_CSRR0
|
||||
stw r0,_CSRR0(r11)
|
||||
mfspr r0,SPRN_CSRR1
|
||||
stw r0,_CSRR1(r11)
|
||||
/* fall through */
|
||||
_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
|
||||
|
||||
.globl crit_transfer_to_handler
|
||||
crit_transfer_to_handler:
|
||||
#ifdef CONFIG_PPC_BOOK3E_MMU
|
||||
mfspr r0,SPRN_MAS0
|
||||
stw r0,MAS0(r11)
|
||||
mfspr r0,SPRN_MAS1
|
||||
stw r0,MAS1(r11)
|
||||
mfspr r0,SPRN_MAS2
|
||||
stw r0,MAS2(r11)
|
||||
mfspr r0,SPRN_MAS3
|
||||
stw r0,MAS3(r11)
|
||||
mfspr r0,SPRN_MAS6
|
||||
stw r0,MAS6(r11)
|
||||
#ifdef CONFIG_PHYS_64BIT
|
||||
mfspr r0,SPRN_MAS7
|
||||
stw r0,MAS7(r11)
|
||||
#endif /* CONFIG_PHYS_64BIT */
|
||||
#endif /* CONFIG_PPC_BOOK3E_MMU */
|
||||
#ifdef CONFIG_44x
|
||||
mfspr r0,SPRN_MMUCR
|
||||
stw r0,MMUCR(r11)
|
||||
#endif
|
||||
mfspr r0,SPRN_SRR0
|
||||
stw r0,_SRR0(r11)
|
||||
mfspr r0,SPRN_SRR1
|
||||
stw r0,_SRR1(r11)
|
||||
|
||||
/* set the stack limit to the current stack */
|
||||
mfspr r8,SPRN_SPRG_THREAD
|
||||
lwz r0,KSP_LIMIT(r8)
|
||||
stw r0,SAVED_KSP_LIMIT(r11)
|
||||
rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
|
||||
stw r0,KSP_LIMIT(r8)
|
||||
/* fall through */
|
||||
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_40x
|
||||
.globl crit_transfer_to_handler
|
||||
crit_transfer_to_handler:
|
||||
lwz r0,crit_r10@l(0)
|
||||
stw r0,GPR10(r11)
|
||||
lwz r0,crit_r11@l(0)
|
||||
stw r0,GPR11(r11)
|
||||
mfspr r0,SPRN_SRR0
|
||||
stw r0,crit_srr0@l(0)
|
||||
mfspr r0,SPRN_SRR1
|
||||
stw r0,crit_srr1@l(0)
|
||||
|
||||
/* set the stack limit to the current stack */
|
||||
mfspr r8,SPRN_SPRG_THREAD
|
||||
lwz r0,KSP_LIMIT(r8)
|
||||
stw r0,saved_ksp_limit@l(0)
|
||||
rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
|
||||
stw r0,KSP_LIMIT(r8)
|
||||
/* fall through */
|
||||
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This code finishes saving the registers to the exception frame
|
||||
* and jumps to the appropriate handler for the exception, turning
|
||||
* on address translation.
|
||||
* Note that we rely on the caller having set cr0.eq iff the exception
|
||||
* occurred in kernel mode (i.e. MSR:PR = 0).
|
||||
*/
|
||||
.globl transfer_to_handler_full
|
||||
transfer_to_handler_full:
|
||||
SAVE_NVGPRS(r11)
|
||||
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
|
||||
/* fall through */
|
||||
|
||||
.globl transfer_to_handler
|
||||
transfer_to_handler:
|
||||
stw r2,GPR2(r11)
|
||||
stw r12,_NIP(r11)
|
||||
stw r9,_MSR(r11)
|
||||
andi. r2,r9,MSR_PR
|
||||
mfctr r12
|
||||
mfspr r2,SPRN_XER
|
||||
stw r12,_CTR(r11)
|
||||
stw r2,_XER(r11)
|
||||
mfspr r12,SPRN_SPRG_THREAD
|
||||
tovirt_vmstack r12, r12
|
||||
beq 2f /* if from user, fix up THREAD.regs */
|
||||
addi r2, r12, -THREAD
|
||||
addi r11,r1,STACK_FRAME_OVERHEAD
|
||||
stw r11,PT_REGS(r12)
|
||||
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
|
||||
/* Check to see if the dbcr0 register is set up to debug. Use the
|
||||
internal debug mode bit to do this. */
|
||||
lwz r12,THREAD_DBCR0(r12)
|
||||
andis. r12,r12,DBCR0_IDM@h
|
||||
#endif
|
||||
ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
kuep_lock r11, r12
|
||||
#endif
|
||||
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
|
||||
beq+ 3f
|
||||
/* From user and task is ptraced - load up global dbcr0 */
|
||||
li r12,-1 /* clear all pending debug events */
|
||||
mtspr SPRN_DBSR,r12
|
||||
lis r11,global_dbcr0@ha
|
||||
tophys(r11,r11)
|
||||
addi r11,r11,global_dbcr0@l
|
||||
#ifdef CONFIG_SMP
|
||||
lwz r9,TASK_CPU(r2)
|
||||
slwi r9,r9,2
|
||||
add r11,r11,r9
|
||||
#endif
|
||||
lwz r12,0(r11)
|
||||
mtspr SPRN_DBCR0,r12
|
||||
#endif
|
||||
|
||||
b 3f
|
||||
|
||||
2: /* if from kernel, check interrupted DOZE/NAP mode and
|
||||
* check for stack overflow
|
||||
*/
|
||||
kuap_save_and_lock r11, r12, r9, r2, r6
|
||||
addi r2, r12, -THREAD
|
||||
#ifndef CONFIG_VMAP_STACK
|
||||
lwz r9,KSP_LIMIT(r12)
|
||||
cmplw r1,r9 /* if r1 <= ksp_limit */
|
||||
ble- stack_ovf /* then the kernel stack overflowed */
|
||||
#endif
|
||||
5:
|
||||
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
|
||||
.globl prepare_transfer_to_handler
|
||||
prepare_transfer_to_handler:
|
||||
/* if from kernel, check interrupted DOZE/NAP mode */
|
||||
lwz r12,TI_LOCAL_FLAGS(r2)
|
||||
mtcrf 0x01,r12
|
||||
bt- 31-TLF_NAPPING,4f
|
||||
bt- 31-TLF_SLEEPING,7f
|
||||
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
|
||||
.globl transfer_to_handler_cont
|
||||
transfer_to_handler_cont:
|
||||
3:
|
||||
mflr r9
|
||||
tovirt_novmstack r2, r2 /* set r2 to current */
|
||||
tovirt_vmstack r9, r9
|
||||
lwz r11,0(r9) /* virtual address of handler */
|
||||
lwz r9,4(r9) /* where to go when done */
|
||||
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
|
||||
mtspr SPRN_NRI, r0
|
||||
#endif
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
/*
|
||||
* When tracing IRQ state (lockdep) we enable the MMU before we call
|
||||
* the IRQ tracing functions as they might access vmalloc space or
|
||||
* perform IOs for console output.
|
||||
*
|
||||
* To speed up the syscall path where interrupts stay on, let's check
|
||||
* first if we are changing the MSR value at all.
|
||||
*/
|
||||
tophys_novmstack r12, r1
|
||||
lwz r12,_MSR(r12)
|
||||
andi. r12,r12,MSR_EE
|
||||
bne 1f
|
||||
blr
|
||||
|
||||
/* MSR isn't changing, just transition directly */
|
||||
#endif
|
||||
mtspr SPRN_SRR0,r11
|
||||
mtspr SPRN_SRR1,r10
|
||||
mtlr r9
|
||||
rfi /* jump to handler, enable MMU */
|
||||
#ifdef CONFIG_40x
|
||||
b . /* Prevent prefetch past rfi */
|
||||
#endif
|
||||
|
||||
#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
|
||||
4: rlwinm r12,r12,0,~_TLF_NAPPING
|
||||
stw r12,TI_LOCAL_FLAGS(r2)
|
||||
b power_save_ppc32_restore
|
||||
|
@ -246,97 +67,18 @@ transfer_to_handler_cont:
|
|||
lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
|
||||
rlwinm r9,r9,0,~MSR_EE
|
||||
lwz r12,_LINK(r11) /* and return to address in LR */
|
||||
kuap_restore r11, r2, r3, r4, r5
|
||||
lwz r2, GPR2(r11)
|
||||
b fast_exception_return
|
||||
#endif
|
||||
_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
|
||||
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
|
||||
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
|
||||
* keep interrupts disabled at this point otherwise we might risk
|
||||
* taking an interrupt before we tell lockdep they are enabled.
|
||||
*/
|
||||
lis r12,reenable_mmu@h
|
||||
ori r12,r12,reenable_mmu@l
|
||||
LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
|
||||
mtspr SPRN_SRR0,r12
|
||||
mtspr SPRN_SRR1,r0
|
||||
rfi
|
||||
#ifdef CONFIG_40x
|
||||
b . /* Prevent prefetch past rfi */
|
||||
#endif
|
||||
|
||||
reenable_mmu:
|
||||
/*
|
||||
* We save a bunch of GPRs,
|
||||
* r3 can be different from GPR3(r1) at this point, r9 and r11
|
||||
* contains the old MSR and handler address respectively,
|
||||
* r0, r4-r8, r12, CCR, CTR, XER etc... are left
|
||||
* clobbered as they aren't useful past this point.
|
||||
*/
|
||||
|
||||
stwu r1,-32(r1)
|
||||
stw r9,8(r1)
|
||||
stw r11,12(r1)
|
||||
stw r3,16(r1)
|
||||
|
||||
/* If we are disabling interrupts (normal case), simply log it with
|
||||
* lockdep
|
||||
*/
|
||||
1: bl trace_hardirqs_off
|
||||
lwz r3,16(r1)
|
||||
lwz r11,12(r1)
|
||||
lwz r9,8(r1)
|
||||
addi r1,r1,32
|
||||
mtctr r11
|
||||
mtlr r9
|
||||
bctr /* jump to handler */
|
||||
#endif /* CONFIG_TRACE_IRQFLAGS */
|
||||
|
||||
#ifndef CONFIG_VMAP_STACK
|
||||
/*
|
||||
* On kernel stack overflow, load up an initial stack pointer
|
||||
* and call StackOverflow(regs), which should not return.
|
||||
*/
|
||||
stack_ovf:
|
||||
/* sometimes we use a statically-allocated stack, which is OK. */
|
||||
lis r12,_end@h
|
||||
ori r12,r12,_end@l
|
||||
cmplw r1,r12
|
||||
ble 5b /* r1 <= &_end is OK */
|
||||
SAVE_NVGPRS(r11)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
lis r1,init_thread_union@ha
|
||||
addi r1,r1,init_thread_union@l
|
||||
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
|
||||
lis r9,StackOverflow@ha
|
||||
addi r9,r9,StackOverflow@l
|
||||
LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
|
||||
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
|
||||
mtspr SPRN_NRI, r0
|
||||
#endif
|
||||
mtspr SPRN_SRR0,r9
|
||||
mtspr SPRN_SRR1,r10
|
||||
rfi
|
||||
#ifdef CONFIG_40x
|
||||
b . /* Prevent prefetch past rfi */
|
||||
#endif
|
||||
_ASM_NOKPROBE_SYMBOL(stack_ovf)
|
||||
#endif
|
||||
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
|
||||
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
|
||||
|
||||
.globl transfer_to_syscall
|
||||
transfer_to_syscall:
|
||||
SAVE_NVGPRS(r1)
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
kuep_lock r11, r12
|
||||
#endif
|
||||
|
||||
/* Calling convention has r9 = orig r0, r10 = regs */
|
||||
addi r10,r1,STACK_FRAME_OVERHEAD
|
||||
mr r9,r0
|
||||
stw r10,THREAD+PT_REGS(r2)
|
||||
bl system_call_exception
|
||||
|
||||
ret_from_syscall:
|
||||
|
@ -349,10 +91,6 @@ ret_from_syscall:
|
|||
cmplwi cr0,r5,0
|
||||
bne- 2f
|
||||
#endif /* CONFIG_PPC_47x */
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
kuep_unlock r5, r7
|
||||
#endif
|
||||
kuap_check r2, r4
|
||||
lwz r4,_LINK(r1)
|
||||
lwz r5,_CCR(r1)
|
||||
mtlr r4
|
||||
|
@ -411,27 +149,6 @@ ret_from_kernel_thread:
|
|||
li r3,0
|
||||
b ret_from_syscall
|
||||
|
||||
/*
|
||||
* Top-level page fault handling.
|
||||
* This is in assembler because if do_page_fault tells us that
|
||||
* it is a bad kernel page fault, we want to save the non-volatile
|
||||
* registers before calling bad_page_fault.
|
||||
*/
|
||||
.globl handle_page_fault
|
||||
handle_page_fault:
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl do_page_fault
|
||||
cmpwi r3,0
|
||||
beq+ ret_from_except
|
||||
SAVE_NVGPRS(r1)
|
||||
lwz r0,_TRAP(r1)
|
||||
clrrwi r0,r0,1
|
||||
stw r0,_TRAP(r1)
|
||||
mr r4,r3 /* err arg for bad_page_fault */
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl __bad_page_fault
|
||||
b ret_from_except_full

/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state

@@ -485,7 +202,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */

kuap_check r2, r0
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all

@@ -529,12 +245,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
andi. r10,r9,MSR_RI /* check for recoverable interrupt */
beq 1f /* if not, we've got problems */
beq 3f /* if not, we've got problems */
#endif

2: REST_4GPRS(3, r11)
lwz r10,_CCR(r11)
REST_GPR(1, r11)
REST_2GPRS(1, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10

@@ -556,257 +272,147 @@ fast_exception_return:
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1: lis r3,exc_exit_restart_end@ha
addi r3,r3,exc_exit_restart_end@l
cmplw r12,r3
bge 3f
lis r4,exc_exit_restart@ha
addi r4,r4,exc_exit_restart@l
cmplw r12,r4
blt 3f
lis r3,fee_restarts@ha
tophys(r3,r3)
lwz r5,fee_restarts@l(r3)
addi r5,r5,1
stw r5,fee_restarts@l(r3)
mr r12,r4 /* restart at exc_exit_restart */
b 2b

.section .bss
.align 2
fee_restarts:
.space 4
.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
li r10,-1
stw r10,_TRAP(r11)
prepare_transfer_to_handler
bl unrecoverable_exception
trap /* should not get here */

.globl interrupt_return
interrupt_return:
lwz r4,_MSR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
lis r10,MSR_KERNEL@h
ori r10,r10,MSR_KERNEL@l
bl transfer_to_handler_full
.long unrecoverable_exception
.long ret_from_except
#endif
andi. r0,r4,MSR_PR
beq .Lkernel_interrupt_return
bl interrupt_exit_user_prepare
cmpwi r3,0
bne- .Lrestore_nvgprs

.globl ret_from_except_full
ret_from_except_full:
REST_NVGPRS(r1)
/* fall through */

.globl ret_from_except
ret_from_except:
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
/* Note: We don't bother telling lockdep about it */
LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
mtmsr r10 /* disable interrupts */

lwz r3,_MSR(r1) /* Returning to user mode? */
andi. r0,r3,MSR_PR
beq resume_kernel

user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
lwz r9,TI_FLAGS(r2)
andi. r0,r9,_TIF_USER_WORK_MASK
bne do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* Check whether this process has its own DBCR0 value. The internal
debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
kuep_unlock r10, r11
#endif

b restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
lwz r8,TI_FLAGS(r2)
andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
beq+ 1f

addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */

lwz r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */

/* Copy from the original to the trampoline. */
li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
li r6,0 /* start offset: 0 */
mtctr r5
2: lwzx r0,r6,r4
stwx r0,r6,r3
addi r6,r6,4
bdnz 2b

/* Do real store operation to complete stwu */
lwz r5,GPR1(r1)
stw r8,0(r5)

/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r2,TI_FLAGS
0: lwarx r8,0,r5
andc r8,r8,r11
stwcx. r8,0,r5
bne- 0b
1:

#ifdef CONFIG_PREEMPTION
/* check current_thread_info->preempt_count */
lwz r0,TI_PREEMPT(r2)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore_kuap
andi. r8,r8,_TIF_NEED_RESCHED
beq+ restore_kuap
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore_kuap /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Lockdep thinks irqs are enabled, we need to call
* preempt_schedule_irq with IRQs off, so we inform lockdep
* now that we -did- turn them off already
*/
bl trace_hardirqs_off
#endif
bl preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
*/
bl trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPTION */
restore_kuap:
kuap_restore r1, r2, r9, r10, r0

/* interrupts are hard-disabled at this point */
restore:
#if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
beq+ 1f
li r6,0
iccci r0,r0
stw r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */

lwz r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
/* Lockdep doesn't know about the fact that IRQs are temporarily turned
* off in this assembly code while peeking at TI_FLAGS() and such. However
* we need to inform it if the exception turned interrupts off, and we
* are about to turn them back on.
*/
andi. r10,r9,MSR_EE
beq 1f
stwu r1,-32(r1)
mflr r0
stw r0,4(r1)
bl trace_hardirqs_on
addi r1, r1, 32
lwz r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
REST_2GPRS(7, r1)

lwz r10,_XER(r1)
lwz r11,_CTR(r1)
mtspr SPRN_XER,r10
mtctr r11

BEGIN_FTR_SECTION
lwarx r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */

lwz r10,_CCR(r1)
lwz r11,_LINK(r1)
mtcrf 0xFF,r10
mtlr r11

/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r1)
/*
* Once we put values in SRR0 and SRR1, we are in a state
* where exceptions are not recoverable, since taking an
* exception will trash SRR0 and SRR1. Therefore we clear the
* MSR:RI bit to indicate this. If we do take an exception,
* we can't return to the point of the exception but we
* can restart the exception exit path at the label
* exc_exit_restart below. -- paulus
*/
LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
mtmsr r10 /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
lwz r12,_NIP(r1)
mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r9
REST_4GPRS(9, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
exc_exit_restart_end:
rfi
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)
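
The comment above states the key invariant of this exit path. As a rough C model of the unrecoverable window it describes, consider the sketch below; the struct, field values and addresses are invented and only mirror the order of operations (clear RI, then load SRR0/SRR1, then rfi).

    #include <stdio.h>

    #define MSR_RI 0x2   /* Recoverable Interrupt bit, as in the PPC MSR */

    /* Toy model: once RI is cleared, SRR0/SRR1 hold the return context and
     * must not be clobbered; if an exception hits anyway, the exit path can
     * only be restarted from exc_exit_restart. */
    struct cpu { unsigned msr; unsigned srr0, srr1; };

    static void exception_exit(struct cpu *c, unsigned nip, unsigned msr)
    {
        c->msr &= ~MSR_RI;   /* mtmsr r10: open the unrecoverable window */
        c->srr0 = nip;       /* mtspr SPRN_SRR0 */
        c->srr1 = msr;       /* mtspr SPRN_SRR1 */
        /* rfi would happen here; any exception in between trashes SRR0/1 */
    }

    int main(void)
    {
        struct cpu c = { .msr = MSR_RI };
        exception_exit(&c, 0xc0001000, 0x9000);
        printf("RI=%d srr0=%#x\n", !!(c.msr & MSR_RI), c.srr0);
        return 0;
    }
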

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
/*
* This is a bit different on 4xx/Book-E because it doesn't have
* the RI bit in the MSR.
* The TLB miss handler checks if we have interrupted
* the exception exit path and restarts it if so
* (well maybe one day it will... :).
*/
lwz r11,_LINK(r1)
mtlr r11
lwz r10,_CCR(r1)
mtcrf 0xff,r10
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r1)
REST_2GPRS(9, r1)
.globl exc_exit_restart
exc_exit_restart:
.Lfast_user_interrupt_return:
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12

BEGIN_FTR_SECTION
stwcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
lwarx r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

lwz r3,_CCR(r1)
lwz r4,_LINK(r1)
lwz r5,_CTR(r1)
lwz r6,_XER(r1)
li r0,0

/*
* Leaving a stale exception_marker on the stack can confuse
* the reliable stack unwinder later on. Clear it.
*/
stw r0,8(r1)
REST_4GPRS(7, r1)
REST_2GPRS(11, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
exc_exit_restart_end:

mtcr r3
mtlr r4
mtctr r5
mtspr SPRN_XER,r6

REST_4GPRS(2, r1)
REST_GPR(6, r1)
REST_GPR(0, r1)
REST_GPR(1, r1)
rfi
b . /* prevent prefetch past rfi */
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
#ifdef CONFIG_40x
b . /* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
REST_NVGPRS(r1)
b .Lfast_user_interrupt_return

.Lkernel_interrupt_return:
bl interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
cmpwi cr1,r3,0
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12

BEGIN_FTR_SECTION
stwcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
lwarx r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

lwz r3,_LINK(r1)
lwz r4,_CTR(r1)
lwz r5,_XER(r1)
lwz r6,_CCR(r1)
li r0,0

REST_4GPRS(7, r1)
REST_2GPRS(11, r1)

mtlr r3
mtctr r4
mtspr SPRN_XER,r5

/*
* Leaving a stale exception_marker on the stack can confuse
* the reliable stack unwinder later on. Clear it.
*/
stw r0,8(r1)

REST_4GPRS(2, r1)

bne- cr1,1f /* emulate stack store */
mtcr r6
REST_GPR(6, r1)
REST_GPR(0, r1)
REST_GPR(1, r1)
rfi
#ifdef CONFIG_40x
b . /* Prevent prefetch past rfi */
#endif

1: /*
* Emulate stack store with update. New r1 value was already calculated
* and updated in our interrupt regs by emulate_loadstore, but we can't
* store the previous value of r1 to the stack before re-loading our
* registers from it, otherwise they could be clobbered. Use
* SPRG Scratch0 as temporary storage to hold the store
* data, as interrupts are disabled here so it won't be clobbered.
*/
mtcr r6
#ifdef CONFIG_BOOKE
mtspr SPRN_SPRG_WSCRATCH0, r9
#else
mtspr SPRN_SPRG_SCRATCH0, r9
#endif
addi r9,r1,INT_FRAME_SIZE /* get original r1 */
REST_GPR(6, r1)
REST_GPR(0, r1)
REST_GPR(1, r1)
stw r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
mfspr r9, SPRN_SPRG_RSCRATCH0
#else
mfspr r9, SPRN_SPRG_SCRATCH0
#endif
rfi
#ifdef CONFIG_40x
b . /* Prevent prefetch past rfi */
#endif
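
To make the stwu emulation above easier to follow, here is a minimal C rendering of the same ordering constraint: the old r1 is parked in a scratch slot, the registers are (notionally) reloaded, and only then is the back-link stored through the new stack pointer. Names and sizes below are made up.

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t stack[64];   /* toy kernel stack */

    /* Toy model of the emulation: the new stack pointer was already computed
     * into the saved regs by emulate_loadstore; the old r1 may only be stored
     * through the new r1 after the registers are reloaded, so it is parked in
     * a scratch slot first (SPRG Scratch0 in the assembly). */
    static uintptr_t *emulate_stwu(uintptr_t *new_r1, uintptr_t old_r1)
    {
        uintptr_t scratch = old_r1;   /* mtspr SPRN_SPRG_SCRATCH0, r9 */
        /* ... GPRs are restored from the exception frame here ... */
        *new_r1 = scratch;            /* stw r9,0(r1): store half of stwu */
        return new_r1;                /* r1 already holds the updated value */
    }

    int main(void)
    {
        uintptr_t *r1 = &stack[32];
        r1 = emulate_stwu(r1 - 4, (uintptr_t)r1);
        printf("back-link %#lx stored at new sp\n", (unsigned long)*r1);
        return 0;
    }
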

_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
* Returning from a critical interrupt in user mode doesn't need

@@ -837,8 +443,7 @@ _ASM_NOKPROBE_SYMBOL(exc_exit_restart)
REST_NVGPRS(r1); \
lwz r3,_MSR(r1); \
andi. r3,r3,MSR_PR; \
LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \
bne user_exc_return; \
bne interrupt_return; \
lwz r0,GPR0(r1); \
lwz r2,GPR2(r1); \
REST_4GPRS(3, r1); \

@@ -906,11 +511,6 @@ _ASM_NOKPROBE_SYMBOL(exc_exit_restart)
#ifdef CONFIG_40x
.globl ret_from_crit_exc
ret_from_crit_exc:
mfspr r9,SPRN_SPRG_THREAD
lis r10,saved_ksp_limit@ha;
lwz r10,saved_ksp_limit@l(r10);
tovirt(r9,r9);
stw r10,KSP_LIMIT(r9)
lis r9,crit_srr0@ha;
lwz r9,crit_srr0@l(r9);
lis r10,crit_srr1@ha;

@@ -924,9 +524,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#ifdef CONFIG_BOOKE
.globl ret_from_crit_exc
ret_from_crit_exc:
mfspr r9,SPRN_SPRG_THREAD
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

@@ -934,9 +531,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

.globl ret_from_debug_exc
ret_from_debug_exc:
mfspr r9,SPRN_SPRG_THREAD
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_MMU_REGS;

@@ -945,9 +539,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

.globl ret_from_mcheck_exc
ret_from_mcheck_exc:
mfspr r9,SPRN_SPRG_THREAD
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_xSRR(DSRR0,DSRR1);

@@ -955,121 +546,8 @@ ret_from_mcheck_exc:
RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */

/*
* Load the DBCR0 value for a task that is being ptraced,
* having first saved away the global DBCR0. Note that r0
* has the dbcr0 value to set upon entry to this.
*/
load_dbcr0:
mfmsr r10 /* first disable debug exceptions */
rlwinm r10,r10,0,~MSR_DE
mtmsr r10
isync
mfspr r10,SPRN_DBCR0
lis r11,global_dbcr0@ha
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
lwz r9,TASK_CPU(r2)
slwi r9,r9,2
add r11,r11,r9
#endif
stw r10,0(r11)
mtspr SPRN_DBCR0,r0
li r11,-1
mtspr SPRN_DBSR,r11 /* clear all pending debug events */
blr

.section .bss
.align 4
.global global_dbcr0
global_dbcr0:
.space 4*NR_CPUS
.previous
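
A compact C model of load_dbcr0's bookkeeping may help: the hardware DBCR0 is saved into a per-CPU slot of the global_dbcr0 array before the ptraced task's value is installed, and DBSR is cleared by writing all ones. Everything below is a simulation with invented stand-ins, not the kernel's interface.

    #include <stdio.h>

    #define NR_CPUS 4

    static unsigned global_dbcr0[NR_CPUS];          /* mirrors the .bss array */
    static unsigned hw_dbcr0, hw_dbsr = 0xdead;     /* stand-ins for SPRN_DBCR0/DBSR */

    static void load_dbcr0_sim(int cpu, unsigned task_dbcr0)
    {
        global_dbcr0[cpu] = hw_dbcr0;  /* stw r10,0(r11): save the global value */
        hw_dbcr0 = task_dbcr0;         /* mtspr SPRN_DBCR0,r0 */
        hw_dbsr = 0;                   /* mtspr SPRN_DBSR,-1: W1C clears all events */
    }

    int main(void)
    {
        load_dbcr0_sim(0, 0x40000000); /* e.g. the internal debug mode bit */
        printf("dbcr0=%#x saved=%#x dbsr=%#x\n", hw_dbcr0, global_dbcr0[0], hw_dbsr);
        return 0;
    }
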
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work: /* r10 contains MSR_KERNEL here */
andi. r0,r9,_TIF_NEED_RESCHED
beq do_user_signal

do_resched: /* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
mfmsr r10
#endif
ori r10,r10,MSR_EE
mtmsr r10 /* hard-enable interrupts */
bl schedule
recheck:
/* Note: And we don't tell it we are disabling them again
* either. Those disable/enable cycles used to peek at
* TI_FLAGS aren't advertised.
*/
LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
mtmsr r10 /* disable interrupts */
lwz r9,TI_FLAGS(r2)
andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
do_user_signal: /* r10 contains MSR_KERNEL here */
ori r10,r10,MSR_EE
mtmsr r10 /* hard-enable interrupts */
/* save r13-r31 in the exception frame, if not already done */
lwz r3,_TRAP(r1)
andi. r0,r3,1
beq 2f
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
2: addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r9
bl do_notify_resume
REST_NVGPRS(r1)
b recheck

/*
* We come here when we are at the end of handling an exception
* that occurred at a place where taking an exception will lose
* state information, such as the contents of SRR0 and SRR1.
*/
nonrecoverable:
lis r10,exc_exit_restart_end@ha
addi r10,r10,exc_exit_restart_end@l
cmplw r12,r10
bge 3f
lis r11,exc_exit_restart@ha
addi r11,r11,exc_exit_restart@l
cmplw r12,r11
blt 3f
lis r10,ee_restarts@ha
lwz r12,ee_restarts@l(r10)
addi r12,r12,1
stw r12,ee_restarts@l(r10)
mr r12,r11 /* restart at exc_exit_restart */
blr
3: /* OK, we can't recover, kill this process */
lwz r3,_TRAP(r1)
andi. r0,r3,1
beq 5f
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
5: mfspr r2,SPRN_SPRG_THREAD
addi r2,r2,-THREAD
tovirt(r2,r2) /* set back r2 to current */
4: addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
/* shouldn't return */
b 4b
_ASM_NOKPROBE_SYMBOL(nonrecoverable)

.section .bss
.align 2
ee_restarts:
.space 4
.previous
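
The recovery policy above reduces to a range check on the trapped PC. Below is a self-contained C sketch of that check, with a counter mirroring ee_restarts; the addresses are arbitrary illustration values.

    #include <stdio.h>

    /* Toy model of the nonrecoverable check: if the trapped PC lies inside
     * [exc_exit_restart, exc_exit_restart_end) the exit path is simply
     * restarted and a counter is bumped; otherwise the process is doomed. */
    static unsigned ee_restarts;

    static int try_restart(unsigned long pc, unsigned long start, unsigned long end)
    {
        if (pc >= start && pc < end) {
            ee_restarts++;
            return 1;   /* mr r12,r11: resume at exc_exit_restart */
        }
        return 0;       /* 3: OK, we can't recover, kill this process */
    }

    int main(void)
    {
        printf("%d\n", try_restart(0x105, 0x100, 0x110)); /* restartable */
        printf("%d\n", try_restart(0x200, 0x100, 0x110)); /* unrecoverable */
        return 0;
    }
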

/*
* PROM code for specific machines follows. Put it
* here so it's easy to add arch-specific sections later.

@@ -1088,7 +566,6 @@ _GLOBAL(enter_rtas)
lis r6,1f@ha /* physical return address for rtas */
addi r6,r6,1f@l
tophys(r6,r6)
tophys_novmstack r7, r1
lwz r8,RTASENTRY(r4)
lwz r4,RTASBASE(r4)
mfmsr r9

@@ -1097,24 +574,25 @@ _GLOBAL(enter_rtas)
mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */
li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
mtlr r6
stw r7, THREAD + RTAS_SP(r2)
stw r1, THREAD + RTAS_SP(r2)
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
rfi
1: tophys_novmstack r9, r1
#ifdef CONFIG_VMAP_STACK
li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
mtmsr r0
isync
#endif
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
lwz r9,8(r9) /* original msr value */
addi r1,r1,INT_FRAME_SIZE
li r0,0
tophys_novmstack r7, r2
stw r0, THREAD + RTAS_SP(r7)
1:
lis r8, 1f@h
ori r8, r8, 1f@l
LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
rfi /* return to caller */
rfi /* Reactivate MMU translation */
1:
lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */
lwz r9,8(r1) /* original msr value */
addi r1,r1,INT_FRAME_SIZE
li r0,0
stw r0, THREAD + RTAS_SP(r2)
mtlr r8
mtmsr r9
blr /* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */

@@ -117,13 +117,12 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/*
* RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
* would clobber syscall parameters. Also we always enter with IRQs
* enabled and nothing pending. system_call_exception() will call
* trace_hardirqs_off().
*
* scv enters with MSR[EE]=1, so don't set PACA_IRQ_HARD_DIS. The
* entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED.
* scv enters with MSR[EE]=1 and is immediately considered soft-masked.
* The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
* and interrupts may be masked and pending already.
* system_call_exception() will call trace_hardirqs_off() which means
* interrupts could already have been blocked before trace_hardirqs_off,
* but this is the best we can do.
*/

/* Calling convention has r9 = orig r0, r10 = regs */

@@ -288,9 +287,8 @@ END_BTB_FLUSH_SECTION
std r11,-16(r10) /* "regshere" marker */

/*
* RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
* would clobber syscall parameters. Also we always enter with IRQs
* enabled and nothing pending. system_call_exception() will call
* We always enter kernel from userspace with irq soft-mask enabled and
* nothing pending. system_call_exception() will call
* trace_hardirqs_off().
*/
li r11,IRQS_ALL_DISABLED

@@ -417,19 +415,6 @@ _GLOBAL(ret_from_kernel_thread)
li r3,0
b .Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3E
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
ld r11,_TRAP(r1)
andi. r0,r11,1
beqlr-
SAVE_NVGPRS(r1)
clrrdi r0,r11,1
std r0,_TRAP(r1)
blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);
#endif

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE \

@@ -645,7 +630,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr

#ifdef CONFIG_PPC_BOOK3S
/*
* If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
* touched, no exit work created, then this can be used.

@@ -657,6 +641,7 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
kuap_check_amr r3, r4
ld r5,_MSR(r1)
andi. r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
bne .Lfast_user_interrupt_return_amr
kuap_kernel_restore r3, r4
andi. r0,r5,MSR_RI

@@ -665,6 +650,10 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
b . /* should not get here */
#else
bne .Lfast_user_interrupt_return
b .Lfast_kernel_interrupt_return
#endif

.balign IFETCH_ALIGN_BYTES
.globl interrupt_return

@@ -678,8 +667,10 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
cmpdi r3,0
bne- .Lrestore_nvgprs

#ifdef CONFIG_PPC_BOOK3S
.Lfast_user_interrupt_return_amr:
kuap_user_restore r3, r4
#endif
.Lfast_user_interrupt_return:
ld r11,_NIP(r1)
ld r12,_MSR(r1)

@@ -788,7 +779,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

RFI_TO_KERNEL
b . /* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_RTAS
/*

@@ -63,9 +63,6 @@
ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

special_reg_save:
lbz r9,PACAIRQHAPPENED(r13)
RECONCILE_IRQ_STATE(r3,r4)

/*
* We only need (or have stack space) to save this stuff if
* we interrupted the kernel.

@@ -119,15 +116,11 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS5,r10
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
SPECIAL_EXC_STORE(r9,IRQHAPPENED)

mfspr r10,SPRN_DEAR
SPECIAL_EXC_STORE(r10,DEAR)
mfspr r10,SPRN_ESR
SPECIAL_EXC_STORE(r10,ESR)

lbz r10,PACAIRQSOFTMASK(r13)
SPECIAL_EXC_STORE(r10,SOFTE)
ld r10,_NIP(r1)
SPECIAL_EXC_STORE(r10,CSRR0)
ld r10,_MSR(r1)

@@ -139,7 +132,8 @@ ret_from_level_except:
ld r3,_MSR(r1)
andi. r3,r3,MSR_PR
beq 1f
b ret_from_except
REST_NVGPRS(r1)
b interrupt_return
1:

LOAD_REG_ADDR(r11,extlb_level_exc)

@@ -193,27 +187,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

lbz r6,PACAIRQSOFTMASK(r13)
ld r5,SOFTE(r1)

/* Interrupts had better not already be enabled... */
tweqi r6,IRQS_ENABLED

andi. r6,r5,IRQS_DISABLED
bne 1f

TRACE_ENABLE_INTS
stb r5,PACAIRQSOFTMASK(r13)
1:
/*
* Restore PACAIRQHAPPENED rather than setting it based on
* the return MSR[EE], since we could have interrupted
* __check_irq_replay() or other inconsistent transitory
* states that must remain that way.
*/
SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
stb r10,PACAIRQHAPPENED(r13)

SPECIAL_EXC_LOAD(r10,DEAR)
mtspr SPRN_DEAR,r10
SPECIAL_EXC_LOAD(r10,ESR)

@@ -417,14 +390,15 @@ exc_##n##_common: \
std r6,_LINK(r1); \
std r7,_CTR(r1); \
std r8,_XER(r1); \
li r3,(n)+1; /* indicate partial regs in trap */ \
li r3,(n); /* regs.trap vector */ \
std r9,0(r1); /* store stack frame back link */ \
std r10,_CCR(r1); /* store orig CR in stackframe */ \
std r9,GPR1(r1); /* store stack frame back link */ \
std r11,SOFTE(r1); /* and save it to stackframe */ \
std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
std r3,_TRAP(r1); /* set trap number */ \
std r0,RESULT(r1); /* clear regs->result */
std r0,RESULT(r1); /* clear regs->result */ \
SAVE_NVGPRS(r1);

#define EXCEPTION_COMMON(n) \
EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)

@@ -435,28 +409,6 @@ exc_##n##_common: \
#define EXCEPTION_COMMON_DBG(n) \
EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)

/*
* This is meant for exceptions that don't immediately hard-enable. We
* set a bit in paca->irq_happened to ensure that a subsequent call to
* arch_local_irq_restore() will properly hard-enable and avoid the
* fast-path, and then reconcile irq state.
*/
#define INTS_DISABLE RECONCILE_IRQ_STATE(r3,r4)

/*
* This is called by exceptions that don't use INTS_DISABLE (that did not
* touch irq indicators in the PACA). This will restore MSR:EE to its
* previous value
*
* XXX In the long run, we may want to open-code it in order to separate the
* load from the wrtee, thus limiting the latency caused by the dependency
* but at this point, I'll favor code clarity until we have a near to final
* implementation
*/
#define INTS_RESTORE_HARD \
ld r11,_MSR(r1); \
wrtee r11;

/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n) \
exc_##n##_bad_stack: \

@@ -505,12 +457,11 @@ exc_##n##_bad_stack: \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
EXCEPTION_COMMON(trapnum) \
INTS_DISABLE; \
ack(r8); \
CHECK_NAPPING(); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b ret_from_except_lite;
b interrupt_return

/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"

@@ -561,11 +512,10 @@ __end_interrupts:
CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x100)
bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
bl unknown_nmi_exception
b ret_from_crit_except

/* Machine Check Interrupt */

@@ -573,7 +523,6 @@ __end_interrupts:
MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_MC(0x000)
bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD

@@ -587,7 +536,6 @@ __end_interrupts:
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
EXCEPTION_COMMON(0x300)
INTS_DISABLE
b storage_fault_common

/* Instruction Storage Interrupt */

@@ -597,7 +545,6 @@ __end_interrupts:
li r15,0
mr r14,r10
EXCEPTION_COMMON(0x400)
INTS_DISABLE
b storage_fault_common

/* External Input Interrupt */

@@ -619,13 +566,12 @@ __end_interrupts:
PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
EXCEPTION_COMMON(0x700)
INTS_DISABLE
std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXGEN+EX_R14(r13)
bl save_nvgprs
bl program_check_exception
b ret_from_except
REST_NVGPRS(r1)
b interrupt_return

/* Floating Point Unavailable Interrupt */
START_EXCEPTION(fp_unavailable);

@@ -637,12 +583,10 @@ __end_interrupts:
andi. r0,r12,MSR_PR;
beq- 1f
bl load_up_fpu
b fast_exception_return
1: INTS_DISABLE
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
b fast_interrupt_return
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_fp_unavailable_exception
b ret_from_except
b interrupt_return

/* Altivec Unavailable Interrupt */
START_EXCEPTION(altivec_unavailable);

@@ -656,15 +600,13 @@ BEGIN_FTR_SECTION
andi. r0,r12,MSR_PR;
beq- 1f
bl load_up_altivec
b fast_exception_return
b fast_interrupt_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
INTS_DISABLE
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
b ret_from_except
b interrupt_return

/* AltiVec Assist */
START_EXCEPTION(altivec_assist);

@@ -672,17 +614,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
BOOKE_INTERRUPT_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220)
INTS_DISABLE
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
bl altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
REST_NVGPRS(r1)
#else
bl unknown_exception
#endif
b ret_from_except
b interrupt_return

/* Decrementer Interrupt */

@@ -698,14 +639,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x9f0)
bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_BOOKE_WDT
bl WatchdogException
#else
bl unknown_exception
bl unknown_nmi_exception
#endif
b ret_from_crit_except

@@ -722,11 +662,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0xf20)
INTS_DISABLE
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
b ret_from_except
b interrupt_return

/* Debug exception as a critical interrupt*/
START_EXCEPTION(debug_crit);

@@ -792,9 +730,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXCRIT+EX_R14(r13)
ld r15,PACA_EXCRIT+EX_R15(r13)
bl save_nvgprs
bl DebugException
b ret_from_except
REST_NVGPRS(r1)
b interrupt_return

kernel_dbg_exc:
b . /* NYI */

@@ -859,24 +797,22 @@ kernel_dbg_exc:
*/
mfspr r14,SPRN_DBSR
EXCEPTION_COMMON_DBG(0xd08)
INTS_DISABLE
std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXDBG+EX_R14(r13)
ld r15,PACA_EXDBG+EX_R15(r13)
bl save_nvgprs
bl DebugException
b ret_from_except
REST_NVGPRS(r1)
b interrupt_return

START_EXCEPTION(perfmon);
NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x260)
INTS_DISABLE
CHECK_NAPPING()
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
b ret_from_except_lite
b interrupt_return

/* Doorbell interrupt */
MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,

@@ -887,11 +823,10 @@ kernel_dbg_exc:
CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x2a0)
bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
bl unknown_nmi_exception
b ret_from_crit_except

/*

@@ -903,21 +838,18 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x2c0)
addi r3,r1,STACK_FRAME_OVERHEAD
bl save_nvgprs
INTS_RESTORE_HARD
bl unknown_exception
b ret_from_except
b interrupt_return

/* Guest Doorbell critical Interrupt */
START_EXCEPTION(guest_doorbell_crit);
CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x2e0)
bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
bl unknown_nmi_exception
b ret_from_crit_except

/* Hypervisor call */

@@ -926,10 +858,8 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x310)
addi r3,r1,STACK_FRAME_OVERHEAD
bl save_nvgprs
INTS_RESTORE_HARD
bl unknown_exception
b ret_from_except
b interrupt_return

/* Embedded Hypervisor privileged */
START_EXCEPTION(ehpriv);

@@ -937,10 +867,8 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x320)
addi r3,r1,STACK_FRAME_OVERHEAD
bl save_nvgprs
INTS_RESTORE_HARD
bl unknown_exception
b ret_from_except
b interrupt_return

/* LRAT Error interrupt */
START_EXCEPTION(lrat_error);

@@ -948,10 +876,8 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x340)
addi r3,r1,STACK_FRAME_OVERHEAD
bl save_nvgprs
INTS_RESTORE_HARD
bl unknown_exception
b ret_from_except
b interrupt_return

/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened

@@ -1011,14 +937,7 @@ storage_fault_common:
ld r14,PACA_EXGEN+EX_R14(r13)
ld r15,PACA_EXGEN+EX_R15(r13)
bl do_page_fault
cmpdi r3,0
bne- 1f
b ret_from_except_lite
1: bl save_nvgprs
mr r4,r3
addi r3,r1,STACK_FRAME_OVERHEAD
bl __bad_page_fault
b ret_from_except
b interrupt_return

/*
* Alignment exception doesn't fit entirely in the 0x100 bytes so it

@@ -1030,291 +949,9 @@ alignment_more:
addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXGEN+EX_R14(r13)
ld r15,PACA_EXGEN+EX_R15(r13)
bl save_nvgprs
INTS_RESTORE_HARD
bl alignment_exception
b ret_from_except

.align 7
_GLOBAL(ret_from_except)
ld r11,_TRAP(r1)
andi. r0,r11,1
bne ret_from_except_lite
REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
/*
* Disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt.
*/
wrteei 0

ld r9, PACA_THREAD_INFO(r13)
ld r3,_MSR(r1)
ld r10,PACACURRENT(r13)
ld r4,TI_FLAGS(r9)
andi. r3,r3,MSR_PR
beq resume_kernel
lwz r3,(THREAD+THREAD_DBCR0)(r10)

/* Check current_thread_info()->flags */
andi. r0,r4,_TIF_USER_WORK_MASK
bne 1f
/*
* Check to see if the dbcr0 register is set up to debug.
* Use the internal debug mode bit to do this.
*/
andis. r0,r3,DBCR0_IDM@h
beq restore
mfmsr r0
rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
mtmsr r0
mtspr SPRN_DBCR0,r3
li r10, -1
mtspr SPRN_DBSR,r10
b restore
1: andi. r0,r4,_TIF_NEED_RESCHED
beq 2f
bl restore_interrupts
SCHEDULE_USER
b ret_from_except_lite
2:
bl save_nvgprs
/*
* Use a non volatile GPR to save and restore our thread_info flags
* across the call to restore_interrupts.
*/
mr r30,r4
bl restore_interrupts
mr r4,r30
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_notify_resume
b ret_from_except

resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
beq+ 1f

addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */

ld r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */

/* Copy from the original to the trampoline. */
li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
li r6,0 /* start offset: 0 */
mtctr r5
2: ldx r0,r6,r4
stdx r0,r6,r3
addi r6,r6,8
bdnz 2b

/* Do real store operation to complete stdu */
ld r5,GPR1(r1)
std r8,0(r5)

/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
0: ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
1:

#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
andi. r0,r4,_TIF_NEED_RESCHED
beq+ restore
/* Check that preempt_count() == 0 and interrupts are enabled */
lwz r8,TI_PREEMPT(r9)
cmpwi cr0,r8,0
bne restore
ld r0,SOFTE(r1)
andi. r0,r0,IRQS_DISABLED
bne restore

/*
* Here we are preempting the current task. We want to make
* sure we are soft-disabled first and reconcile irq state.
*/
RECONCILE_IRQ_STATE(r3,r4)
bl preempt_schedule_irq

/*
* arch_local_irq_restore() from preempt_schedule_irq above may
* enable hard interrupt but we really should disable interrupts
* when we return from the interrupt, and so that we don't get
* interrupted after loading SRR0/1.
*/
wrteei 0
#endif /* CONFIG_PREEMPT */

restore:
/*
* This is the main kernel exit path. First we check if we
* are about to re-enable interrupts
*/
ld r5,SOFTE(r1)
lbz r6,PACAIRQSOFTMASK(r13)
andi. r5,r5,IRQS_DISABLED
bne .Lrestore_irq_off

/* We are enabling, were we already enabled ? Yes, just return */
andi. r6,r6,IRQS_DISABLED
beq cr0,fast_exception_return

/*
* We are about to soft-enable interrupts (we are hard disabled
* at this point). We check if there's anything that needs to
* be replayed first.
*/
lbz r0,PACAIRQHAPPENED(r13)
cmpwi cr0,r0,0
bne- .Lrestore_check_irq_replay

/*
* Get here when nothing happened while soft-disabled, just
* soft-enable and move-on. We will hard-enable as a side
* effect of rfi
*/
.Lrestore_no_replay:
TRACE_ENABLE_INTS
li r0,IRQS_ENABLED
stb r0,PACAIRQSOFTMASK(r13);

/* This is the return from load_up_fpu fast path which could do with
* less GPR restores in fact, but for now we have a single return path
*/
fast_exception_return:
wrteei 0
1: mr r0,r13
ld r10,_MSR(r1)
REST_4GPRS(2, r1)
andi. r6,r10,MSR_PR
REST_2GPRS(6, r1)
beq 1f
ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
ld r0,GPR13(r1)

1: stdcx. r0,0,r1 /* to clear the reservation */

ld r8,_CCR(r1)
ld r9,_LINK(r1)
ld r10,_CTR(r1)
ld r11,_XER(r1)
mtcr r8
mtlr r9
mtctr r10
mtxer r11
REST_2GPRS(8, r1)
ld r10,GPR10(r1)
ld r11,GPR11(r1)
ld r12,GPR12(r1)
mtspr SPRN_SPRG_GEN_SCRATCH,r0

std r10,PACA_EXGEN+EX_R10(r13);
std r11,PACA_EXGEN+EX_R11(r13);
ld r10,_NIP(r1)
ld r11,_MSR(r1)
ld r0,GPR0(r1)
ld r1,GPR1(r1)
mtspr SPRN_SRR0,r10
mtspr SPRN_SRR1,r11
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
mfspr r13,SPRN_SPRG_GEN_SCRATCH
rfi

/*
* We are returning to a context with interrupts soft disabled.
*
* However, we may also be about to hard-enable, so we need to
* make sure that in this case, we also clear PACA_IRQ_HARD_DIS
* or that bit can get out of sync and bad things will happen
*/
.Lrestore_irq_off:
ld r3,_MSR(r1)
lbz r7,PACAIRQHAPPENED(r13)
andi. r0,r3,MSR_EE
beq 1f
rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
stb r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
/* The interrupt should not have soft enabled. */
lbz r7,PACAIRQSOFTMASK(r13)
1: tdeqi r7,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
b fast_exception_return

/*
* Something did happen, check if a re-emit is needed
* (this also clears paca->irq_happened)
*/
.Lrestore_check_irq_replay:
/* XXX: We could implement a fast path here where we check
* for irq_happened being just 0x01, in which case we can
* clear it and return. That means that we would potentially
* miss a decrementer having wrapped all the way around.
*
* Still, this might be useful for things like hash_page
*/
bl __check_irq_replay
cmpwi cr0,r3,0
beq .Lrestore_no_replay

/*
* We need to re-emit an interrupt. We do so by re-using our
* existing exception frame. We first change the trap value,
* but we need to ensure we preserve the low nibble of it
*/
ld r4,_TRAP(r1)
clrldi r4,r4,60
or r4,r4,r3
std r4,_TRAP(r1)

/*
* PACA_IRQ_HARD_DIS won't always be set here, so set it now
* to reconcile the IRQ state. Tracing is already accounted for.
*/
lbz r4,PACAIRQHAPPENED(r13)
ori r4,r4,PACA_IRQ_HARD_DIS
stb r4,PACAIRQHAPPENED(r13)

/*
* Then find the right handler and call it. Interrupts are
* still soft-disabled and we keep them that way.
*/
cmpwi cr0,r3,0x500
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl do_IRQ
b ret_from_except
1: cmpwi cr0,r3,0x900
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl timer_interrupt
b ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
cmpwi cr0,r3,0x280
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1: b ret_from_except /* What else to do here ? */
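
The replay dispatch above can be summarized as a switch on the vector returned by __check_irq_replay; the 0x500/0x900/0x280 cases match the cmpwi values in the assembly. The *_sim handlers in this C sketch are placeholders, not kernel functions.

    #include <stdio.h>

    /* Toy model: an interrupt was latched while soft-disabled, and the exit
     * path re-enters the matching handler using the existing frame. */
    static void do_IRQ_sim(void)          { puts("external"); }
    static void timer_interrupt_sim(void) { puts("decrementer"); }
    static void doorbell_sim(void)        { puts("doorbell"); }

    static void replay(int vec)
    {
        switch (vec) {
        case 0x500: do_IRQ_sim(); break;
        case 0x900: timer_interrupt_sim(); break;
        case 0x280: doorbell_sim(); break;
        default: break;   /* "What else to do here ?" */
        }
    }

    int main(void) { replay(0x900); replay(0x500); return 0; }
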

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);
b interrupt_return

/*
* Trampolines used when spotting a bad kernel stack pointer in

@@ -692,25 +692,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r1,GPR1(r1)
.endm

/*
* When the idle code in power4_idle puts the CPU into NAP mode,
* it has to do so in a loop, and relies on the external interrupt
* and decrementer interrupt entry code to get it out of the loop.
* It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
* to signal that it is in the loop and needs help to get out.
*/
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP \
BEGIN_FTR_SECTION \
ld r11, PACA_THREAD_INFO(r13); \
ld r9,TI_LOCAL_FLAGS(r11); \
andi. r10,r9,_TLF_NAPPING; \
bnel power4_fixup_nap; \
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif
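
FINISH_NAP's protocol is small enough to model directly: if the interrupted thread had flagged itself _TLF_NAPPING, clear the flag and retarget the saved NIP at the nap fixup point. A hedged C sketch follows; the flag value and addresses are invented.

    #include <stdio.h>

    #define TLF_NAPPING 0x1

    /* Toy model of the _TLF_NAPPING handshake between the idle loop and the
     * interrupt entry code. */
    struct frame { unsigned long nip; };

    static unsigned long local_flags = TLF_NAPPING;
    static const unsigned long nap_return = 0xc0000b00; /* made-up address */

    static void finish_nap(struct frame *f)
    {
        if (local_flags & TLF_NAPPING) {
            local_flags &= ~TLF_NAPPING;  /* andc r9,r9,r10 */
            f->nip = nap_return;          /* std r10,_NIP(r1) */
        }
    }

    int main(void)
    {
        struct frame f = { .nip = 0xc0000a00 };
        finish_nap(&f);
        printf("resume at %#lx\n", f.nip);
        return 0;
    }
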

/*
* There are a few constraints to be concerned with.
* - Real mode exceptions code/data must be located at their physical location.

@@ -1248,7 +1229,6 @@ EXC_COMMON_BEGIN(machine_check_common)
*/
GEN_COMMON machine_check

FINISH_NAP
/* Enable MSR_RI when finished with PACA_EXMC */
li r10,MSR_RI
mtmsrd r10,1

@@ -1571,7 +1551,6 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
b interrupt_return

@@ -1801,7 +1780,6 @@ EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl timer_interrupt
b interrupt_return

@@ -1886,7 +1864,6 @@ EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
GEN_COMMON doorbell_super
FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception

@@ -2237,7 +2214,6 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)

EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
b interrupt_return

@@ -2266,7 +2242,6 @@ EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
GEN_COMMON h_doorbell
FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception

@@ -2299,7 +2274,6 @@ EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
b interrupt_return

@@ -2345,7 +2319,6 @@ EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
b interrupt_return

@@ -2530,8 +2503,6 @@ EXC_VIRT_NONE(0x5100, 0x100)
INT_DEFINE_BEGIN(cbe_system_error)
IVEC=0x1200
IHSRR=1
IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(cbe_system_error)

EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)

@@ -2551,11 +2522,16 @@ EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif

/**
* Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
* This has been removed from the ISA before 2.01, which is the earliest
* 64-bit BookS ISA supported, however the G5 / 970 implements this
* interrupt with a non-architected feature available through the support
* processor interface.
*/
INT_DEFINE_BEGIN(instruction_breakpoint)
IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_SKIP=1
IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)

@@ -2701,8 +2677,6 @@ EXC_COMMON_BEGIN(denorm_exception_common)
INT_DEFINE_BEGIN(cbe_maintenance)
IVEC=0x1600
IHSRR=1
IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(cbe_maintenance)

EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)

@@ -2754,8 +2728,6 @@ EXC_COMMON_BEGIN(altivec_assist_common)
INT_DEFINE_BEGIN(cbe_thermal)
IVEC=0x1800
IHSRR=1
IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(cbe_thermal)

EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)

@@ -3096,24 +3068,6 @@ USE_FIXED_SECTION(virt_trampolines)
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)

#ifdef CONFIG_PPC_970_NAP
/*
* Called by exception entry code if _TLF_NAPPING was set, this clears
* the NAPPING flag, and redirects the exception exit to
* power4_fixup_nap_return.
*/
.globl power4_fixup_nap
EXC_COMMON_BEGIN(power4_fixup_nap)
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
LOAD_REG_ADDR(r10, power4_idle_nap_return)
std r10,_NIP(r1)
blr

power4_idle_nap_return:
blr
#endif

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);

@@ -31,6 +31,7 @@
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
#include <asm/interrupt.h>

/*
* The CPU who acquired the lock to trigger the fadump crash should

@@ -44,22 +45,21 @@ static struct fw_dump fw_dump;

static void __init fadump_reserve_crash_area(u64 base);

struct kobject *fadump_kobj;

#ifndef CONFIG_PRESERVE_FA_DUMP

static struct kobject *fadump_kobj;

static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);

struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };

#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
RESERVED_RNGS_SZ, 0,
RESERVED_RNGS_CNT, true };
static struct fadump_mrange_info
reserved_mrange_info = { "reserved", rngs, RESERVED_RNGS_SZ, 0, RESERVED_RNGS_CNT, true };

static void __init early_init_dt_scan_reserved_ranges(unsigned long node);

@@ -79,7 +79,7 @@ static struct cma *fadump_cma;
* But for some reason even if it fails we still have the memory reservation
* with us and we can still continue doing fadump.
*/
int __init fadump_cma_init(void)
static int __init fadump_cma_init(void)
{
unsigned long long base, size;
int rc;

@@ -292,7 +292,7 @@ static void fadump_show_config(void)
* that is required for a kernel to boot successfully.
*
*/
static inline u64 fadump_calculate_reserve_size(void)
static __init u64 fadump_calculate_reserve_size(void)
{
u64 base, size, bootmem_min;
int ret;

@@ -728,7 +728,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
* If we came in via system reset, wait a while for the secondary
* CPUs to enter.
*/
if (TRAP(&(fdh->regs)) == 0x100) {
if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) {
msecs = CRASH_TIMEOUT;
while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
mdelay(1);

@@ -92,9 +92,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
/* enable use of FP after return */
#ifdef CONFIG_PPC32
mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
#ifdef CONFIG_VMAP_STACK
tovirt(r5, r5)
#endif
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4

@ -10,36 +10,39 @@
|
|||
* We assume sprg3 has the physical address of the current
|
||||
* task's thread_struct.
|
||||
*/
|
||||
.macro EXCEPTION_PROLOG handle_dar_dsisr=0
|
||||
.macro EXCEPTION_PROLOG trapno name handle_dar_dsisr=0
|
||||
EXCEPTION_PROLOG_0 handle_dar_dsisr=\handle_dar_dsisr
|
||||
EXCEPTION_PROLOG_1
|
||||
EXCEPTION_PROLOG_2 handle_dar_dsisr=\handle_dar_dsisr
|
||||
EXCEPTION_PROLOG_2 \trapno \name handle_dar_dsisr=\handle_dar_dsisr
|
||||
.endm
|
||||
|
||||
.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
|
||||
mtspr SPRN_SPRG_SCRATCH0,r10
|
||||
mtspr SPRN_SPRG_SCRATCH1,r11
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
.if \handle_dar_dsisr
|
||||
#ifdef CONFIG_40x
|
||||
mfspr r11, SPRN_DEAR
|
||||
#else
|
||||
mfspr r11, SPRN_DAR
|
||||
#endif
|
||||
stw r11, DAR(r10)
|
||||
#ifdef CONFIG_40x
|
||||
mfspr r11, SPRN_ESR
|
||||
#else
|
||||
mfspr r11, SPRN_DSISR
|
||||
#endif
|
||||
stw r11, DSISR(r10)
|
||||
.endif
|
||||
mfspr r11, SPRN_SRR0
|
||||
stw r11, SRR0(r10)
|
||||
#endif
|
||||
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
stw r11, SRR1(r10)
|
||||
#endif
|
||||
mfcr r10
|
||||
andi. r11, r11, MSR_PR
|
||||
.endm
|
||||
|
||||
.macro EXCEPTION_PROLOG_1 for_rtas=0
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
.macro EXCEPTION_PROLOG_1
|
||||
mtspr SPRN_SPRG_SCRATCH2,r1
|
||||
subi r1, r1, INT_FRAME_SIZE /* use r1 if kernel */
|
||||
beq 1f
|
||||
|
@ -47,32 +50,33 @@
|
|||
lwz r1,TASK_STACK-THREAD(r1)
|
||||
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
|
||||
1:
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mtcrf 0x3f, r1
|
||||
bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
|
||||
#else
|
||||
subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
|
||||
beq 1f
|
||||
mfspr r11,SPRN_SPRG_THREAD
|
||||
lwz r11,TASK_STACK-THREAD(r11)
|
||||
addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
|
||||
1: tophys(r11, r11)
|
||||
bt 32 - THREAD_ALIGN_SHIFT, vmap_stack_overflow
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
|
||||
mtmsr r11
|
||||
isync
|
||||
.macro EXCEPTION_PROLOG_2 trapno name handle_dar_dsisr=0
|
||||
#ifdef CONFIG_PPC_8xx
|
||||
.if \handle_dar_dsisr
|
||||
li r11, RPN_PATTERN
|
||||
mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */
|
||||
.endif
|
||||
#endif
|
||||
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~MSR_RI) /* re-enable MMU */
|
||||
mtspr SPRN_SRR1, r11
|
||||
lis r11, 1f@h
|
||||
ori r11, r11, 1f@l
|
||||
mtspr SPRN_SRR0, r11
|
||||
mfspr r11, SPRN_SPRG_SCRATCH2
|
||||
rfi
|
||||
|
||||
.text
|
||||
\name\()_virt:
|
||||
1:
|
||||
stw r11,GPR1(r1)
|
||||
stw r11,0(r1)
|
||||
mr r11, r1
|
||||
#else
|
||||
stw r1,GPR1(r11)
|
||||
stw r1,0(r11)
|
||||
tovirt(r1, r11) /* set new kernel sp */
|
||||
#endif
|
||||
stw r10,_CCR(r11) /* save registers */
|
||||
stw r12,GPR12(r11)
|
||||
stw r9,GPR9(r11)
|
||||
|
@ -82,7 +86,6 @@
|
|||
stw r12,GPR11(r11)
|
||||
mflr r10
|
||||
stw r10,_LINK(r11)
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mfspr r12, SPRN_SPRG_THREAD
|
||||
tovirt(r12, r12)
|
||||
.if \handle_dar_dsisr
|
||||
|
@ -93,26 +96,48 @@
|
|||
.endif
|
||||
lwz r9, SRR1(r12)
|
||||
lwz r12, SRR0(r12)
|
||||
#else
|
||||
mfspr r12,SPRN_SRR0
|
||||
mfspr r9,SPRN_SRR1
|
||||
#endif
|
||||
#ifdef CONFIG_40x
|
||||
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
|
||||
#elif defined(CONFIG_PPC_8xx)
|
||||
mtspr SPRN_EID, r2 /* Set MSR_RI */
|
||||
#else
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
li r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
|
||||
#else
|
||||
li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
|
||||
#endif
|
||||
li r10, MSR_KERNEL /* can take exceptions */
|
||||
mtmsr r10 /* (except for mach check in rtas) */
|
||||
#endif
|
||||
stw r0,GPR0(r11)
|
||||
COMMON_EXCEPTION_PROLOG_END \trapno
|
||||
_ASM_NOKPROBE_SYMBOL(\name\()_virt)
|
||||
.endm
|
||||
|
||||
.macro COMMON_EXCEPTION_PROLOG_END trapno
|
||||
stw r0,GPR0(r1)
|
||||
lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
|
||||
addi r10,r10,STACK_FRAME_REGS_MARKER@l
|
||||
stw r10,8(r11)
|
||||
SAVE_4GPRS(3, r11)
|
||||
SAVE_2GPRS(7, r11)
|
||||
stw r10,8(r1)
|
||||
li r10, \trapno
|
||||
stw r10,_TRAP(r1)
|
||||
SAVE_4GPRS(3, r1)
|
||||
SAVE_2GPRS(7, r1)
|
||||
SAVE_NVGPRS(r1)
|
||||
stw r2,GPR2(r1)
|
||||
stw r12,_NIP(r1)
|
||||
stw r9,_MSR(r1)
|
||||
mfctr r10
|
||||
mfspr r2,SPRN_SPRG_THREAD
|
||||
stw r10,_CTR(r1)
|
||||
tovirt(r2, r2)
|
||||
mfspr r10,SPRN_XER
|
||||
addi r2, r2, -THREAD
|
||||
stw r10,_XER(r1)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
.endm
|
||||
|
||||
.macro prepare_transfer_to_handler
#ifdef CONFIG_PPC_BOOK3S_32
	andi.	r12,r9,MSR_PR
	bne	777f
	bl	prepare_transfer_to_handler
777:
#endif
.endm

.macro SYSCALL_ENTRY trapno

@@ -156,54 +181,6 @@
	b	transfer_to_syscall		/* jump to handler */
.endm

.macro save_dar_dsisr_on_stack reg1, reg2, sp
#ifndef CONFIG_VMAP_STACK
	mfspr	\reg1, SPRN_DAR
	mfspr	\reg2, SPRN_DSISR
	stw	\reg1, _DAR(\sp)
	stw	\reg2, _DSISR(\sp)
#endif
.endm

.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
#ifdef CONFIG_VMAP_STACK
	lwz	\reg1, _DAR(\sp)
	lwz	\reg2, _DSISR(\sp)
#else
	save_dar_dsisr_on_stack \reg1, \reg2, \sp
#endif
.endm

.macro tovirt_vmstack dst, src
#ifdef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tovirt_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tophys_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tophys(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

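[Editor's aside: the tovirt/tophys helpers above (being removed here) translate between the kernel's linear virtual mapping and physical addresses by a constant offset; a hedged C equivalent, with an illustrative offset name, is:]

	/*
	 * Hedged C model of tovirt()/tophys(): on these 32-bit platforms the
	 * kernel linear map sits at a fixed offset from the physical load
	 * address, so translation is a plain add/subtract.
	 * KERNELBASE_OFFSET is an illustrative name, not a kernel symbol.
	 */
	#define KERNELBASE_OFFSET	(PAGE_OFFSET - PHYSICAL_START)

	static inline unsigned long sketch_tovirt(unsigned long phys)
	{
		return phys + KERNELBASE_OFFSET;
	}

	static inline unsigned long sketch_tophys(unsigned long virt)
	{
		return virt - KERNELBASE_OFFSET;
	}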
/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).

@@ -217,41 +194,29 @@
 */
#ifdef CONFIG_PPC_BOOK3S
#define START_EXCEPTION(n, label)		\
	__HEAD;					\
	. = n;					\
	DO_KVM n;				\
label:

#else
#define START_EXCEPTION(n, label)		\
	__HEAD;					\
	. = n;					\
label:

#endif

#define EXCEPTION(n, label, hdlr, xfer)		\
#define EXCEPTION(n, label, hdlr)		\
	START_EXCEPTION(n, label)		\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)	\
	li	r10,trap;			\
	stw	r10,_TRAP(r11);			\
	LOAD_REG_IMMEDIATE(r10, msr);		\
	bl	tfer;				\
	.long	hdlr;				\
	.long	ret

#define EXC_XFER_STD(n, hdlr)			\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)			\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)
	EXCEPTION_PROLOG n label;		\
	prepare_transfer_to_handler;		\
	bl	hdlr;				\
	b	interrupt_return

.macro vmap_stack_overflow_exception
#ifdef CONFIG_VMAP_STACK
	__HEAD
vmap_stack_overflow:
#ifdef CONFIG_SMP
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, TASK_CPU - THREAD(r1)

@@ -261,16 +226,11 @@
	lis	r1, emergency_ctx@ha
#endif
	lwz	r1, emergency_ctx@l(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 1f
	lis	r1, init_thread_union@ha
	addi	r1, r1, init_thread_union@l
1:	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
	EXCEPTION_PROLOG_2
	SAVE_NVGPRS(r11)
	addi	r3, r1, STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0, stack_overflow_exception)
#endif
	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
	EXCEPTION_PROLOG_2 0 vmap_stack_overflow
	prepare_transfer_to_handler
	bl	stack_overflow_exception
	b	interrupt_return
.endm

#endif /* __HEAD_32_H__ */

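[Editor's aside: the vmap_stack_overflow path above switches to an emergency stack before building a frame. A hedged C rendering of the stack pick visible in that code; emergency_ctx and init_thread_union are real kernel symbols, but the helper name and exact types are illustrative:]

	/*
	 * Hedged sketch of the overflow stack selection above: use the
	 * per-CPU emergency context if one has been registered, otherwise
	 * (very early boot) fall back to the init thread's stack.
	 */
	static unsigned long sketch_pick_overflow_stack(int cpu)
	{
		void *base = emergency_ctx[cpu];	/* per-CPU emergency stack */

		if (!base)				/* not allocated yet */
			base = &init_thread_union;
		return (unsigned long)base + THREAD_SIZE - INT_FRAME_SIZE;
	}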
@@ -89,7 +89,11 @@ _ENTRY(crit_srr0)
	.space	4
_ENTRY(crit_srr1)
	.space	4
_ENTRY(saved_ksp_limit)
_ENTRY(crit_r1)
	.space	4
_ENTRY(crit_dear)
	.space	4
_ENTRY(crit_esr)
	.space	4

/*

@@ -100,42 +104,62 @@ _ENTRY(saved_ksp_limit)
 * Instead we use a couple of words of memory at low physical addresses.
 * This is OK since we don't support SMP on these processors.
 */
#define CRITICAL_EXCEPTION_PROLOG					     \
	stw	r10,crit_r10@l(0);	/* save two registers to work with */\
	stw	r11,crit_r11@l(0);					     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR3;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	lis	r11,critirq_ctx@ha;					     \
	tophys(r11,r11);						     \
	lwz	r11,critirq_ctx@l(r11);					     \
	beq	1f;							     \
	/* COMING FROM USER MODE */					     \
	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of   */\
	lwz	r11,TASK_STACK-THREAD(r11); /* this thread's kernel stack */\
1:	addi	r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc an excpt frm  */\
	tophys(r11,r11);						     \
	stw	r10,_CCR(r11);		/* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
	stw	r9,_ESR(r11);		/* exception was taken		   */\
	mfspr	r12,SPRN_SRR2;						     \
	stw	r1,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR3;						     \
	stw	r1,0(r11);						     \
	tovirt(r1,r11);							     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	lis	r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */\
	addi	r10, r10, STACK_FRAME_REGS_MARKER@l;			     \
	stw	r10, 8(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)
.macro CRITICAL_EXCEPTION_PROLOG trapno name
	stw	r10,crit_r10@l(0)	/* save two registers to work with */
	stw	r11,crit_r11@l(0)
	mfspr	r10,SPRN_SRR0
	mfspr	r11,SPRN_SRR1
	stw	r10,crit_srr0@l(0)
	stw	r11,crit_srr1@l(0)
	mfspr	r10,SPRN_DEAR
	mfspr	r11,SPRN_ESR
	stw	r10,crit_dear@l(0)
	stw	r11,crit_esr@l(0)
	mfcr	r10			/* save CR in r10 for now	   */
	mfspr	r11,SPRN_SRR3		/* check whether user or kernel    */
	andi.	r11,r11,MSR_PR
	lis	r11,(critirq_ctx-PAGE_OFFSET)@ha
	lwz	r11,(critirq_ctx-PAGE_OFFSET)@l(r11)
	beq	1f
	/* COMING FROM USER MODE */
	mfspr	r11,SPRN_SPRG_THREAD	/* if from user, start at top of   */
	lwz	r11,TASK_STACK-THREAD(r11) /* this thread's kernel stack */
1:	stw	r1,crit_r1@l(0)
	addi	r1,r11,THREAD_SIZE-INT_FRAME_SIZE /* Alloc an excpt frm */
	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)) /* re-enable MMU */
	mtspr	SPRN_SRR1, r11
	lis	r11, 1f@h
	ori	r11, r11, 1f@l
	mtspr	SPRN_SRR0, r11
	rfi

	.text
1:
\name\()_virt:
	lwz	r11,crit_r1@l(0)
	stw	r11,GPR1(r1)
	stw	r11,0(r1)
	mr	r11,r1
	stw	r10,_CCR(r11)		/* save various registers */
	stw	r12,GPR12(r11)
	stw	r9,GPR9(r11)
	mflr	r10
	stw	r10,_LINK(r11)
	lis	r9,PAGE_OFFSET@ha
	lwz	r10,crit_r10@l(r9)
	lwz	r12,crit_r11@l(r9)
	stw	r10,GPR10(r11)
	stw	r12,GPR11(r11)
	lwz	r12,crit_dear@l(r9)
	lwz	r9,crit_esr@l(r9)
	stw	r12,_DEAR(r11)		/* since they may have had stuff */
	stw	r9,_ESR(r11)		/* exception was taken */
	mfspr	r12,SPRN_SRR2
	mfspr	r9,SPRN_SRR3
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
	COMMON_EXCEPTION_PROLOG_END \trapno + 2
	_ASM_NOKPROBE_SYMBOL(\name\()_virt)
.endm

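[Editor's aside: a hedged C sketch of the fixed low-memory save area the 40x critical prolog above relies on. Because these cores lack enough scratch SPRGs, a few words at a known physical address stand in; that is safe only because the parts are not SMP. The struct is illustrative; the real slots are the .space directives at crit_r10/crit_srr0/etc.]

	/*
	 * Hedged sketch of the 40x critical-exception scratch area.
	 * One static instance models the low-memory words; illustrative only.
	 */
	struct sketch_crit_save {
		unsigned long r10, r11;		/* two working registers */
		unsigned long srr0, srr1;	/* interrupted NIP/MSR */
		unsigned long r1;		/* interrupted stack pointer */
		unsigned long dear, esr;	/* fault address / syndrome */
	};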
/*
 * State at this point:

@@ -155,10 +179,10 @@ _ENTRY(saved_ksp_limit)
 */
#define CRITICAL_EXCEPTION(n, label, hdlr)			\
	START_EXCEPTION(n, label);				\
	CRITICAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  crit_transfer_to_handler, ret_from_crit_exc)
	CRITICAL_EXCEPTION_PROLOG n label;			\
	prepare_transfer_to_handler;				\
	bl	hdlr;						\
	b	ret_from_crit_exc

/*
 * 0x0100 - Critical Interrupt Exception

@@ -178,69 +202,67 @@ _ENTRY(saved_ksp_limit)
 * if they can't resolve the lightweight TLB fault.
 */
	START_EXCEPTION(0x0300,	DataStorage)
	EXCEPTION_PROLOG
	mfspr	r5, SPRN_ESR		/* Grab the ESR, save it */
	stw	r5, _ESR(r11)
	mfspr	r4, SPRN_DEAR		/* Grab the DEAR, save it */
	stw	r4, _DEAR(r11)
	EXC_XFER_LITE(0x300, handle_page_fault)
	EXCEPTION_PROLOG 0x300 DataStorage handle_dar_dsisr=1
	prepare_transfer_to_handler
	bl	do_page_fault
	b	interrupt_return

/*
 * 0x0400 - Instruction Storage Exception
 * This is caused by a fetch from non-execute or guarded pages.
 */
	START_EXCEPTION(0x0400, InstructionAccess)
	EXCEPTION_PROLOG
	EXCEPTION_PROLOG 0x400 InstructionAccess
	li	r5,0
	stw	r5, _ESR(r11)		/* Zero ESR */
	stw	r12, _DEAR(r11)		/* SRR0 as DEAR */
	EXC_XFER_LITE(0x400, handle_page_fault)
	prepare_transfer_to_handler
	bl	do_page_fault
	b	interrupt_return

/* 0x0500 - External Interrupt Exception */
	EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
	EXCEPTION(0x0500, HardwareInterrupt, do_IRQ)

/* 0x0600 - Alignment Exception */
	START_EXCEPTION(0x0600, Alignment)
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR and save it */
	stw	r4,_DEAR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x600, alignment_exception)
	EXCEPTION_PROLOG 0x600 Alignment handle_dar_dsisr=1
	prepare_transfer_to_handler
	bl	alignment_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* 0x0700 - Program Exception */
	START_EXCEPTION(0x0700, ProgramCheck)
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_ESR		/* Grab the ESR and save it */
	stw	r4,_ESR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x700, program_check_exception)
	EXCEPTION_PROLOG 0x700 ProgramCheck handle_dar_dsisr=1
	prepare_transfer_to_handler
	bl	program_check_exception
	REST_NVGPRS(r1)
	b	interrupt_return

	EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x0800, Trap_08, unknown_exception)
	EXCEPTION(0x0900, Trap_09, unknown_exception)
	EXCEPTION(0x0A00, Trap_0A, unknown_exception)
	EXCEPTION(0x0B00, Trap_0B, unknown_exception)

/* 0x0C00 - System Call Exception */
	START_EXCEPTION(0x0C00,	SystemCall)
	SYSCALL_ENTRY	0xc00
/*	Trap_0D is commented out to get more space for system call exception */

/*	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */
	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
/*	EXCEPTION(0x0D00, Trap_0D, unknown_exception) */
	EXCEPTION(0x0E00, Trap_0E, unknown_exception)
	EXCEPTION(0x0F00, Trap_0F, unknown_exception)

/* 0x1000 - Programmable Interval Timer (PIT) Exception */
	. = 0x1000
	START_EXCEPTION(0x1000, DecrementerTrap)
	b	Decrementer

/* 0x1010 - Fixed Interval Timer (FIT) Exception
 */
	. = 0x1010
/* 0x1010 - Fixed Interval Timer (FIT) Exception */
	START_EXCEPTION(0x1010, FITExceptionTrap)
	b	FITException

/* 0x1020 - Watchdog Timer (WDT) Exception
 */
	. = 0x1020
/* 0x1020 - Watchdog Timer (WDT) Exception */
	START_EXCEPTION(0x1020, WDTExceptionTrap)
	b	WDTException

/* 0x1100 - Data TLB Miss Exception

@@ -249,13 +271,13 @@ _ENTRY(saved_ksp_limit)
 * load TLB entries from the page table if they exist.
 */
	START_EXCEPTION(0x1100,	DTLBMiss)
	mtspr	SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
	mtspr	SPRN_SPRG_SCRATCH1, r11
	mtspr	SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */
	mtspr	SPRN_SPRG_SCRATCH6, r11
	mtspr	SPRN_SPRG_SCRATCH3, r12
	mtspr	SPRN_SPRG_SCRATCH4, r9
	mfcr	r12
	mfspr	r9, SPRN_PID
	mtspr	SPRN_SPRG_SCRATCH5, r9
	rlwimi	r12, r9, 0, 0xff
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the

@@ -316,13 +338,12 @@ _ENTRY(saved_ksp_limit)
	/* The bailout.	 Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r9, SPRN_SPRG_SCRATCH5
	mtspr	SPRN_PID, r9
	mtcr	r12
	mtspr	SPRN_PID, r12
	mtcrf	0x80, r12
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mfspr	r10, SPRN_SPRG_SCRATCH5
	b	DataStorage

/* 0x1200 - Instruction TLB Miss Exception

@@ -330,13 +351,13 @@ _ENTRY(saved_ksp_limit)
 * registers and bailout to a different point.
 */
	START_EXCEPTION(0x1200,	ITLBMiss)
	mtspr	SPRN_SPRG_SCRATCH0, r10	 /* Save some working registers */
	mtspr	SPRN_SPRG_SCRATCH1, r11
	mtspr	SPRN_SPRG_SCRATCH5, r10	 /* Save some working registers */
	mtspr	SPRN_SPRG_SCRATCH6, r11
	mtspr	SPRN_SPRG_SCRATCH3, r12
	mtspr	SPRN_SPRG_SCRATCH4, r9
	mfcr	r12
	mfspr	r9, SPRN_PID
	mtspr	SPRN_SPRG_SCRATCH5, r9
	rlwimi	r12, r9, 0, 0xff
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the

@@ -397,28 +418,27 @@ _ENTRY(saved_ksp_limit)
	/* The bailout.	 Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r9, SPRN_SPRG_SCRATCH5
	mtspr	SPRN_PID, r9
	mtcr	r12
	mtspr	SPRN_PID, r12
	mtcrf	0x80, r12
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mfspr	r10, SPRN_SPRG_SCRATCH5
	b	InstructionAccess

	EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1300, Trap_13, unknown_exception)
	EXCEPTION(0x1400, Trap_14, unknown_exception)
	EXCEPTION(0x1500, Trap_15, unknown_exception)
	EXCEPTION(0x1600, Trap_16, unknown_exception)
	EXCEPTION(0x1700, Trap_17, unknown_exception)
	EXCEPTION(0x1800, Trap_18, unknown_exception)
	EXCEPTION(0x1900, Trap_19, unknown_exception)
	EXCEPTION(0x1A00, Trap_1A, unknown_exception)
	EXCEPTION(0x1B00, Trap_1B, unknown_exception)
	EXCEPTION(0x1C00, Trap_1C, unknown_exception)
	EXCEPTION(0x1D00, Trap_1D, unknown_exception)
	EXCEPTION(0x1E00, Trap_1E, unknown_exception)
	EXCEPTION(0x1F00, Trap_1F, unknown_exception)

/* Check for a single step debug exception while in an exception
 * handler before state has been saved.	 This is to catch the case

@@ -435,7 +455,7 @@ _ENTRY(saved_ksp_limit)
 */
	/* 0x2000 - Debug Exception */
	START_EXCEPTION(0x2000,	DebugTrap)
	CRITICAL_EXCEPTION_PROLOG
	CRITICAL_EXCEPTION_PROLOG 0x2000 DebugTrap

	/*
	 * If this is a single step or branch-taken exception in an

@@ -477,32 +497,35 @@ _ENTRY(saved_ksp_limit)
	/* continue normal handling for a critical exception... */
2:	mfspr	r4,SPRN_DBSR
	stw	r4,_ESR(r11)	/* DebugException takes DBSR in _ESR */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(DebugException, 0x2002, \
		(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
		crit_transfer_to_handler, ret_from_crit_exc)
	prepare_transfer_to_handler
	bl	DebugException
	b	ret_from_crit_exc

	/* Programmable Interval Timer (PIT) Exception. (from 0x1000) */
	__HEAD
Decrementer:
	EXCEPTION_PROLOG
	EXCEPTION_PROLOG 0x1000 Decrementer
	lis	r0,TSR_PIS@h
	mtspr	SPRN_TSR,r0	/* Clear the PIT exception */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x1000, timer_interrupt)
	prepare_transfer_to_handler
	bl	timer_interrupt
	b	interrupt_return

	/* Fixed Interval Timer (FIT) Exception. (from 0x1010) */
	__HEAD
FITException:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	EXC_XFER_STD(0x1010, unknown_exception)
	EXCEPTION_PROLOG 0x1010 FITException
	prepare_transfer_to_handler
	bl	unknown_exception
	b	interrupt_return

	/* Watchdog Timer (WDT) Exception. (from 0x1020) */
	__HEAD
WDTException:
	CRITICAL_EXCEPTION_PROLOG;
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	EXC_XFER_TEMPLATE(WatchdogException, 0x1020+2,
	                  (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)),
			  crit_transfer_to_handler, ret_from_crit_exc)
	CRITICAL_EXCEPTION_PROLOG 0x1020 WDTException
	prepare_transfer_to_handler
	bl	WatchdogException
	b	ret_from_crit_exc

/* Other PowerPC processors, namely those derived from the 6xx-series
 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.

@@ -510,6 +533,7 @@ WDTException:
 * reserved.
 */

	__HEAD
/* Damn, I came up one instruction too many to fit into the
 * exception space :-).	 Both the instruction and data TLB
 * miss get to this point to load the TLB.

@@ -543,13 +567,12 @@ finish_tlb_load:

	/* Done...restore registers and get out of here.
	*/
	mfspr	r9, SPRN_SPRG_SCRATCH5
	mtspr	SPRN_PID, r9
	mtcr	r12
	mtspr	SPRN_PID, r12
	mtcrf	0x80, r12
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mfspr	r10, SPRN_SPRG_SCRATCH5
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

@@ -263,8 +263,7 @@ interrupt_base:
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
		  do_IRQ, EXC_XFER_LITE)
	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

@@ -277,7 +276,7 @@ interrupt_base:
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
		  FloatingPointUnavailable, unknown_exception, EXC_XFER_STD)
		  FloatingPointUnavailable, unknown_exception)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)

@@ -285,15 +284,14 @@ interrupt_base:

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
		  AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_STD)
		  AuxillaryProcessorUnavailable, unknown_exception)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
		  unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */

@@ -29,6 +29,13 @@
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>
#include <asm/interrupt.h>

/*
 * Value for the bits that have fixed value in RPN entries.
 * Also used for tagging DAR for DTLBerror.
 */
#define RPN_PATTERN	0x00f0

#include "head_32.h"

@@ -42,12 +49,6 @@
#endif
.endm

/*
 * Value for the bits that have fixed value in RPN entries.
 * Also used for tagging DAR for DTLBerror.
 */
#define RPN_PATTERN	0x00f0

#define PAGE_SHIFT_512K		19
#define PAGE_SHIFT_8M		23

@@ -118,56 +119,54 @@ instruction_counter:
#endif

/* System reset */
	EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
	EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, system_reset_exception)

/* Machine check */
	. = 0x200
MachineCheck:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	li	r6, RPN_PATTERN
	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x200, machine_check_exception)
	START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
	EXCEPTION_PROLOG INTERRUPT_MACHINE_CHECK MachineCheck handle_dar_dsisr=1
	prepare_transfer_to_handler
	bl	machine_check_exception
	b	interrupt_return

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
	EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	li	r6, RPN_PATTERN
	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	.Lalignment_exception_ool
	START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
	EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
	prepare_transfer_to_handler
	bl	alignment_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
	START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
	EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
	prepare_transfer_to_handler
	bl	program_check_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

/* With VMAP_STACK there's not enough room for this at 0x600 */
	. = 0xa00
.Lalignment_exception_ool:
	EXC_XFER_STD(0x600, alignment_exception)
	EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)

/* System call */
	. = 0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00
	START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
	SYSCALL_ENTRY	INTERRUPT_SYSCALL

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)

/* On the MPC8xx, this is a software emulation interrupt.  It occurs
 * for all unimplemented and illegal instructions.
 */
	EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
	START_EXCEPTION(INTERRUPT_SOFT_EMU_8xx, SoftEmu)
	EXCEPTION_PROLOG INTERRUPT_SOFT_EMU_8xx SoftEmu
	prepare_transfer_to_handler
	bl	emulation_assist_interrupt
	REST_NVGPRS(r1)
	b	interrupt_return

	. = 0x1100
/*
 * For the MPC8xx, this is a software tablewalk to load the instruction
 * TLB.  The task switch loads the M_TWB register with the pointer to the first
|
|||
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp)
|
||||
#endif
|
||||
|
||||
InstructionTLBMiss:
|
||||
START_EXCEPTION(INTERRUPT_INST_TLB_MISS_8xx, InstructionTLBMiss)
|
||||
mtspr SPRN_SPRG_SCRATCH2, r10
|
||||
mtspr SPRN_M_TW, r11
|
||||
|
||||
|
@ -245,8 +244,7 @@ InstructionTLBMiss:
|
|||
rfi
|
||||
#endif
|
||||
|
||||
. = 0x1200
|
||||
DataStoreTLBMiss:
|
||||
START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss)
|
||||
mtspr SPRN_SPRG_SCRATCH2, r10
|
||||
mtspr SPRN_M_TW, r11
|
||||
mfcr r11
|
||||
|
@ -309,83 +307,74 @@ DataStoreTLBMiss:
|
|||
* to many reasons, such as executing guarded memory or illegal instruction
|
||||
* addresses. There is nothing to do but handle a big time error fault.
|
||||
*/
|
||||
. = 0x1300
|
||||
InstructionTLBError:
|
||||
EXCEPTION_PROLOG
|
||||
START_EXCEPTION(INTERRUPT_INST_TLB_ERROR_8xx, InstructionTLBError)
|
||||
/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
|
||||
EXCEPTION_PROLOG INTERRUPT_INST_STORAGE InstructionTLBError
|
||||
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
|
||||
andis. r10,r9,SRR1_ISI_NOPT@h
|
||||
beq+ .Litlbie
|
||||
tlbie r12
|
||||
/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
|
||||
.Litlbie:
|
||||
stw r12, _DAR(r11)
|
||||
stw r5, _DSISR(r11)
|
||||
EXC_XFER_LITE(0x400, handle_page_fault)
|
||||
prepare_transfer_to_handler
|
||||
bl do_page_fault
|
||||
b interrupt_return
|
||||
|
||||
/* This is the data TLB error on the MPC8xx. This could be due to
|
||||
* many reasons, including a dirty update to a pte. We bail out to
|
||||
* a higher level function that can handle it.
|
||||
*/
|
||||
. = 0x1400
|
||||
DataTLBError:
|
||||
START_EXCEPTION(INTERRUPT_DATA_TLB_ERROR_8xx, DataTLBError)
|
||||
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
|
||||
mfspr r11, SPRN_DAR
|
||||
cmpwi cr1, r11, RPN_PATTERN
|
||||
beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */
|
||||
DARFixed:/* Return from dcbx instruction bug workaround */
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
li r11, RPN_PATTERN
|
||||
mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */
|
||||
#endif
|
||||
EXCEPTION_PROLOG_1
|
||||
EXCEPTION_PROLOG_2 handle_dar_dsisr=1
|
||||
get_and_save_dar_dsisr_on_stack r4, r5, r11
|
||||
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
|
||||
EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1
|
||||
lwz r4, _DAR(r11)
|
||||
lwz r5, _DSISR(r11)
|
||||
andis. r10,r5,DSISR_NOHPTE@h
|
||||
beq+ .Ldtlbie
|
||||
tlbie r4
|
||||
.Ldtlbie:
|
||||
#ifndef CONFIG_VMAP_STACK
|
||||
li r10,RPN_PATTERN
|
||||
mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
|
||||
#endif
|
||||
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
|
||||
EXC_XFER_LITE(0x300, handle_page_fault)
|
||||
prepare_transfer_to_handler
|
||||
bl do_page_fault
|
||||
b interrupt_return
|
||||
|
||||
stack_overflow:
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
vmap_stack_overflow_exception
|
||||
#endif
|
||||
|
||||
/* On the MPC8xx, these next four traps are used for development
|
||||
* support of breakpoints and such. Someday I will get around to
|
||||
* using them.
|
||||
*/
|
||||
do_databreakpoint:
|
||||
EXCEPTION_PROLOG_1
|
||||
EXCEPTION_PROLOG_2 handle_dar_dsisr=1
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
mfspr r4,SPRN_BAR
|
||||
stw r4,_DAR(r11)
|
||||
#ifndef CONFIG_VMAP_STACK
|
||||
mfspr r5,SPRN_DSISR
|
||||
stw r5,_DSISR(r11)
|
||||
#endif
|
||||
EXC_XFER_STD(0x1c00, do_break)
|
||||
|
||||
. = 0x1c00
|
||||
DataBreakpoint:
|
||||
START_EXCEPTION(INTERRUPT_DATA_BREAKPOINT_8xx, DataBreakpoint)
|
||||
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
|
||||
mfspr r11, SPRN_SRR0
|
||||
cmplwi cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
|
||||
cmplwi cr7, r11, (.Litlbie - PAGE_OFFSET)@l
|
||||
cror 4*cr1+eq, 4*cr1+eq, 4*cr7+eq
|
||||
bne cr1, do_databreakpoint
|
||||
bne cr1, 1f
|
||||
mtcr r10
|
||||
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||
mfspr r11, SPRN_SPRG_SCRATCH1
|
||||
rfi
|
||||
|
||||
1: EXCEPTION_PROLOG_1
|
||||
EXCEPTION_PROLOG_2 INTERRUPT_DATA_BREAKPOINT_8xx DataBreakpoint handle_dar_dsisr=1
|
||||
mfspr r4,SPRN_BAR
|
||||
stw r4,_DAR(r11)
|
||||
prepare_transfer_to_handler
|
||||
bl do_break
|
||||
REST_NVGPRS(r1)
|
||||
b interrupt_return
|
||||
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
. = 0x1d00
|
||||
InstructionBreakpoint:
|
||||
START_EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, InstructionBreakpoint)
|
||||
mtspr SPRN_SPRG_SCRATCH0, r10
|
||||
lwz r10, (instruction_counter - PAGE_OFFSET)@l(0)
|
||||
addi r10, r10, -1
|
||||
|
@ -396,11 +385,12 @@ InstructionBreakpoint:
|
|||
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||
rfi
|
||||
#else
|
||||
EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
|
||||
EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, Trap_1d, unknown_exception)
|
||||
#endif
|
||||
EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
|
||||
EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
|
||||
EXCEPTION(0x1e00, Trap_1e, unknown_exception)
|
||||
EXCEPTION(0x1f00, Trap_1f, unknown_exception)
|
||||
|
||||
__HEAD
|
||||
. = 0x2000
|
||||
|
||||
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
|
||||
|
@ -510,14 +500,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
|
|||
152:
|
||||
mfdar r11
|
||||
mtctr r11 /* restore ctr reg from DAR */
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mfspr r11, SPRN_SPRG_THREAD
|
||||
stw r10, DAR(r11)
|
||||
mfspr r10, SPRN_DSISR
|
||||
stw r10, DSISR(r11)
|
||||
#else
|
||||
mtdar r10 /* save fault EA to DAR */
|
||||
#endif
|
||||
mfspr r10,SPRN_M_TW
|
||||
b DARFixed /* Go back to normal TLB handling */
|
||||
|
||||
|
@ -819,7 +805,7 @@ EXPORT_SYMBOL(empty_zero_page)
|
|||
swapper_pg_dir:
|
||||
.space PGD_TABLE_SIZE
|
||||
|
||||
/* Room for two PTE table poiners, usually the kernel and current user
|
||||
/* Room for two PTE table pointers, usually the kernel and current user
|
||||
* pointer to their respective root page table (pgdir).
|
||||
*/
|
||||
.globl abatron_pteptrs
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include <asm/kvm_book3s_asm.h>
|
||||
#include <asm/export.h>
|
||||
#include <asm/feature-fixups.h>
|
||||
#include <asm/interrupt.h>
|
||||
|
||||
#include "head_32.h"
|
||||
|
||||
|
@ -239,7 +240,7 @@ __secondary_hold_acknowledge:
|
|||
/* System reset */
|
||||
/* core99 pmac starts the seconary here by changing the vector, and
|
||||
putting it back to what it was (unknown_async_exception) when done. */
|
||||
EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD)
|
||||
EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
|
||||
|
||||
/* Machine check */
|
||||
/*
|
||||
|
@@ -255,40 +256,28 @@ __secondary_hold_acknowledge:
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM	0x200
MachineCheck:
	START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH2,r1
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, RTAS_SP(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 7f
	mfspr	r1, SPRN_SPRG_SCRATCH2
#else
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2 0x200 MachineCheck
#ifdef CONFIG_PPC_CHRP
	beq	cr1, machine_check_tramp
	beq	cr1, 1f
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif
1:	prepare_transfer_to_handler
	bl	machine_check_exception
	b	interrupt_return

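[Editor's aside: a hedged C rendering of the CHRP special case above. If the thread was executing inside RTAS (thread.rtas_sp non-zero), the machine check cannot be handled normally, so the new asm traps (twi 31,0,0) instead of calling the C handler. rtas_sp is a real thread_struct field on 32-bit; the helper name is hypothetical.]

	/*
	 * Hedged sketch of the machine-check dispatch above; illustrative
	 * only, trap_inside_rtas() is a hypothetical stand-in for twi 31,0,0.
	 */
	static void sketch_machine_check(struct pt_regs *regs)
	{
		if (IS_ENABLED(CONFIG_PPC_CHRP) && current->thread.rtas_sp)
			trap_inside_rtas();		/* hypothetical helper */
		else
			machine_check_exception(regs);
	}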
/* Data access exception. */
	. = 0x300
	DO_KVM	0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	mtspr	SPRN_SPRG_SCRATCH2,r10

@@ -309,30 +298,20 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif
1:	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
MMU_FTR_SECTION_ELSE
#endif
	b	handle_page_fault_tramp_2
#ifdef CONFIG_PPC_BOOK3S_604
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif
#endif	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
	prepare_transfer_to_handler
	lwz	r5, _DSISR(r11)
	andis.	r0, r5, DSISR_DABRMATCH@h
	bne-	1f
	bl	do_page_fault
	b	interrupt_return
1:	bl	do_break
	REST_NVGPRS(r1)
	b	interrupt_return

/* Instruction access exception. */
	. = 0x400
	DO_KVM	0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD

@@ -352,43 +331,35 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
#endif	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r5, _DSISR(r11)
	stw	r12, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)
	prepare_transfer_to_handler
	bl	do_page_fault
	b	interrupt_return

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
	EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)

/* Alignment exception */
	. = 0x600
	DO_KVM	0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp
	START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
	EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
	prepare_transfer_to_handler
	bl	alignment_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
	START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
	EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
	prepare_transfer_to_handler
	bl	program_check_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Floating-point unavailable */
	. = 0x800
	DO_KVM	0x800
FPUnavailable:
	START_EXCEPTION(0x800, FPUnavailable)
#ifdef CONFIG_PPC_FPU
BEGIN_FTR_SECTION
/*

@@ -397,30 +368,29 @@ BEGIN_FTR_SECTION
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
1:	prepare_transfer_to_handler
	bl	kernel_fp_unavailable_exception
	b	interrupt_return
#else
	b	ProgramCheck
#endif

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
	EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xa00, Trap_0a, unknown_exception)
	EXCEPTION(0xb00, Trap_0b, unknown_exception)

/* System call */
	. = 0xc00
	DO_KVM	0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00
	START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
	SYSCALL_ENTRY	INTERRUPT_SYSCALL

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
	EXCEPTION(0xe00, Trap_0e, unknown_exception)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.

@@ -430,19 +400,18 @@ SystemCall:
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM	0xf00
	START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM	0xf20
	START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
	b	AltiVecUnavailable

	__HEAD
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
	. = INTERRUPT_INST_TLB_MISS_603
InstructionTLBMiss:
/*
 * r0:	scratch

@@ -508,7 +477,7 @@ InstructionAddressInvalid:
/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
	. = INTERRUPT_DATA_LOAD_TLB_MISS_603
DataLoadTLBMiss:
/*
 * r0:	scratch

@@ -586,7 +555,7 @@ DataAddressInvalid:
/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
	. = INTERRUPT_DATA_STORE_TLB_MISS_603
DataStoreTLBMiss:
/*
 * r0:	scratch

@@ -650,57 +619,39 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#define TAUException	unknown_async_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
	EXCEPTION(0x1400, SMI, SMIException)
	EXCEPTION(0x1500, Trap_15, unknown_exception)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
	EXCEPTION(0x1700, Trap_17, TAUException)
	EXCEPTION(0x1800, Trap_18, unknown_exception)
	EXCEPTION(0x1900, Trap_19, unknown_exception)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception)
	EXCEPTION(0x2000, RunMode, RunModeException)
	EXCEPTION(0x2100, Trap_21, unknown_exception)
	EXCEPTION(0x2200, Trap_22, unknown_exception)
	EXCEPTION(0x2300, Trap_23, unknown_exception)
	EXCEPTION(0x2400, Trap_24, unknown_exception)
	EXCEPTION(0x2500, Trap_25, unknown_exception)
	EXCEPTION(0x2600, Trap_26, unknown_exception)
	EXCEPTION(0x2700, Trap_27, unknown_exception)
	EXCEPTION(0x2800, Trap_28, unknown_exception)
	EXCEPTION(0x2900, Trap_29, unknown_exception)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception)

	__HEAD
	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	andis.	r0, r5, DSISR_DABRMATCH@h
	bne-	1f
	EXC_XFER_LITE(0x300, handle_page_fault)
1:	EXC_XFER_STD(0x300, do_break)

#ifdef CONFIG_VMAP_STACK
#ifdef CONFIG_PPC_BOOK3S_604
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)

@@ -775,26 +726,31 @@ fast_hash_page_return:
	rfi
#endif /* CONFIG_PPC_BOOK3S_604 */

stack_overflow:
#ifdef CONFIG_VMAP_STACK
	vmap_stack_overflow_exception
#endif

	__HEAD
AltiVecUnavailable:
	EXCEPTION_PROLOG
	EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
1:	prepare_transfer_to_handler
	bl	altivec_unavailable_exception
	b	interrupt_return

	__HEAD
PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)
	EXCEPTION_PROLOG 0xf00 PerformanceMonitor
	prepare_transfer_to_handler
	bl	performance_monitor_exception
	b	interrupt_return

	__HEAD
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.

@@ -44,7 +44,7 @@ END_BTB_FLUSH_SECTION
#endif


#define NORMAL_EXCEPTION_PROLOG(intno)					     \
#define NORMAL_EXCEPTION_PROLOG(trapno, intno)				     \
	mtspr	SPRN_SPRG_WSCRATCH0, r10;	/* save one register */	     \
	mfspr	r10, SPRN_SPRG_THREAD;					     \
	stw	r11, THREAD_NORMSAVE(0)(r10);				     \

@@ -53,6 +53,8 @@ END_BTB_FLUSH_SECTION
	mfspr	r11, SPRN_SRR1;						     \
	DO_KVM	BOOKE_INTERRUPT_##intno SPRN_SRR1;			     \
	andi.	r11, r11, MSR_PR;	/* check whether user or kernel    */\
	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL);				     \
	mtmsr	r11;							     \
	mr	r11, r1;						     \
	beq	1f;							     \
	BOOKE_CLEAR_BTB(r11)						     \

@@ -76,12 +78,39 @@ END_BTB_FLUSH_SECTION
	stw	r1, 0(r11);						     \
	mr	r1, r11;						     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	lis	r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
	addi	r10, r10, STACK_FRAME_REGS_MARKER@l;			     \
	stw	r10, 8(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)
	COMMON_EXCEPTION_PROLOG_END trapno

.macro COMMON_EXCEPTION_PROLOG_END trapno
	stw	r0,GPR0(r1)
	lis	r10, STACK_FRAME_REGS_MARKER@ha	/* exception frame marker */
	addi	r10, r10, STACK_FRAME_REGS_MARKER@l
	stw	r10, 8(r1)
	li	r10, \trapno
	stw	r10,_TRAP(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	SAVE_NVGPRS(r1)
	stw	r2,GPR2(r1)
	stw	r12,_NIP(r1)
	stw	r9,_MSR(r1)
	mfctr	r10
	mfspr	r2,SPRN_SPRG_THREAD
	stw	r10,_CTR(r1)
	tovirt(r2, r2)
	mfspr	r10,SPRN_XER
	addi	r2, r2, -THREAD
	stw	r10,_XER(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
.endm

.macro prepare_transfer_to_handler
#ifdef CONFIG_E500
	andi.	r12,r9,MSR_PR
	bne	777f
	bl	prepare_transfer_to_handler
777:
#endif
.endm

.macro SYSCALL_ENTRY trapno intno srr1
	mfspr	r10, SPRN_SPRG_THREAD

@@ -180,7 +209,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 * registers as the normal prolog above. Instead we use a portion of the
 * critical/machine check exception stack at low physical addresses.
 */
#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, trapno, intno, exc_level_srr0, exc_level_srr1) \
	mtspr	SPRN_SPRG_WSCRATCH_##exc_level,r8;			     \
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
	stw	r9,GPR9(r8);		/* save various registers	   */\

@@ -192,6 +221,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
	DO_KVM	BOOKE_INTERRUPT_##intno exc_level_srr1;			     \
	BOOKE_CLEAR_BTB(r10)						     \
	andi.	r11,r11,MSR_PR;						     \
	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE));	     \
	mtmsr	r11;							     \
	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of   */\
	lwz	r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
	addi	r11,r11,EXC_LVL_FRAME_OVERHEAD;	/* allocate stack frame    */\

@@ -221,16 +252,44 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
	stw	r1,0(r11);						     \
	mr	r1,r11;							     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)
	COMMON_EXCEPTION_PROLOG_END trapno

#define CRITICAL_EXCEPTION_PROLOG(intno) \
	EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
#define DEBUG_EXCEPTION_PROLOG \
	EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
#define MCHECK_EXCEPTION_PROLOG \
	EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
#define SAVE_xSRR(xSRR)			\
	mfspr	r0,SPRN_##xSRR##0;	\
	stw	r0,_##xSRR##0(r1);	\
	mfspr	r0,SPRN_##xSRR##1;	\
	stw	r0,_##xSRR##1(r1)


.macro SAVE_MMU_REGS
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r1)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r1)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r1)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r1)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r1)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r1)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r1)
#endif
.endm

#define CRITICAL_EXCEPTION_PROLOG(trapno, intno) \
	EXC_LEVEL_EXCEPTION_PROLOG(CRIT, trapno+2, intno, SPRN_CSRR0, SPRN_CSRR1)
#define DEBUG_EXCEPTION_PROLOG(trapno) \
	EXC_LEVEL_EXCEPTION_PROLOG(DBG, trapno+8, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
#define MCHECK_EXCEPTION_PROLOG(trapno) \
	EXC_LEVEL_EXCEPTION_PROLOG(MC, trapno+4, MACHINE_CHECK, \
			  SPRN_MCSRR0, SPRN_MCSRR1)

/*
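[Editor's aside: a hedged C sketch of what SAVE_MMU_REGS above captures on Book3E-MMU parts. The MAS registers describe the TLB entry a miss handler was about to write, so they must be preserved across a nested critical or machine-check exception. The struct is illustrative; the real slots are stack-frame offsets such as MAS0(r1).]

	/* Hedged sketch mirroring SAVE_MMU_REGS; illustrative names only. */
	struct sketch_saved_mmu {
		unsigned long mas0, mas1, mas2, mas3, mas6;
	#ifdef CONFIG_PHYS_64BIT
		unsigned long mas7;	/* upper physical address bits */
	#endif
	#ifdef CONFIG_44x
		unsigned long mmucr;
	#endif
	};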
@@ -257,44 +316,34 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
	.align 5;							     \
label:

#define EXCEPTION(n, intno, label, hdlr, xfer)			\
#define EXCEPTION(n, intno, label, hdlr)			\
	START_EXCEPTION(label);					\
	NORMAL_EXCEPTION_PROLOG(intno);				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	xfer(n, hdlr)
	NORMAL_EXCEPTION_PROLOG(n, intno);			\
	prepare_transfer_to_handler;				\
	bl	hdlr;						\
	b	interrupt_return

#define CRITICAL_EXCEPTION(n, intno, label, hdlr)		\
	START_EXCEPTION(label);					\
	CRITICAL_EXCEPTION_PROLOG(intno);			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  crit_transfer_to_handler, ret_from_crit_exc)
	CRITICAL_EXCEPTION_PROLOG(n, intno);			\
	SAVE_MMU_REGS;						\
	SAVE_xSRR(SRR);						\
	prepare_transfer_to_handler;				\
	bl	hdlr;						\
	b	ret_from_crit_exc

#define MCHECK_EXCEPTION(n, label, hdlr)			\
	START_EXCEPTION(label);					\
	MCHECK_EXCEPTION_PROLOG;				\
	MCHECK_EXCEPTION_PROLOG(n);				\
	mfspr	r5,SPRN_ESR;					\
	stw	r5,_ESR(r11);					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  mcheck_transfer_to_handler, ret_from_mcheck_exc)

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)	\
	li	r10,trap;				\
	stw	r10,_TRAP(r11);				\
	lis	r10,msr@h;				\
	ori	r10,r10,msr@l;				\
	bl	tfer;		 			\
	.long	hdlr;					\
	.long	ret

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)
	SAVE_xSRR(DSRR);					\
	SAVE_xSRR(CSRR);					\
	SAVE_MMU_REGS;						\
	SAVE_xSRR(SRR);						\
	prepare_transfer_to_handler;				\
	bl	hdlr;						\
	b	ret_from_mcheck_exc

/* Check for a single step debug exception while in an exception
 * handler before state has been saved. This is to catch the case
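[Editor's aside: the trap-number convention visible above is worth spelling out. The low bits added to the base vector encode which exception level the entry came in on, so the C side can tell them apart in the saved trap word. The enum below is a hedged illustration; the kernel uses the bare +2/+4/+8 offsets directly.]

	/* Hedged sketch of the trap-number level encoding; illustrative. */
	enum sketch_trap_level {
		TRAP_LEVEL_NORMAL = 0,	/* NORMAL_EXCEPTION_PROLOG(n, ...) */
		TRAP_LEVEL_CRIT   = 2,	/* CRITICAL: trapno + 2 */
		TRAP_LEVEL_MCHECK = 4,	/* MCHECK:   trapno + 4 */
		TRAP_LEVEL_DEBUG  = 8,	/* DEBUG:    trapno + 8 */
	};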
@@ -311,7 +360,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 */
#define DEBUG_DEBUG_EXCEPTION						      \
	START_EXCEPTION(DebugDebug);					      \
	DEBUG_EXCEPTION_PROLOG;						      \
	DEBUG_EXCEPTION_PROLOG(2000);					      \
									      \
	/*								      \
	 * If there is a single step or branch-taken exception in an	      \

@@ -360,12 +409,16 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
	/* continue normal handling for a debug exception... */	      \
2:	mfspr	r4,SPRN_DBSR;						      \
	stw	r4,_ESR(r11);	/* DebugException takes DBSR in _ESR */	      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc)
	SAVE_xSRR(CSRR);						      \
	SAVE_MMU_REGS;							      \
	SAVE_xSRR(SRR);							      \
	prepare_transfer_to_handler;					      \
	bl	DebugException;						      \
	b	ret_from_debug_exc

#define DEBUG_CRIT_EXCEPTION						      \
	START_EXCEPTION(DebugCrit);					      \
	CRITICAL_EXCEPTION_PROLOG(DEBUG);				      \
	CRITICAL_EXCEPTION_PROLOG(2000,DEBUG);				      \
									      \
	/*								      \
	 * If there is a single step or branch-taken exception in an	      \
@@ -414,58 +467,71 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
	/* continue normal handling for a critical exception... */	      \
2:	mfspr	r4,SPRN_DBSR;						      \
	stw	r4,_ESR(r11);	/* DebugException takes DBSR in _ESR */	      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc)
	SAVE_MMU_REGS;							      \
	SAVE_xSRR(SRR);							      \
	prepare_transfer_to_handler;					      \
	bl	DebugException;						      \
	b	ret_from_crit_exc

#define DATA_STORAGE_EXCEPTION						      \
	START_EXCEPTION(DataStorage)					      \
	NORMAL_EXCEPTION_PROLOG(DATA_STORAGE);				      \
	NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE);			      \
	mfspr	r5,SPRN_ESR;		/* Grab the ESR and save it */	      \
	stw	r5,_ESR(r11);						      \
	mfspr	r4,SPRN_DEAR;		/* Grab the DEAR */		      \
	stw	r4, _DEAR(r11);						      \
	EXC_XFER_LITE(0x0300, handle_page_fault)
	prepare_transfer_to_handler;					      \
	bl	do_page_fault;						      \
	b	interrupt_return

#define INSTRUCTION_STORAGE_EXCEPTION					      \
	START_EXCEPTION(InstructionStorage)				      \
	NORMAL_EXCEPTION_PROLOG(INST_STORAGE);				      \
	NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE);			      \
	mfspr	r5,SPRN_ESR;		/* Grab the ESR and save it */	      \
	stw	r5,_ESR(r11);						      \
	stw	r12, _DEAR(r11);	/* Pass SRR0 as arg2 */		      \
	EXC_XFER_LITE(0x0400, handle_page_fault)
	prepare_transfer_to_handler;					      \
	bl	do_page_fault;						      \
	b	interrupt_return

#define ALIGNMENT_EXCEPTION						      \
	START_EXCEPTION(Alignment)					      \
	NORMAL_EXCEPTION_PROLOG(ALIGNMENT);				      \
	NORMAL_EXCEPTION_PROLOG(0x600, ALIGNMENT);			      \
	mfspr	r4,SPRN_DEAR;		/* Grab the DEAR and save it */	      \
	stw	r4,_DEAR(r11);						      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_STD(0x0600, alignment_exception)
	prepare_transfer_to_handler;					      \
	bl	alignment_exception;					      \
	REST_NVGPRS(r1);						      \
	b	interrupt_return

#define PROGRAM_EXCEPTION						      \
	START_EXCEPTION(Program)					      \
	NORMAL_EXCEPTION_PROLOG(PROGRAM);				      \
	NORMAL_EXCEPTION_PROLOG(0x700, PROGRAM);			      \
	mfspr	r4,SPRN_ESR;		/* Grab the ESR and save it */	      \
	stw	r4,_ESR(r11);						      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_STD(0x0700, program_check_exception)
	prepare_transfer_to_handler;					      \
	bl	program_check_exception;				      \
	REST_NVGPRS(r1);						      \
	b	interrupt_return

#define DECREMENTER_EXCEPTION						      \
	START_EXCEPTION(Decrementer)					      \
	NORMAL_EXCEPTION_PROLOG(DECREMENTER);				      \
	NORMAL_EXCEPTION_PROLOG(0x900, DECREMENTER);			      \
	lis     r0,TSR_DIS@h;           /* Setup the DEC interrupt mask */    \
	mtspr   SPRN_TSR,r0;		/* Clear the DEC interrupt */	      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_LITE(0x0900, timer_interrupt)
	prepare_transfer_to_handler;					      \
	bl	timer_interrupt;					      \
	b	interrupt_return

#define FP_UNAVAILABLE_EXCEPTION					      \
	START_EXCEPTION(FloatingPointUnavailable)			      \
	NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL);				      \
	NORMAL_EXCEPTION_PROLOG(0x800, FP_UNAVAIL);			      \
	beq	1f;							      \
	bl	load_up_fpu;		/* if from user, just load it up */   \
	b	fast_exception_return;					      \
1:	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_STD(0x800, kernel_fp_unavailable_exception)
1:	prepare_transfer_to_handler;					      \
	bl	kernel_fp_unavailable_exception;			      \
	b	interrupt_return

#else /* __ASSEMBLY__ */
struct exception_regs {

@@ -481,7 +547,6 @@ struct exception_regs {
	unsigned long csrr1;
	unsigned long dsrr0;
	unsigned long dsrr1;
	unsigned long saved_ksp_limit;
};

/* ensure this structure is always sized to a multiple of the stack alignment */

@@ -113,7 +113,7 @@ _ENTRY(_start);
 1:
 	/*
-	 * We have the runtime (virutal) address of our base.
+	 * We have the runtime (virtual) address of our base.
 	 * We calculate our shift of offset from a 64M page.
 	 * We could map the 64M page we belong to at PAGE_OFFSET and
 	 * get going from there.

@@ -363,23 +363,26 @@ interrupt_base:
 	/* Data Storage Interrupt */
 	START_EXCEPTION(DataStorage)
-	NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
+	NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE)
 	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it */
 	stw	r5,_ESR(r11)
 	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it */
 	stw	r4, _DEAR(r11)
 	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
 	bne	1f
-	EXC_XFER_LITE(0x0300, handle_page_fault)
+	prepare_transfer_to_handler
+	bl	do_page_fault
+	b	interrupt_return
 1:
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	EXC_XFER_LITE(0x0300, CacheLockingException)
+	prepare_transfer_to_handler
+	bl	CacheLockingException
+	b	interrupt_return

 	/* Instruction Storage Interrupt */
 	INSTRUCTION_STORAGE_EXCEPTION

 	/* External Input Interrupt */
-	EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)
+	EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ)

 	/* Alignment Interrupt */
 	ALIGNMENT_EXCEPTION

@@ -391,8 +394,7 @@ interrupt_base:
 #ifdef CONFIG_PPC_FPU
 	FP_UNAVAILABLE_EXCEPTION
 #else
-	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
-		  unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
 #endif

 	/* System Call Interrupt */

@@ -400,16 +402,14 @@ interrupt_base:
 	SYSCALL_ENTRY   0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1

 	/* Auxiliary Processor Unavailable Interrupt */
-	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
-		  unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception)

 	/* Decrementer Interrupt */
 	DECREMENTER_EXCEPTION

 	/* Fixed Internal Timer Interrupt */
 	/* TODO: Add FIT support */
-	EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
-		  unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception)

 	/* Watchdog Timer Interrupt */
 #ifdef CONFIG_BOOKE_WDT

@@ -497,7 +497,7 @@ END_BTB_FLUSH_SECTION
 #endif
 #endif

-	bne	2f			/* Bail if permission/valid mismach */
+	bne	2f			/* Bail if permission/valid mismatch */

 	/* Jump to common tlb load */
 	b	finish_tlb_load

@@ -592,7 +592,7 @@ END_BTB_FLUSH_SECTION
 #endif
 #endif

-	bne	2f			/* Bail if permission mismach */
+	bne	2f			/* Bail if permission mismatch */

 	/* Jump to common TLB load point */
 	b	finish_tlb_load

@@ -614,38 +614,44 @@ END_BTB_FLUSH_SECTION
 #ifdef CONFIG_SPE
 	/* SPE Unavailable */
 	START_EXCEPTION(SPEUnavailable)
-	NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
+	NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL)
 	beq	1f
 	bl	load_up_spe
 	b	fast_exception_return
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	EXC_XFER_LITE(0x2010, KernelSPE)
+1:	prepare_transfer_to_handler
+	bl	KernelSPE
+	b	interrupt_return
 #elif defined(CONFIG_SPE_POSSIBLE)
-	EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
-		  unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception)
 #endif /* CONFIG_SPE_POSSIBLE */

 	/* SPE Floating Point Data */
 #ifdef CONFIG_SPE
-	EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
-		  SPEFloatingPointException, EXC_XFER_STD)
+	START_EXCEPTION(SPEFloatingPointData)
+	NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA)
+	prepare_transfer_to_handler
+	bl	SPEFloatingPointException
+	REST_NVGPRS(r1)
+	b	interrupt_return

 	/* SPE Floating Point Round */
-	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
-		  SPEFloatingPointRoundException, EXC_XFER_STD)
+	START_EXCEPTION(SPEFloatingPointRound)
+	NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND)
+	prepare_transfer_to_handler
+	bl	SPEFloatingPointRoundException
+	REST_NVGPRS(r1)
+	b	interrupt_return
 #elif defined(CONFIG_SPE_POSSIBLE)
-	EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
-		  unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
-		  unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception)
+	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception)
 #endif /* CONFIG_SPE_POSSIBLE */


 	/* Performance Monitor */
 	EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
-		  performance_monitor_exception, EXC_XFER_STD)
+		  performance_monitor_exception)

-	EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
+	EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception)

 	CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
 			   CriticalDoorbell, unknown_exception)

@@ -660,10 +666,10 @@ END_BTB_FLUSH_SECTION
 			   unknown_exception)

 	/* Hypercall */
-	EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception)

 	/* Embedded Hypervisor Privilege */
-	EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception)

 interrupt_end:

@@ -854,7 +860,7 @@ KernelSPE:
 	lwz	r5,_NIP(r1)
 	bl	printk
 #endif
-	b	ret_from_except
+	b	interrupt_return
 #ifdef CONFIG_PRINTK
 87:	.string	"SPE used in kernel  (task=%p, pc=%x)  \n"
 #endif

@@ -141,7 +141,7 @@ void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
 {
 	struct instruction_op op;

-	if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
+	if (__get_user_instr(*instr, (void __user *)regs->nip))
 		return;

 	analyse_instr(&op, regs, *instr);

@@ -145,9 +145,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

 /*
  * Return from NAP/DOZE mode, restore some CPU specific registers,
- * we are called with DR/IR still off and r2 containing physical
- * address of current. R11 points to the exception frame (physical
- * address). We have to preserve r10.
+ * R11 points to the exception frame. We have to preserve r10.
  */
 _GLOBAL(power_save_ppc32_restore)
 	lwz	r9,_LINK(r11)	/* interrupted in ppc6xx_idle: */

@@ -166,11 +164,7 @@ BEGIN_FTR_SECTION
 	mfspr	r9,SPRN_HID0
 	andis.	r9,r9,HID0_NAP@h
 	beq	1f
-#ifdef CONFIG_VMAP_STACK
 	addis	r9, r11, nap_save_msscr0@ha
-#else
-	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
-#endif
 	lwz	r9,nap_save_msscr0@l(r9)
 	mtspr	SPRN_MSSCR0, r9
 	sync

@@ -178,15 +172,11 @@ BEGIN_FTR_SECTION
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
 BEGIN_FTR_SECTION
-#ifdef CONFIG_VMAP_STACK
 	addis	r9, r11, nap_save_hid1@ha
-#else
-	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
-#endif
 	lwz	r9,nap_save_hid1@l(r9)
 	mtspr	SPRN_HID1, r9
 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-	b	transfer_to_handler_cont
+	blr
 _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)

 .data

@@ -209,4 +209,8 @@ _GLOBAL(power4_idle_nap)
 	mtmsrd	r7
 	isync
 	b	1b
+
+	.globl power4_idle_nap_return
+power4_idle_nap_return:
+	blr
 #endif

@@ -74,20 +74,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)

 /*
  * Return from NAP/DOZE mode, restore some CPU specific registers,
- * r2 containing physical address of current.
- * r11 points to the exception frame (physical address).
+ * r2 containing address of current.
+ * r11 points to the exception frame.
  * We have to preserve r10.
  */
 _GLOBAL(power_save_ppc32_restore)
 	lwz	r9,_LINK(r11)	/* interrupted in e500_idle */
 	stw	r9,_NIP(r11)	/* make it do a blr */
-
-#ifdef CONFIG_SMP
-	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
-	slwi	r11,r11,2
-#else
-	li	r11,0
-#endif
-
-	b	transfer_to_handler_cont
+	blr
 _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)

@@ -20,6 +20,10 @@
 #include <asm/time.h>
 #include <asm/unistd.h>

+#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
+unsigned long global_dbcr0[NR_CPUS];
+#endif
+
 typedef long (*syscall_fn)(long, long, long, long, long, long);

 /* Has to run notrace because it is entered not completely "reconciled" */

@@ -29,20 +33,24 @@ notrace long system_call_exception(long r3, long r4, long r5,
 {
 	syscall_fn f;

+	kuep_lock();
+#ifdef CONFIG_PPC32
+	kuap_save_and_lock(regs);
+#endif
+
 	regs->orig_gpr3 = r3;

 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
 		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

+	trace_hardirqs_off(); /* finish reconciling */
+
 	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
 	user_exit_irqoff();

-	trace_hardirqs_off(); /* finish reconciling */
-
 	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
 		BUG_ON(!(regs->msr & MSR_RI));
 	BUG_ON(!(regs->msr & MSR_PR));
-	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(arch_irq_disabled_regs(regs));

 #ifdef CONFIG_PPC_PKEY

@@ -69,9 +77,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 		isync();
 	} else
 #endif
-#ifdef CONFIG_PPC64
-	kuap_check_amr();
-#endif
+	kuap_assert_locked();

 	booke_restore_dbcr0();

@@ -247,9 +253,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,

 	CT_WARN_ON(ct_state() == CONTEXT_USER);

-#ifdef CONFIG_PPC64
-	kuap_check_amr();
-#endif
+	kuap_assert_locked();

 	regs->result = r3;

@@ -344,16 +348,13 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,

 	account_cpu_user_exit();

-#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */
-	/*
-	 * We do this at the end so that we do context switch with KERNEL AMR
-	 */
+	/* Restore user access locks last */
 	kuap_user_restore(regs);
-#endif
+	kuep_unlock();

 	return ret;
 }

-#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */
 notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
 {
 	unsigned long ti_flags;

@@ -363,7 +364,6 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
 		BUG_ON(!(regs->msr & MSR_RI));
 	BUG_ON(!(regs->msr & MSR_PR));
-	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(arch_irq_disabled_regs(regs));
 	CT_WARN_ON(ct_state() == CONTEXT_USER);

@@ -371,9 +371,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	 * We don't need to restore AMR on the way back to userspace for KUAP.
 	 * AMR can only have been unlocked if we interrupted the kernel.
 	 */
-#ifdef CONFIG_PPC64
-	kuap_check_amr();
-#endif
+	kuap_assert_locked();

 	local_irq_save(flags);

@@ -392,7 +390,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 		ti_flags = READ_ONCE(current_thread_info()->flags);
 	}

-	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
 		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
 				unlikely((ti_flags & _TIF_RESTORE_TM))) {
 			restore_tm_state(regs);

@@ -427,12 +425,9 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned

 	account_cpu_user_exit();

-	/*
-	 * We do this at the end so that we do context switch with KERNEL AMR
-	 */
-#ifdef CONFIG_PPC64
+	/* Restore user access locks last */
 	kuap_user_restore(regs);
-#endif

 	return ret;
 }

@@ -442,25 +437,20 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 {
 	unsigned long flags;
 	unsigned long ret = 0;
-#ifdef CONFIG_PPC64
-	unsigned long amr;
-#endif
+	unsigned long kuap;

 	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
 	    unlikely(!(regs->msr & MSR_RI)))
 		unrecoverable_exception(regs);
 	BUG_ON(regs->msr & MSR_PR);
-	BUG_ON(!FULL_REGS(regs));
 	/*
 	 * CT_WARN_ON comes here via program_check_exception,
 	 * so avoid recursion.
 	 */
-	if (TRAP(regs) != 0x700)
+	if (TRAP(regs) != INTERRUPT_PROGRAM)
 		CT_WARN_ON(ct_state() == CONTEXT_USER);

-#ifdef CONFIG_PPC64
-	amr = kuap_get_and_check_amr();
-#endif
+	kuap = kuap_get_and_assert_locked();

 	if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
 		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);

@@ -498,14 +488,11 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 #endif

 	/*
-	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
-	 * which would cause Read-After-Write stalls. Hence, we take the AMR
-	 * value from the check above.
+	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
+	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
+	 * AMR value from the check above.
 	 */
-#ifdef CONFIG_PPC64
-	kuap_kernel_restore(regs, amr);
-#endif
+	kuap_kernel_restore(regs, kuap);

 	return ret;
 }
 #endif

@@ -72,8 +72,7 @@ static void iommu_debugfs_del(struct iommu_table *tbl)

 	sprintf(name, "%08lx", tbl->it_index);
 	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
-	if (liobn_entry)
-		debugfs_remove(liobn_entry);
+	debugfs_remove(liobn_entry);
 }
 #else
 static void iommu_debugfs_add(struct iommu_table *tbl){}

@@ -297,6 +296,15 @@ static unsigned long iommu_range_alloc(struct device *dev,
 			pass++;
 			goto again;

+		} else if (pass == tbl->nr_pools + 1) {
+			/* Last resort: try largepool */
+			spin_unlock(&pool->lock);
+			pool = &tbl->large_pool;
+			spin_lock(&pool->lock);
+			pool->hint = pool->start;
+			pass++;
+			goto again;
+
 		} else {
 			/* Give up */
 			spin_unlock_irqrestore(&(pool->lock), flags);

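The new branch gives iommu_range_alloc() one extra pass: once every small pool has been tried, the large pool is raided as a last resort before the allocation fails outright. A stand-alone C sketch of that pass ladder (naive first-fit, no locking, pool sizes invented for the demo — purely illustrative, not the kernel function):

#include <stdio.h>

#define NR_POOLS 4

struct pool { unsigned long hint, start, end; };

static long alloc_from(struct pool *p, unsigned long npages)
{
	if (p->end - p->hint >= npages) {   /* naive first-fit */
		unsigned long ent = p->hint;
		p->hint += npages;
		return (long)ent;
	}
	return -1;
}

static long range_alloc(struct pool pools[], struct pool *large, unsigned long npages)
{
	struct pool *p = &pools[0];
	int pass = 0;
	long ent;

again:
	ent = alloc_from(p, npages);
	if (ent >= 0)
		return ent;
	if (pass < NR_POOLS - 1) {          /* try the next small pool */
		p = &pools[++pass];
		goto again;
	} else if (pass == NR_POOLS - 1) {  /* last resort: the large pool */
		p = large;
		p->hint = p->start;
		pass++;
		goto again;
	}
	return -1;                          /* give up */
}

int main(void)
{
	struct pool pools[NR_POOLS] = { {0,0,8}, {8,8,16}, {16,16,24}, {24,24,32} };
	struct pool large = { 32, 32, 1024 };

	printf("got entry %ld\n", range_alloc(pools, &large, 100));
	return 0;
}

With four exhausted small pools, the 100-entry request above only succeeds on the final large-pool pass, which is exactly the situation the patch addresses.
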
@@ -719,7 +727,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 {
 	unsigned long sz;
 	static int welcomed = 0;
-	struct page *page;
 	unsigned int i;
 	struct iommu_pool *p;

@@ -728,11 +735,11 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 	/* number of bytes needed for the bitmap */
 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

-	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
-	if (!page)
-		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
-	tbl->it_map = page_address(page);
-	memset(tbl->it_map, 0, sz);
+	tbl->it_map = vzalloc_node(sz, nid);
+	if (!tbl->it_map) {
+		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
+		return NULL;
+	}

 	iommu_table_reserve_pages(tbl, res_start, res_end);

@@ -774,8 +781,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,

 static void iommu_table_free(struct kref *kref)
 {
-	unsigned long bitmap_sz;
-	unsigned int order;
 	struct iommu_table *tbl;

 	tbl = container_of(kref, struct iommu_table, it_kref);

@@ -796,12 +801,8 @@ static void iommu_table_free(struct kref *kref)
 	if (!bitmap_empty(tbl->it_map, tbl->it_size))
 		pr_warn("%s: Unexpected TCEs\n", __func__);

-	/* calculate bitmap size in bytes */
-	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
-
 	/* free bitmap */
-	order = get_order(bitmap_sz);
-	free_pages((unsigned long) tbl->it_map, order);
+	vfree(tbl->it_map);

 	/* free table */
 	kfree(tbl);

@@ -897,6 +898,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	unsigned int order;
 	unsigned int nio_pages, io_order;
 	struct page *page;
+	size_t size_io = size;

 	size = PAGE_ALIGN(size);
 	order = get_order(size);

@@ -923,8 +925,9 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);

 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> tbl->it_page_shift;
-	io_order = get_iommu_order(size, tbl);
+	size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
+	nio_pages = size_io >> tbl->it_page_shift;
+	io_order = get_iommu_order(size_io, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
 	if (mapping == DMA_MAPPING_ERROR) {

@@ -939,10 +942,9 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
 	if (tbl) {
-		unsigned int nio_pages;
+		size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
+		unsigned int nio_pages = size_io >> tbl->it_page_shift;

-		size = PAGE_ALIGN(size);
-		nio_pages = size >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));

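The size_io change matters whenever the IOMMU page size differs from the CPU page size: TCE counts must be computed from a size aligned to IOMMU pages, not CPU pages. A stand-alone check of the arithmetic, with 64K CPU pages and 4K IOMMU pages picked purely for illustration:

#include <stdio.h>

#define CPU_PAGE_SIZE    (64UL * 1024)   /* PAGE_ALIGN() granularity */
#define IOMMU_PAGE_SHIFT 12              /* 4K IOMMU pages */
#define IOMMU_PAGE_SIZE  (1UL << IOMMU_PAGE_SHIFT)

static unsigned long align_up(unsigned long v, unsigned long a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long size = 8192;  /* an 8K DMA buffer */

	/* old: align to the CPU page size, then count IOMMU pages */
	unsigned long old_tces = align_up(size, CPU_PAGE_SIZE) >> IOMMU_PAGE_SHIFT;
	/* new: align to the IOMMU page size instead */
	unsigned long new_tces = align_up(size, IOMMU_PAGE_SIZE) >> IOMMU_PAGE_SHIFT;

	printf("old: %lu TCEs, new: %lu TCEs\n", old_tces, new_tces); /* 16 vs 2 */
	return 0;
}

Rounding the 8K buffer up to a 64K CPU page burned 16 TCEs; rounding to the 4K IOMMU page needs only 2 — the "save TCEs" of the commit subject.
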
@@ -1096,7 +1098,7 @@ int iommu_take_ownership(struct iommu_table *tbl)

 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
-		spin_lock(&tbl->pools[i].lock);
+		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

 	iommu_table_release_pages(tbl);

@@ -1124,7 +1126,7 @@ void iommu_release_ownership(struct iommu_table *tbl)

 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
-		spin_lock(&tbl->pools[i].lock);
+		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

 	memset(tbl->it_map, 0, sz);

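spin_lock_nest_lock() is the lockdep annotation at work here: the per-pool locks all share one lock class, so taking them in a loop would otherwise look like recursive locking to lockdep. The annotation records that the already-held large-pool lock serializes the whole group. A kernel-style fragment showing the pattern in isolation (assumed/illustrative, not the patched function):

#include <linux/spinlock.h>

#define NR_POOLS 4

struct table {
	spinlock_t large_lock;
	spinlock_t pool_lock[NR_POOLS];  /* one lock class: needs annotation */
};

static void lock_all_pools(struct table *t)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&t->large_lock, flags);
	for (i = 0; i < NR_POOLS; i++)
		/* plain spin_lock() here would trigger lockdep's
		 * possible-recursive-locking report */
		spin_lock_nest_lock(&t->pool_lock[i], &t->large_lock);

	/* ... critical section over every pool ... */

	for (i = NR_POOLS - 1; i >= 0; i--)
		spin_unlock(&t->pool_lock[i]);
	spin_unlock_irqrestore(&t->large_lock, flags);
}
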
@@ -104,82 +104,6 @@ static inline notrace unsigned long get_irq_happened(void)
 	return happened;
 }

-#ifdef CONFIG_PPC_BOOK3E
-
-/* This is called whenever we are re-enabling interrupts
- * and returns either 0 (nothing to do) or 500/900/280 if
- * there's an EE, DEC or DBELL to generate.
- *
- * This is called in two contexts: From arch_local_irq_restore()
- * before soft-enabling interrupts, and from the exception exit
- * path when returning from an interrupt from a soft-disabled to
- * a soft enabled context. In both case we have interrupts hard
- * disabled.
- *
- * We take care of only clearing the bits we handled in the
- * PACA irq_happened field since we can only re-emit one at a
- * time and we don't want to "lose" one.
- */
-notrace unsigned int __check_irq_replay(void)
-{
-	/*
-	 * We use local_paca rather than get_paca() to avoid all
-	 * the debug_smp_processor_id() business in this low level
-	 * function
-	 */
-	unsigned char happened = local_paca->irq_happened;
-
-	/*
-	 * We are responding to the next interrupt, so interrupt-off
-	 * latencies should be reset here.
-	 */
-	trace_hardirqs_on();
-	trace_hardirqs_off();
-
-	if (happened & PACA_IRQ_DEC) {
-		local_paca->irq_happened &= ~PACA_IRQ_DEC;
-		return 0x900;
-	}
-
-	if (happened & PACA_IRQ_EE) {
-		local_paca->irq_happened &= ~PACA_IRQ_EE;
-		return 0x500;
-	}
-
-	if (happened & PACA_IRQ_DBELL) {
-		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
-		return 0x280;
-	}
-
-	if (happened & PACA_IRQ_HARD_DIS)
-		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-
-	/* There should be nothing left ! */
-	BUG_ON(local_paca->irq_happened != 0);
-
-	return 0;
-}
-
-/*
- * This is specifically called by assembly code to re-enable interrupts
- * if they are currently disabled. This is typically called before
- * schedule() or do_signal() when returning to userspace. We do it
- * in C to avoid the burden of dealing with lockdep etc...
- *
- * NOTE: This is called with interrupts hard disabled but not marked
- * as such in paca->irq_happened, so we need to resync this.
- */
-void notrace restore_interrupts(void)
-{
-	if (irqs_disabled()) {
-		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-		local_irq_enable();
-	} else
-		__hard_irq_enable();
-}
-
-#endif /* CONFIG_PPC_BOOK3E */
-
 void replay_soft_interrupts(void)
 {
 	struct pt_regs regs;

@@ -218,7 +142,7 @@ void replay_soft_interrupts(void)
 	 */
 	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
 		local_paca->irq_happened &= ~PACA_IRQ_HMI;
-		regs.trap = 0xe60;
+		regs.trap = INTERRUPT_HMI;
 		handle_hmi_exception(&regs);
 		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
 			hard_irq_disable();

@@ -226,7 +150,7 @@ void replay_soft_interrupts(void)

 	if (local_paca->irq_happened & PACA_IRQ_DEC) {
 		local_paca->irq_happened &= ~PACA_IRQ_DEC;
-		regs.trap = 0x900;
+		regs.trap = INTERRUPT_DECREMENTER;
 		timer_interrupt(&regs);
 		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
 			hard_irq_disable();

@@ -234,7 +158,7 @@ void replay_soft_interrupts(void)

 	if (local_paca->irq_happened & PACA_IRQ_EE) {
 		local_paca->irq_happened &= ~PACA_IRQ_EE;
-		regs.trap = 0x500;
+		regs.trap = INTERRUPT_EXTERNAL;
 		do_IRQ(&regs);
 		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
 			hard_irq_disable();

@@ -242,10 +166,7 @@ void replay_soft_interrupts(void)

 	if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
 		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
-		if (IS_ENABLED(CONFIG_PPC_BOOK3E))
-			regs.trap = 0x280;
-		else
-			regs.trap = 0xa00;
+		regs.trap = INTERRUPT_DOORBELL;
 		doorbell_exception(&regs);
 		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
 			hard_irq_disable();

@@ -254,7 +175,7 @@ void replay_soft_interrupts(void)
 	/* Book3E does not support soft-masking PMI interrupts */
 	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
 		local_paca->irq_happened &= ~PACA_IRQ_PMI;
-		regs.trap = 0xf00;
+		regs.trap = INTERRUPT_PERFMON;
 		performance_monitor_exception(&regs);
 		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
 			hard_irq_disable();

@@ -282,7 +203,7 @@ static inline void replay_soft_interrupts_irqrestore(void)
 	 * and re-locking AMR but we shouldn't get here in the first place,
 	 * hence the warning.
 	 */
-	kuap_check_amr();
+	kuap_assert_locked();

 	if (kuap_state != AMR_KUAP_BLOCKED)
 		set_kuap(AMR_KUAP_BLOCKED);

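These hunks swap hard-coded trap values for the INTERRUPT_* constants introduced this cycle; the replay structure itself is unchanged — test a pending bit in irq_happened, clear it, fake up a pt_regs with the architected trap number, call the handler. A toy user-space model of that dispatch (constants copied for illustration; the real loop also re-disables hard interrupts between replays):

#include <stdio.h>

#define PACA_IRQ_DEC   0x01
#define PACA_IRQ_EE    0x02
#define PACA_IRQ_DBELL 0x04

enum { INTERRUPT_DECREMENTER = 0x900, INTERRUPT_EXTERNAL = 0x500,
       INTERRUPT_DOORBELL = 0xa00 };

struct pt_regs { unsigned long trap; };

static unsigned char irq_happened = PACA_IRQ_DEC | PACA_IRQ_DBELL;

static void replay(unsigned char bit, unsigned long trap, const char *name)
{
	struct pt_regs regs;

	if (!(irq_happened & bit))
		return;
	irq_happened &= ~bit;       /* clear only the bit being replayed */
	regs.trap = trap;
	printf("replaying %s (trap %#lx)\n", name, regs.trap);
}

int main(void)
{
	replay(PACA_IRQ_DEC, INTERRUPT_DECREMENTER, "decrementer");
	replay(PACA_IRQ_EE, INTERRUPT_EXTERNAL, "external");
	replay(PACA_IRQ_DBELL, INTERRUPT_DOORBELL, "doorbell");
	return 0;
}
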
@@ -667,6 +588,47 @@ static inline void check_stack_overflow(void)
 	}
 }

+static __always_inline void call_do_softirq(const void *sp)
+{
+	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+	asm volatile (
+		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
+		"mr		%%r1, %[sp]		;"
+		"bl		%[callee]		;"
+		 PPC_LL "	%%r1, 0(%%r1)		;"
+		 : // Outputs
+		 : // Inputs
+		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+		   [callee] "i" (__do_softirq)
+		 : // Clobbers
+		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+		   "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+		   "r11", "r12"
+	);
+}
+
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+	register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+	/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+	asm volatile (
+		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
+		"mr		%%r1, %[sp]		;"
+		"bl		%[callee]		;"
+		 PPC_LL "	%%r1, 0(%%r1)		;"
+		 : // Outputs
+		   "+r" (r3)
+		 : // Inputs
+		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+		   [callee] "i" (__do_irq)
+		 : // Clobbers
+		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+		   "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+		   "r11", "r12"
+	);
+}
+
 void __do_irq(struct pt_regs *regs)
 {
 	unsigned int irq;

@@ -11,10 +11,10 @@
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	struct ppc_inst *addr = (struct ppc_inst *)(unsigned long)entry->code;
+	struct ppc_inst *addr = (struct ppc_inst *)jump_entry_code(entry);

 	if (type == JUMP_LABEL_JMP)
-		patch_branch(addr, entry->target, 0);
+		patch_branch(addr, jump_entry_target(entry), 0);
 	else
 		patch_instruction(addr, ppc_inst(PPC_INST_NOP));
 }

@@ -376,7 +376,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
 }

 /*
- * This function does PowerPC specific procesing for interfacing to gdb.
+ * This function does PowerPC specific processing for interfacing to gdb.
 */
 int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 			       char *remcom_in_buffer, char *remcom_out_buffer,

@@ -15,6 +15,7 @@
 #include <asm/udbg.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppc-pci.h>
+#include <asm/early_ioremap.h>

 #undef DEBUG

@@ -34,6 +35,7 @@ static struct legacy_serial_info {
 	unsigned int clock;
 	int irq_check_parent;
 	phys_addr_t taddr;
+	void __iomem *early_addr;
 } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];

 static const struct of_device_id legacy_serial_parents[] __initconst = {

@@ -325,17 +327,16 @@ static void __init setup_legacy_serial_console(int console)
 {
 	struct legacy_serial_info *info = &legacy_serial_infos[console];
 	struct plat_serial8250_port *port = &legacy_serial_ports[console];
-	void __iomem *addr;
 	unsigned int stride;

 	stride = 1 << port->regshift;

 	/* Check if a translated MMIO address has been found */
 	if (info->taddr) {
-		addr = ioremap(info->taddr, 0x1000);
-		if (addr == NULL)
+		info->early_addr = early_ioremap(info->taddr, 0x1000);
+		if (info->early_addr == NULL)
 			return;
-		udbg_uart_init_mmio(addr, stride);
+		udbg_uart_init_mmio(info->early_addr, stride);
 	} else {
 		/* Check if it's PIO and we support untranslated PIO */
 		if (port->iotype == UPIO_PORT && isa_io_special)

@@ -353,6 +354,30 @@ static void __init setup_legacy_serial_console(int console)
 	udbg_uart_setup(info->speed, info->clock);
 }

+static int __init ioremap_legacy_serial_console(void)
+{
+	struct legacy_serial_info *info = &legacy_serial_infos[legacy_serial_console];
+	struct plat_serial8250_port *port = &legacy_serial_ports[legacy_serial_console];
+	void __iomem *vaddr;
+
+	if (legacy_serial_console < 0)
+		return 0;
+
+	if (!info->early_addr)
+		return 0;
+
+	vaddr = ioremap(info->taddr, 0x1000);
+	if (WARN_ON(!vaddr))
+		return -ENOMEM;
+
+	udbg_uart_init_mmio(vaddr, 1 << port->regshift);
+	early_iounmap(info->early_addr, 0x1000);
+	info->early_addr = NULL;
+
+	return 0;
+}
+early_initcall(ioremap_legacy_serial_console);
+
 /*
  * This is called very early, as part of setup_system() or eventually
  * setup_arch(), basically before anything else in this file. This function

@@ -40,7 +40,7 @@ static struct irq_work mce_ue_event_irq_work = {
 	.func = machine_check_ue_irq_work,
 };

-DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
+static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

 static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

@@ -131,6 +131,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
 	 * Populate the mce error_type and type-specific error_type.
 	 */
 	mce_set_error_info(mce, mce_err);
+	if (mce->error_type == MCE_ERROR_TYPE_UE)
+		mce->u.ue_error.ignore_event = mce_err->ignore_event;

 	if (!addr)
 		return;

@@ -159,7 +161,6 @@ void save_mce_event(struct pt_regs *regs, long handled,
 	if (phys_addr != ULONG_MAX) {
 		mce->u.ue_error.physical_address_provided = true;
 		mce->u.ue_error.physical_address = phys_addr;
-		mce->u.ue_error.ignore_event = mce_err->ignore_event;
 		machine_check_ue_event(mce);
 	}
 }

@@ -27,45 +27,6 @@

 	.text

-/*
- * We store the saved ksp_limit in the unused part
- * of the STACK_FRAME_OVERHEAD
- */
-_GLOBAL(call_do_softirq)
-	mflr	r0
-	stw	r0,4(r1)
-	lwz	r10,THREAD+KSP_LIMIT(r2)
-	stw	r3, THREAD+KSP_LIMIT(r2)
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
-	mr	r1,r3
-	stw	r10,8(r1)
-	bl	__do_softirq
-	lwz	r10,8(r1)
-	lwz	r1,0(r1)
-	lwz	r0,4(r1)
-	stw	r10,THREAD+KSP_LIMIT(r2)
-	mtlr	r0
-	blr
-
-/*
- * void call_do_irq(struct pt_regs *regs, void *sp);
- */
-_GLOBAL(call_do_irq)
-	mflr	r0
-	stw	r0,4(r1)
-	lwz	r10,THREAD+KSP_LIMIT(r2)
-	stw	r4, THREAD+KSP_LIMIT(r2)
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
-	mr	r1,r4
-	stw	r10,8(r1)
-	bl	__do_irq
-	lwz	r10,8(r1)
-	lwz	r1,0(r1)
-	lwz	r0,4(r1)
-	stw	r10,THREAD+KSP_LIMIT(r2)
-	mtlr	r0
-	blr
-
 /*
  * This returns the high 64 bits of the product of two 64-bit numbers.
  */

@@ -27,28 +27,6 @@

 	.text

-_GLOBAL(call_do_softirq)
-	mflr	r0
-	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
-	mr	r1,r3
-	bl	__do_softirq
-	ld	r1,0(r1)
-	ld	r0,16(r1)
-	mtlr	r0
-	blr
-
-_GLOBAL(call_do_irq)
-	mflr	r0
-	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
-	mr	r1,r4
-	bl	__do_irq
-	ld	r1,0(r1)
-	ld	r0,16(r1)
-	mtlr	r0
-	blr
-
 _GLOBAL(__bswapdi2)
 EXPORT_SYMBOL(__bswapdi2)
 	srdi	r8,r3,32

@@ -14,6 +14,7 @@
 #include <asm/firmware.h>
 #include <linux/sort.h>
 #include <asm/setup.h>
+#include <asm/sections.h>

 static LIST_HEAD(module_bug_list);

@@ -88,12 +89,28 @@ int module_finalize(const Elf_Ehdr *hdr,
 }

 #ifdef MODULES_VADDR
-void *module_alloc(unsigned long size)
+static __always_inline void *
+__module_alloc(unsigned long size, unsigned long start, unsigned long end)
 {
-	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
-
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL,
+	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL,
 				    PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }

+void *module_alloc(unsigned long size)
+{
+	unsigned long limit = (unsigned long)_etext - SZ_32M;
+	void *ptr = NULL;
+
+	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+	/* First try within 32M limit from _etext to avoid branch trampolines */
+	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
+		ptr = __module_alloc(size, limit, MODULES_END);
+
+	if (!ptr)
+		ptr = __module_alloc(size, MODULES_VADDR, MODULES_END);
+
+	return ptr;
+}
 #endif

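The 32M figure in the comment comes from the instruction set: a PowerPC relative branch (b/bl) encodes a 24-bit immediate that is shifted left two bits and sign-extended, so a direct call reaches at most +/-32 MB; module text within that distance of the kernel's _etext needs no branch trampolines. A stand-alone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* I-form branch: 24-bit LI field, <<2, sign-extended => +/-2^25 bytes */
	long reach = 1L << 25;

	printf("direct branch reach: +/- %ld MB\n", reach >> 20);  /* 32 */
	return 0;
}
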
@@ -141,11 +141,21 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 	}
 }

+static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
+{
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_RAW_LIS(reg, IMM_H(val))));
+	addr++;
+
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_RAW_ORI(reg, reg, IMM_L(val))));
+}
+
 /*
  * Generate instructions to load provided immediate 64-bit value
  * to register 'reg' and patch these instructions at 'addr'.
  */
-static void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
+static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
 {
 	/* lis reg,(op)@highest */
 	patch_instruction((struct ppc_inst *)addr,

@@ -177,6 +187,14 @@ static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
 			  ___PPC_RS(reg) | (val & 0xffff)));
 }

+static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
+{
+	if (IS_ENABLED(CONFIG_PPC64))
+		patch_imm64_load_insns(val, reg, addr);
+	else
+		patch_imm32_load_insns(val, reg, addr);
+}
+
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 {
 	struct ppc_inst branch_op_callback, branch_emulate_step, temp;

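The new patch_imm32_load_insns() builds a 32-bit immediate with a lis/ori pair. A stand-alone sanity check of that halfword split (IMM_H/IMM_L re-defined locally for the demo; the kernel's versions live in its asm headers):

#include <stdio.h>

#define IMM_H(v) (((v) >> 16) & 0xffff)   /* high halfword, for lis */
#define IMM_L(v) ((v) & 0xffff)           /* low halfword, for ori */

int main(void)
{
	unsigned long val = 0xc0de1234UL;   /* 32-bit immediate to load */
	unsigned long rebuilt = (IMM_H(val) << 16) | IMM_L(val);

	printf("lis 0x%04lx; ori 0x%04lx -> %#lx\n",
	       IMM_H(val), IMM_L(val), rebuilt);
	return 0;
}

Because ori is a logical OR rather than an add, no @ha-style carry adjustment is needed, unlike lis/addi sequences.
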
@@ -230,7 +248,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	 * Fixup the template with instructions to:
 	 * 1. load the address of the actual probepoint
 	 */
-	patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);
+	patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

 	/*
 	 * 2. branch to optimized_callback() and emulate_step()

@@ -264,7 +282,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	 * 3. load instruction to be emulated into relevant register, and
 	 */
 	temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
-	patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX);
+	patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);

 	/*
 	 * 4. branch back from trampoline

|
@ -9,6 +9,16 @@
|
|||
#include <asm/ptrace.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
#define SAVE_30GPRS(base) SAVE_10GPRS(2,base); SAVE_10GPRS(12,base); SAVE_10GPRS(22,base)
|
||||
#define REST_30GPRS(base) REST_10GPRS(2,base); REST_10GPRS(12,base); REST_10GPRS(22,base)
|
||||
#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop; nop; nop
|
||||
#else
|
||||
#define SAVE_30GPRS(base) stmw r2, GPR2(base)
|
||||
#define REST_30GPRS(base) lmw r2, GPR2(base)
|
||||
#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop
|
||||
#endif
|
||||
|
||||
#define OPT_SLOT_SIZE 65536
|
||||
|
||||
.balign 4
|
||||
|
@ -30,39 +40,41 @@ optinsn_slot:
|
|||
.global optprobe_template_entry
|
||||
optprobe_template_entry:
|
||||
/* Create an in-memory pt_regs */
|
||||
stdu r1,-INT_FRAME_SIZE(r1)
|
||||
PPC_STLU r1,-INT_FRAME_SIZE(r1)
|
||||
SAVE_GPR(0,r1)
|
||||
/* Save the previous SP into stack */
|
||||
addi r0,r1,INT_FRAME_SIZE
|
||||
std r0,GPR1(r1)
|
||||
SAVE_10GPRS(2,r1)
|
||||
SAVE_10GPRS(12,r1)
|
||||
SAVE_10GPRS(22,r1)
|
||||
PPC_STL r0,GPR1(r1)
|
||||
SAVE_30GPRS(r1)
|
||||
/* Save SPRS */
|
||||
mfmsr r5
|
||||
std r5,_MSR(r1)
|
||||
PPC_STL r5,_MSR(r1)
|
||||
li r5,0x700
|
||||
std r5,_TRAP(r1)
|
||||
PPC_STL r5,_TRAP(r1)
|
||||
li r5,0
|
||||
std r5,ORIG_GPR3(r1)
|
||||
std r5,RESULT(r1)
|
||||
PPC_STL r5,ORIG_GPR3(r1)
|
||||
PPC_STL r5,RESULT(r1)
|
||||
mfctr r5
|
||||
std r5,_CTR(r1)
|
||||
PPC_STL r5,_CTR(r1)
|
||||
mflr r5
|
||||
std r5,_LINK(r1)
|
||||
PPC_STL r5,_LINK(r1)
|
||||
mfspr r5,SPRN_XER
|
||||
std r5,_XER(r1)
|
||||
PPC_STL r5,_XER(r1)
|
||||
mfcr r5
|
||||
std r5,_CCR(r1)
|
||||
PPC_STL r5,_CCR(r1)
|
||||
#ifdef CONFIG_PPC64
|
||||
lbz r5,PACAIRQSOFTMASK(r13)
|
||||
std r5,SOFTE(r1)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We may get here from a module, so load the kernel TOC in r2.
|
||||
* The original TOC gets restored when pt_regs is restored
|
||||
* further below.
|
||||
*/
|
||||
#ifdef CONFIG_PPC64
|
||||
ld r2,PACATOC(r13)
|
||||
#endif
|
||||
|
||||
.global optprobe_template_op_address
|
||||
optprobe_template_op_address:
|
||||
|
@@ -70,11 +82,8 @@ optprobe_template_op_address:
 	 * Parameters to optimized_callback():
 	 * 1. optimized_kprobe structure in r3
 	 */
-	nop
-	nop
-	nop
-	nop
-	nop
+	TEMPLATE_FOR_IMM_LOAD_INSNS

 	/* 2. pt_regs pointer in r4 */
 	addi	r4,r1,STACK_FRAME_OVERHEAD

@@ -92,11 +101,7 @@ optprobe_template_call_handler:
 	.global optprobe_template_insn
 optprobe_template_insn:
 	/* 2, Pass instruction to be emulated in r4 */
-	nop
-	nop
-	nop
-	nop
-	nop
+	TEMPLATE_FOR_IMM_LOAD_INSNS

 	.global optprobe_template_call_emulate
 optprobe_template_call_emulate:

@@ -107,20 +112,18 @@ optprobe_template_call_emulate:
 	 * All done.
 	 * Now, restore the registers...
 	 */
-	ld	r5,_MSR(r1)
+	PPC_LL	r5,_MSR(r1)
 	mtmsr	r5
-	ld	r5,_CTR(r1)
+	PPC_LL	r5,_CTR(r1)
 	mtctr	r5
-	ld	r5,_LINK(r1)
+	PPC_LL	r5,_LINK(r1)
 	mtlr	r5
-	ld	r5,_XER(r1)
+	PPC_LL	r5,_XER(r1)
 	mtxer	r5
-	ld	r5,_CCR(r1)
+	PPC_LL	r5,_CCR(r1)
 	mtcr	r5
 	REST_GPR(0,r1)
-	REST_10GPRS(2,r1)
-	REST_10GPRS(12,r1)
-	REST_10GPRS(22,r1)
+	REST_30GPRS(r1)
 	/* Restore the previous SP */
 	addi	r1,r1,INT_FRAME_SIZE

@@ -1117,9 +1117,10 @@ void restore_tm_state(struct pt_regs *regs)
 	regs->msr |= msr_diff;
 }

-#else
+#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
 #define tm_recheckpoint_new_task(new)
 #define __switch_to_tm(prev, new)
+void tm_reclaim_current(uint8_t cause) {}
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

 static inline void save_sprs(struct thread_struct *t)

@@ -1255,6 +1256,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	restore_sprs(old_thread, new_thread);

+#ifdef CONFIG_PPC32
+	kuap_assert_locked();
+#endif
 	last = _switch(old_thread, new_thread);

 #ifdef CONFIG_PPC_BOOK3S_64

@@ -1444,11 +1448,9 @@ static void print_msr_bits(unsigned long val)
 #ifdef CONFIG_PPC64
 #define REG		"%016lx"
 #define REGS_PER_LINE	4
-#define LAST_VOLATILE	13
 #else
 #define REG		"%08lx"
 #define REGS_PER_LINE	8
-#define LAST_VOLATILE	12
 #endif

 static void __show_regs(struct pt_regs *regs)

@@ -1465,7 +1467,9 @@ static void __show_regs(struct pt_regs *regs)
 	trap = TRAP(regs);
 	if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
 		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
-	if (trap == 0x200 || trap == 0x300 || trap == 0x600) {
+	if (trap == INTERRUPT_MACHINE_CHECK ||
+	    trap == INTERRUPT_DATA_STORAGE ||
+	    trap == INTERRUPT_ALIGNMENT) {
 		if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
 			pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
 		else

@@ -1484,8 +1488,6 @@ static void __show_regs(struct pt_regs *regs)
 		if ((i % REGS_PER_LINE) == 0)
 			pr_cont("\nGPR%02d: ", i);
 		pr_cont(REG " ", regs->gpr[i]);
-		if (i == LAST_VOLATILE && !FULL_REGS(regs))
-			break;
 	}
 	pr_cont("\n");
 	/*

@@ -1688,7 +1690,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	} else {
 		/* user thread */
 		struct pt_regs *regs = current_pt_regs();
-		CHECK_FULL_REGS(regs);
 		*childregs = *regs;
 		if (usp)
 			childregs->gpr[1] = usp;

@@ -1724,9 +1725,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	kregs = (struct pt_regs *) sp;
 	sp -= STACK_FRAME_OVERHEAD;
 	p->thread.ksp = sp;
-#ifdef CONFIG_PPC32
-	p->thread.ksp_limit = (unsigned long)end_of_stack(p);
-#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	for (i = 0; i < nr_wp_slots(); i++)
 		p->thread.ptrace_bps[i] = NULL;

@@ -1796,13 +1794,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	regs->ccr = 0;
 	regs->gpr[1] = sp;

-	/*
-	 * We have just cleared all the nonvolatile GPRs, so make
-	 * FULL_REGS(regs) return true.  This is necessary to allow
-	 * ptrace to examine the thread immediately after exec.
-	 */
-	SET_FULL_REGS(regs);
-
 #ifdef CONFIG_PPC32
 	regs->mq = 0;
 	regs->nip = start;

@@ -65,6 +65,8 @@
 #define DBG(fmt...)
 #endif

+int *chip_id_lookup_table;
+
 #ifdef CONFIG_PPC64
 int __initdata iommu_is_off;
 int __initdata iommu_force_on;

@@ -267,7 +269,7 @@ static struct feature_property {
 };

 #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
-static inline void identical_pvr_fixup(unsigned long node)
+static __init void identical_pvr_fixup(unsigned long node)
 {
 	unsigned int pvr;
 	const char *model = of_get_flat_dt_prop(node, "model", NULL);

@@ -914,13 +916,22 @@ EXPORT_SYMBOL(of_get_ibm_chip_id);
 int cpu_to_chip_id(int cpu)
 {
 	struct device_node *np;
+	int ret = -1, idx;
+
+	idx = cpu / threads_per_core;
+	if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
+		return chip_id_lookup_table[idx];

 	np = of_get_cpu_node(cpu, NULL);
-	if (!np)
-		return -1;
+	if (np) {
+		ret = of_get_ibm_chip_id(np);
+		of_node_put(np);

-	of_node_put(np);
-	return of_get_ibm_chip_id(np);
+		if (chip_id_lookup_table)
+			chip_id_lookup_table[idx] = ret;
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL(cpu_to_chip_id);

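cpu_to_chip_id() now memoizes: one table slot per core, -1 meaning "not yet looked up", with the device-tree walk only on a miss. A toy model of the same caching shape (the slow path is a stub; the real table is sized from the topology and filled from firmware):

#include <stdio.h>

#define NR_CORES 8
#define THREADS_PER_CORE 4

static int chip_id_cache[NR_CORES];

static int slow_firmware_lookup(int cpu)  /* stand-in for of_get_ibm_chip_id() */
{
	printf("  (slow device-tree walk for cpu %d)\n", cpu);
	return cpu / (2 * THREADS_PER_CORE);  /* pretend: two cores per chip */
}

static int cpu_to_chip_id(int cpu)
{
	int idx = cpu / THREADS_PER_CORE;

	if (chip_id_cache[idx] != -1)         /* fast path: already cached */
		return chip_id_cache[idx];

	chip_id_cache[idx] = slow_firmware_lookup(cpu);
	return chip_id_cache[idx];
}

int main(void)
{
	for (int i = 0; i < NR_CORES; i++)
		chip_id_cache[i] = -1;        /* -1: not looked up yet */

	printf("chip %d\n", cpu_to_chip_id(5));
	printf("chip %d (cached)\n", cpu_to_chip_id(5));
	return 0;
}
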
@@ -2983,7 +2983,7 @@ static void __init fixup_device_tree_efika_add_phy(void)
 			" 0x3 encode-int encode+"
 			" s\" interrupts\" property"
 			" finish-device");
-	};
+	}

 	/* Check for a PHY device node - if missing then create one and
 	 * give it's phandle to the ethernet node */