2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* S390 version
|
2012-07-20 17:15:04 +08:00
|
|
|
* Copyright IBM Corp. 1999
|
2005-04-17 06:20:36 +08:00
|
|
|
* Author(s): Hartmut Penner (hp@de.ibm.com),
|
|
|
|
* Martin Schwidefsky (schwidefsky@de.ibm.com)
|
|
|
|
*
|
|
|
|
* Derived from "include/asm-i386/processor.h"
|
|
|
|
* Copyright (C) 1994, Linus Torvalds
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __ASM_S390_PROCESSOR_H
|
|
|
|
#define __ASM_S390_PROCESSOR_H
|
|
|
|
|
2015-10-06 22:23:39 +08:00
|
|
|
#include <linux/const.h>
|
|
|
|
|
2014-04-15 18:55:07 +08:00
|
|
|
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
|
|
|
|
#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
|
2014-09-30 23:37:52 +08:00
|
|
|
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
|
s390/kernel: lazy restore fpu registers
Improve the save and restore behavior of FPU register contents to use the
vector extension within the kernel.
The kernel does not use floating-point or vector registers and, therefore,
saving and restoring the FPU register contents are performed for handling
signals or switching processes only. To prepare for using vector
instructions and vector registers within the kernel, enhance the save
behavior and implement a lazy restore at return to user space from a
system call or interrupt.
To implement the lazy restore, the save_fpu_regs() sets a CPU information
flag, CIF_FPU, to indicate that the FPU registers must be restored.
Saving and setting CIF_FPU is performed in an atomic fashion to be
interrupt-safe. When the kernel wants to use the vector extension or
wants to change the FPU register state for a task during signal handling,
the save_fpu_regs() must be called first. The CIF_FPU flag is also set at
process switch. At return to user space, the FPU state is restored. In
particular, the FPU state includes the floating-point or vector register
contents, as well as, vector-enablement and floating-point control. The
FPU state restore and clearing CIF_FPU is also performed in an atomic
fashion.
For KVM, the restore of the FPU register state is performed when restoring
the general-purpose guest registers before the SIE instructions is started.
Because the path towards the SIE instruction is interruptible, the CIF_FPU
flag must be checked again right before going into SIE. If set, the guest
registers must be reloaded again by re-entering the outer SIE loop. This
is the same behavior as if the SIE critical section is interrupted.
Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2015-06-10 18:53:42 +08:00
|
|
|
#define CIF_FPU 3 /* restore vector registers */
|
2015-08-15 17:42:21 +08:00
|
|
|
#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
|
2014-04-15 18:55:07 +08:00
|
|
|
|
2015-10-06 22:23:39 +08:00
|
|
|
#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
|
|
|
|
#define _CIF_ASCE _BITUL(CIF_ASCE)
|
|
|
|
#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
|
|
|
|
#define _CIF_FPU _BITUL(CIF_FPU)
|
|
|
|
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
|
2014-04-15 18:55:07 +08:00
|
|
|
|
2012-09-05 19:26:11 +08:00
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
2008-12-25 20:39:16 +08:00
|
|
|
#include <linux/linkage.h>
|
2012-03-29 01:30:02 +08:00
|
|
|
#include <linux/irqflags.h>
|
2009-09-11 16:29:04 +08:00
|
|
|
#include <asm/cpu.h>
|
2009-04-14 21:36:16 +08:00
|
|
|
#include <asm/page.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/ptrace.h>
|
2009-04-14 21:36:16 +08:00
|
|
|
#include <asm/setup.h>
|
2012-07-31 16:52:05 +08:00
|
|
|
#include <asm/runtime_instr.h>
|
2015-06-11 21:33:54 +08:00
|
|
|
#include <asm/fpu-internal.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-04-15 18:55:07 +08:00
|
|
|
static inline void set_cpu_flag(int flag)
|
|
|
|
{
|
2015-10-06 22:23:29 +08:00
|
|
|
S390_lowcore.cpu_flags |= (1UL << flag);
|
2014-04-15 18:55:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void clear_cpu_flag(int flag)
|
|
|
|
{
|
2015-10-06 22:23:29 +08:00
|
|
|
S390_lowcore.cpu_flags &= ~(1UL << flag);
|
2014-04-15 18:55:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int test_cpu_flag(int flag)
|
|
|
|
{
|
2015-10-06 22:23:29 +08:00
|
|
|
return !!(S390_lowcore.cpu_flags & (1UL << flag));
|
2014-04-15 18:55:07 +08:00
|
|
|
}
|
|
|
|
|
2014-09-30 23:37:52 +08:00
|
|
|
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Default implementation of macro that returns current
|
|
|
|
* instruction pointer ("program counter").
|
|
|
|
*/
|
2006-09-28 22:56:43 +08:00
|
|
|
#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-09-11 16:29:04 +08:00
|
|
|
static inline void get_cpu_id(struct cpuid *ptr)
|
2007-02-21 17:55:18 +08:00
|
|
|
{
|
2010-02-27 05:37:31 +08:00
|
|
|
asm volatile("stidp %0" : "=Q" (*ptr));
|
2007-02-21 17:55:18 +08:00
|
|
|
}
|
|
|
|
|
2007-02-06 04:18:31 +08:00
|
|
|
extern void s390_adjust_jiffies(void);
|
2011-10-30 22:17:13 +08:00
|
|
|
extern const struct seq_operations cpuinfo_op;
|
|
|
|
extern int sysctl_ieee_emulation_warnings;
|
2012-09-07 03:48:11 +08:00
|
|
|
extern void execve_tail(void);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2009-03-18 20:27:36 +08:00
|
|
|
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
|
2009-03-18 20:27:36 +08:00
|
|
|
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
|
2008-02-10 01:24:36 +08:00
|
|
|
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
|
|
|
|
(1UL << 30) : (1UL << 41))
|
|
|
|
#define TASK_SIZE TASK_SIZE_OF(current)
|
2013-07-26 21:04:03 +08:00
|
|
|
#define TASK_MAX_SIZE (1UL << 53)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-02-10 01:24:37 +08:00
|
|
|
#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
|
|
|
|
#define STACK_TOP_MAX (1UL << 42)
|
2008-02-08 20:19:26 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#define HAVE_ARCH_PICK_MMAP_LAYOUT
|
|
|
|
|
|
|
|
typedef struct {
	__u32 ar4;	/* saved access register 4 value; presumably selects the
			 * user/kernel address space for uaccess — confirm against
			 * set_fs()/get_fs() users */
} mm_segment_t;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Thread structure
|
|
|
|
*/
|
|
|
|
struct thread_struct {
	struct fpu fpu;			/* FP and VX register save area */
	unsigned int  acrs[NUM_ACRS];	/* saved access registers */
	unsigned long ksp;		/* kernel stack pointer */
	mm_segment_t mm_segment;	/* address space for uaccess (get_fs/set_fs) */
	unsigned long gmap_addr;	/* address of last gmap fault. */
	unsigned int gmap_pfault;	/* signal of a pending guest pfault */
	struct per_regs per_user;	/* User specified PER registers */
	struct per_event per_event;	/* Cause of the last PER trap */
	unsigned long per_flags;	/* Flags to control debug behavior */
	/* pfault_wait is used to block the process on a pfault event */
	unsigned long pfault_wait;
	struct list_head list;		/* pfault wait list linkage */
	/* cpu runtime instrumentation */
	struct runtime_instr_cb *ri_cb;	/* RI control block, NULL if disabled */
	int ri_signum;			/* signal to deliver on RI buffer full */
	unsigned char trap_tdb[256];	/* Transaction abort diagnose block */
};
|
|
|
|
|
2013-07-03 04:58:26 +08:00
|
|
|
/* Flag to disable transactions. */
|
|
|
|
#define PER_FLAG_NO_TE 1UL
|
|
|
|
/* Flag to enable random transaction aborts. */
|
|
|
|
#define PER_FLAG_TE_ABORT_RAND 2UL
|
|
|
|
/* Flag to specify random transaction abort mode:
|
|
|
|
* - abort each transaction at a random instruction before TEND if set.
|
|
|
|
* - abort random transactions at a random instruction if cleared.
|
|
|
|
*/
|
|
|
|
#define PER_FLAG_TE_ABORT_RAND_TEND 4UL
|
2012-07-31 17:03:04 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
typedef struct thread_struct thread_struct;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Stack layout of a C stack frame.
|
|
|
|
*/
|
|
|
|
#ifndef __PACK_STACK
struct stack_frame {
	unsigned long back_chain;	/* pointer to the caller's stack frame */
	unsigned long empty1[5];	/* unused slots of the standard frame */
	unsigned long gprs[10];		/* save area for gprs 6..15 */
	unsigned int  empty2[8];	/* fp register save slots (unused here) */
};
#else
/* Packed variant: same slots, reordered so back_chain is last. */
struct stack_frame {
	unsigned long empty1[5];
	unsigned int  empty2[8];
	unsigned long gprs[10];
	unsigned long back_chain;
};
#endif
|
|
|
|
|
|
|
|
#define ARCH_MIN_TASKALIGN 8
|
|
|
|
|
2015-09-29 23:53:22 +08:00
|
|
|
extern __vector128 init_task_fpu_regs[__NUM_VXRS];
|
2007-10-22 18:52:45 +08:00
|
|
|
#define INIT_THREAD { \
|
|
|
|
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
|
2015-09-29 23:53:22 +08:00
|
|
|
.fpu.regs = (void *)&init_task_fpu_regs, \
|
2007-10-22 18:52:45 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Do necessary setup to start up a new thread.
|
|
|
|
*/
|
2011-10-30 22:16:50 +08:00
|
|
|
#define start_thread(regs, new_psw, new_stackp) do { \
|
2013-09-24 15:14:56 +08:00
|
|
|
regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
|
2011-10-30 22:16:50 +08:00
|
|
|
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
|
|
|
|
regs->gprs[15] = new_stackp; \
|
2012-09-07 03:48:11 +08:00
|
|
|
execve_tail(); \
|
2008-07-14 15:58:54 +08:00
|
|
|
} while (0)
|
|
|
|
|
2011-10-30 22:16:50 +08:00
|
|
|
#define start_thread31(regs, new_psw, new_stackp) do { \
|
2013-09-24 15:14:56 +08:00
|
|
|
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
|
2011-10-30 22:16:50 +08:00
|
|
|
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
|
|
|
|
regs->gprs[15] = new_stackp; \
|
|
|
|
crst_table_downgrade(current->mm, 1UL << 31); \
|
2012-09-07 03:48:11 +08:00
|
|
|
execve_tail(); \
|
2005-04-17 06:20:36 +08:00
|
|
|
} while (0)
|
|
|
|
|
|
|
|
/* Forward declaration, a strange C thing */
|
|
|
|
struct task_struct;
|
|
|
|
struct mm_struct;
|
2008-02-08 20:18:33 +08:00
|
|
|
struct seq_file;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2015-02-12 20:08:27 +08:00
|
|
|
void show_cacheinfo(struct seq_file *m);
|
2012-08-29 20:12:20 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Free all resources held by a thread. */
|
|
|
|
extern void release_thread(struct task_struct *);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return saved PC of a blocked thread.
|
|
|
|
*/
|
|
|
|
extern unsigned long thread_saved_pc(struct task_struct *t);
|
|
|
|
|
|
|
|
unsigned long get_wchan(struct task_struct *p);
|
2006-01-12 17:05:49 +08:00
|
|
|
#define task_pt_regs(tsk) ((struct pt_regs *) \
|
2006-01-12 17:05:50 +08:00
|
|
|
(task_stack_page(tsk) + THREAD_SIZE) - 1)
|
2006-01-12 17:05:49 +08:00
|
|
|
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
|
|
|
|
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-10-16 15:58:01 +08:00
|
|
|
/* Has task runtime instrumentation enabled ? */
|
|
|
|
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
|
|
|
|
|
2012-03-29 01:30:02 +08:00
|
|
|
static inline unsigned short stap(void)
|
|
|
|
{
|
|
|
|
unsigned short cpu_address;
|
|
|
|
|
|
|
|
asm volatile("stap %0" : "=m" (cpu_address));
|
|
|
|
return cpu_address;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Give up the time slice of the virtual PU.
|
|
|
|
*/
|
2015-01-28 14:43:56 +08:00
|
|
|
void cpu_relax(void);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
arch, locking: Ciao arch_mutex_cpu_relax()
The arch_mutex_cpu_relax() function, introduced by 34b133f, is
hacky and ugly. It was added a few years ago to address the fact
that common cpu_relax() calls include yielding on s390, and thus
impact the optimistic spinning functionality of mutexes. Nowadays
we use this function well beyond mutexes: rwsem, qrwlock, mcs and
lockref. Since the macro that defines the call is in the mutex header,
any users must include mutex.h and the naming is misleading as well.
This patch (i) renames the call to cpu_relax_lowlatency ("relax, but
only if you can do it with very low latency") and (ii) defines it in
each arch's asm/processor.h local header, just like for regular cpu_relax
functions. On all archs, except s390, cpu_relax_lowlatency is simply cpu_relax,
and thus we can take it out of mutex.h. While this can seem redundant,
I believe it is a good choice as it allows us to move out arch specific
logic from generic locking primitives and enables future(?) archs to
transparently define it, similarly to System Z.
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Bharat Bhushan <r65777@freescale.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Liqin <liqin.linux@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Howells <dhowells@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Cc: Dominik Dingel <dingel@linux.vnet.ibm.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Joe Perches <joe@perches.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Joseph Myers <joseph@codesourcery.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Lennox Wu <lennox.wu@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Nicolas Pitre <nico@linaro.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Qais Yousef <qais.yousef@imgtec.com>
Cc: Qiaowei Ren <qiaowei.ren@intel.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Stratos Karafotis <stratosk@semaphore.gr>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vasily Kulikov <segoon@openwall.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Wolfram Sang <wsa@the-dreams.de>
Cc: adi-buildroot-devel@lists.sourceforge.net
Cc: linux390@de.ibm.com
Cc: linux-alpha@vger.kernel.org
Cc: linux-am33-list@redhat.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-c6x-dev@linux-c6x.org
Cc: linux-cris-kernel@axis.com
Cc: linux-hexagon@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux@lists.openrisc.net
Cc: linux-m32r-ja@ml.linux-m32r.org
Cc: linux-m32r@ml.linux-m32r.org
Cc: linux-m68k@lists.linux-m68k.org
Cc: linux-metag@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: linux-parisc@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1404079773.2619.4.camel@buesod1.americas.hpqcorp.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-06-30 06:09:33 +08:00
|
|
|
#define cpu_relax_lowlatency() barrier()
|
2013-09-28 17:23:59 +08:00
|
|
|
|
2007-06-19 19:10:06 +08:00
|
|
|
static inline void psw_set_key(unsigned int key)
|
|
|
|
{
|
|
|
|
asm volatile("spka 0(%0)" : : "d" (key));
|
|
|
|
}
|
|
|
|
|
2005-06-26 05:55:30 +08:00
|
|
|
/*
|
|
|
|
* Set PSW to specified value.
|
|
|
|
*/
|
|
|
|
static inline void __load_psw(psw_t psw)
{
	/* Load the complete PSW; execution resumes at psw.addr. */
	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Set PSW mask to specified value, while leaving the
|
|
|
|
* PSW addr pointing to the next instruction.
|
|
|
|
*/
|
|
|
|
static inline void __load_psw_mask (unsigned long mask)
|
|
|
|
{
|
|
|
|
unsigned long addr;
|
|
|
|
psw_t psw;
|
2005-06-26 05:55:30 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
psw.mask = mask;
|
|
|
|
|
2006-09-28 22:56:43 +08:00
|
|
|
asm volatile(
|
|
|
|
" larl %0,1f\n"
|
2010-02-27 05:37:31 +08:00
|
|
|
" stg %0,%O1+8(%R1)\n"
|
|
|
|
" lpswe %1\n"
|
2005-04-17 06:20:36 +08:00
|
|
|
"1:"
|
2010-02-27 05:37:31 +08:00
|
|
|
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2011-10-30 22:16:48 +08:00
|
|
|
|
2015-07-08 16:20:04 +08:00
|
|
|
/*
|
|
|
|
* Extract current PSW mask
|
|
|
|
*/
|
|
|
|
static inline unsigned long __extract_psw(void)
|
|
|
|
{
|
|
|
|
unsigned int reg1, reg2;
|
|
|
|
|
|
|
|
asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
|
|
|
|
return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
|
|
|
|
}
|
|
|
|
|
2011-10-30 22:16:48 +08:00
|
|
|
/*
|
|
|
|
* Rewind PSW instruction address by specified number of bytes.
|
|
|
|
*/
|
|
|
|
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
	unsigned long addr_mask;

	/* Pick the address wrap mask for the PSW's addressing mode. */
	if (psw.mask & PSW_MASK_EA)
		addr_mask = -1UL;			/* 64-bit mode */
	else if (psw.mask & PSW_MASK_BA)
		addr_mask = (1UL << 31) - 1;		/* 31-bit mode */
	else
		addr_mask = (1UL << 24) - 1;		/* 24-bit mode */
	return (psw.addr - ilc) & addr_mask;
}
|
2014-10-01 16:57:57 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Function to stop a processor until the next interrupt occurs
|
|
|
|
*/
|
|
|
|
void enabled_wait(void);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Function to drop a processor into disabled wait state
|
|
|
|
*/
|
2012-01-13 09:17:21 +08:00
|
|
|
static inline void __noreturn disabled_wait(unsigned long code)
{
	unsigned long ctl_buf;
	psw_t dw_psw;

	/* Disabled wait PSW; @code in the address field tags the reason. */
	dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
	dw_psw.addr = code;
	/*
	 * Store status and then load disabled wait psw,
	 * the processor is dead afterwards
	 */
	asm volatile(
		"	stctg	0,0,0(%2)\n"
		"	ni	4(%2),0xef\n"	/* switch off protection */
		"	lctlg	0,0,0(%2)\n"
		"	lghi	1,0x1000\n"
		"	stpt	0x328(1)\n"	/* store timer */
		"	stckc	0x330(1)\n"	/* store clock comparator */
		"	stpx	0x318(1)\n"	/* store prefix register */
		"	stam	0,15,0x340(1)\n"/* store access registers */
		"	stfpc	0x31c(1)\n"	/* store fpu control */
		"	std	0,0x200(1)\n"	/* store f0 */
		"	std	1,0x208(1)\n"	/* store f1 */
		"	std	2,0x210(1)\n"	/* store f2 */
		"	std	3,0x218(1)\n"	/* store f3 */
		"	std	4,0x220(1)\n"	/* store f4 */
		"	std	5,0x228(1)\n"	/* store f5 */
		"	std	6,0x230(1)\n"	/* store f6 */
		"	std	7,0x238(1)\n"	/* store f7 */
		"	std	8,0x240(1)\n"	/* store f8 */
		"	std	9,0x248(1)\n"	/* store f9 */
		"	std	10,0x250(1)\n"	/* store f10 */
		"	std	11,0x258(1)\n"	/* store f11 */
		"	std	12,0x260(1)\n"	/* store f12 */
		"	std	13,0x268(1)\n"	/* store f13 */
		"	std	14,0x270(1)\n"	/* store f14 */
		"	std	15,0x278(1)\n"	/* store f15 */
		"	stmg	0,15,0x280(1)\n"/* store general registers */
		"	stctg	0,15,0x380(1)\n"/* store control registers */
		"	oi	0x384(1),0x10\n"/* fake protection bit */
		"	lpswe	0(%1)"
		: "=m" (ctl_buf)
		: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
	while (1);
}
|
|
|
|
|
2012-03-29 01:30:02 +08:00
|
|
|
/*
|
|
|
|
* Use to set psw mask except for the first byte which
|
|
|
|
* won't be changed by this function.
|
|
|
|
*/
|
|
|
|
static inline void
__set_psw_mask(unsigned long mask)
{
	unsigned long first_byte;

	/* Preserve the topmost byte of the current PSW mask. */
	first_byte = arch_local_save_flags() & ~(-1UL >> 8);
	__load_psw_mask(mask | first_byte);
}
|
|
|
|
|
|
|
|
#define local_mcck_enable() \
|
2013-09-24 15:14:56 +08:00
|
|
|
__set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
|
2012-03-29 01:30:02 +08:00
|
|
|
#define local_mcck_disable() \
|
2013-09-24 15:14:56 +08:00
|
|
|
__set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
|
2012-03-29 01:30:02 +08:00
|
|
|
|
2007-02-06 04:18:37 +08:00
|
|
|
/*
|
|
|
|
* Basic Machine Check/Program Check Handler.
|
|
|
|
*/
|
|
|
|
|
|
|
|
extern void s390_base_mcck_handler(void);
|
|
|
|
extern void s390_base_pgm_handler(void);
|
|
|
|
extern void s390_base_ext_handler(void);
|
|
|
|
|
|
|
|
extern void (*s390_base_mcck_handler_fn)(void);
|
|
|
|
extern void (*s390_base_pgm_handler_fn)(void);
|
|
|
|
extern void (*s390_base_ext_handler_fn)(void);
|
|
|
|
|
2006-09-26 14:31:33 +08:00
|
|
|
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
|
|
|
|
|
2012-06-05 15:59:52 +08:00
|
|
|
extern int memcpy_real(void *, void *, size_t);
|
|
|
|
extern void memcpy_absolute(void *, void *, size_t);
|
|
|
|
|
|
|
|
#define mem_assign_absolute(dest, val) { \
|
|
|
|
__typeof__(dest) __tmp = (val); \
|
|
|
|
\
|
|
|
|
BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \
|
|
|
|
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
|
|
|
|
}
|
|
|
|
|
2012-09-05 19:26:11 +08:00
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
|
|
|
|
#endif /* __ASM_S390_PROCESSOR_H */
|