2005-10-10 12:19:43 +08:00
|
|
|
#ifndef _ASM_POWERPC_PROCESSOR_H
|
|
|
|
#define _ASM_POWERPC_PROCESSOR_H
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2005-10-10 12:19:43 +08:00
|
|
|
* Copyright (C) 2001 PPC 64 Team, IBM Corp
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
#include <asm/reg.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-06-25 12:07:18 +08:00
|
|
|
#ifdef CONFIG_VSX
|
|
|
|
#define TS_FPRWIDTH 2
|
2013-09-23 10:04:37 +08:00
|
|
|
|
|
|
|
#ifdef __BIG_ENDIAN__
|
|
|
|
#define TS_FPROFFSET 0
|
|
|
|
#define TS_VSRLOWOFFSET 1
|
|
|
|
#else
|
|
|
|
#define TS_FPROFFSET 1
|
|
|
|
#define TS_VSRLOWOFFSET 0
|
|
|
|
#endif
|
|
|
|
|
2008-06-25 12:07:18 +08:00
|
|
|
#else
|
2008-06-26 15:07:48 +08:00
|
|
|
#define TS_FPRWIDTH 1
|
2013-09-23 10:04:37 +08:00
|
|
|
#define TS_FPROFFSET 0
|
2008-06-25 12:07:18 +08:00
|
|
|
#endif
|
2008-06-26 15:07:48 +08:00
|
|
|
|
2012-12-07 05:49:56 +08:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
/* Default SMT priority is set to 3. Bits 11-13 of the PPR hold the priority. */
|
|
|
|
#define PPR_PRIORITY 3
|
|
|
|
#ifdef __ASSEMBLY__
|
|
|
|
#define INIT_PPR (PPR_PRIORITY << 50)
|
|
|
|
#else
|
|
|
|
#define INIT_PPR ((u64)PPR_PRIORITY << 50)
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif /* CONFIG_PPC64 */
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
#include <linux/compiler.h>
|
2011-04-23 05:48:27 +08:00
|
|
|
#include <linux/cache.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/ptrace.h>
|
|
|
|
#include <asm/types.h>
|
2012-12-20 22:06:44 +08:00
|
|
|
#include <asm/hw_breakpoint.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-11-10 10:37:51 +08:00
|
|
|
/* We do _not_ want to define new machine types at all, those must die
|
|
|
|
* in favor of using the device-tree
|
|
|
|
* -- BenH.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
|
2013-03-27 08:47:03 +08:00
|
|
|
/* PREP sub-platform types. Unused */
|
2005-04-17 06:20:36 +08:00
|
|
|
#define _PREP_Motorola 0x01 /* motorola prep */
|
|
|
|
#define _PREP_Firm 0x02 /* firmworks prep */
|
|
|
|
#define _PREP_IBM 0x00 /* ibm prep */
|
|
|
|
#define _PREP_Bull 0x03 /* bull prep */
|
|
|
|
|
2005-11-10 10:37:51 +08:00
|
|
|
/* CHRP sub-platform types. These are arbitrary */
|
2005-04-17 06:20:36 +08:00
|
|
|
#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
|
|
|
|
#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
|
|
|
|
#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
|
2006-07-04 12:16:28 +08:00
|
|
|
#define _CHRP_briq 0x07 /* TotalImpact's briQ */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-28 20:15:54 +08:00
|
|
|
#if defined(__KERNEL__) && defined(CONFIG_PPC32)
|
|
|
|
|
|
|
|
extern int _chrp_type;
|
2005-11-10 10:37:51 +08:00
|
|
|
|
2006-03-28 20:15:54 +08:00
|
|
|
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
/*
|
|
|
|
* Default implementation of macro that returns current
|
|
|
|
* instruction pointer ("program counter").
|
|
|
|
*/
|
|
|
|
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
|
|
|
|
|
|
|
|
/* Macros for adjusting thread priority (hardware multi-threading) */
|
|
|
|
#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
|
|
|
|
#define HMT_low() asm volatile("or 1,1,1 # low priority")
|
|
|
|
#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
|
|
|
|
#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
|
|
|
|
#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
|
|
|
|
#define HMT_high() asm volatile("or 3,3,3 # high priority")
|
|
|
|
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
struct task_struct;
|
2005-10-10 12:19:43 +08:00
|
|
|
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
|
2005-04-17 06:20:36 +08:00
|
|
|
void release_thread(struct task_struct *);
|
|
|
|
|
|
|
|
/* Lazy FPU handling on uni-processor */
|
|
|
|
extern struct task_struct *last_task_used_math;
|
|
|
|
extern struct task_struct *last_task_used_altivec;
|
2008-06-25 12:07:18 +08:00
|
|
|
extern struct task_struct *last_task_used_vsx;
|
2005-04-17 06:20:36 +08:00
|
|
|
extern struct task_struct *last_task_used_spe;
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
#ifdef CONFIG_PPC32
|
2008-05-23 23:59:15 +08:00
|
|
|
|
|
|
|
#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
|
|
|
|
#error User TASK_SIZE overlaps with KERNEL_START address
|
|
|
|
#endif
|
2005-10-10 12:19:43 +08:00
|
|
|
#define TASK_SIZE (CONFIG_TASK_SIZE)
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* This decides where the kernel will search for a free chunk of vm
|
|
|
|
* space during mmap's.
|
|
|
|
*/
|
|
|
|
#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
|
2005-10-10 12:19:43 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_PPC64
|
2012-09-10 10:52:55 +08:00
|
|
|
/* 64-bit user address space is 46-bits (64TB user VM) */
|
|
|
|
#define TASK_SIZE_USER64 (0x0000400000000000UL)
|
2005-10-10 12:19:43 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* 32-bit user address space is 4GB - 1 page
|
|
|
|
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
|
|
|
|
*/
|
|
|
|
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
|
|
|
|
|
2008-02-05 14:28:59 +08:00
|
|
|
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
|
2005-10-10 12:19:43 +08:00
|
|
|
TASK_SIZE_USER32 : TASK_SIZE_USER64)
|
2008-02-05 14:28:59 +08:00
|
|
|
#define TASK_SIZE TASK_SIZE_OF(current)
|
2005-10-10 12:19:43 +08:00
|
|
|
|
|
|
|
/* This decides where the kernel will search for a free chunk of vm
|
|
|
|
* space during mmap's.
|
|
|
|
*/
|
|
|
|
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
|
|
|
|
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
|
|
|
|
|
2010-08-27 11:49:11 +08:00
|
|
|
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
|
2005-10-10 12:19:43 +08:00
|
|
|
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-02-08 20:19:26 +08:00
|
|
|
#ifdef __powerpc64__
|
|
|
|
|
|
|
|
#define STACK_TOP_USER64 TASK_SIZE_USER64
|
|
|
|
#define STACK_TOP_USER32 TASK_SIZE_USER32
|
|
|
|
|
2010-08-27 11:49:11 +08:00
|
|
|
#define STACK_TOP (is_32bit_task() ? \
|
2008-02-08 20:19:26 +08:00
|
|
|
STACK_TOP_USER32 : STACK_TOP_USER64)
|
|
|
|
|
|
|
|
#define STACK_TOP_MAX STACK_TOP_USER64
|
|
|
|
|
|
|
|
#else /* __powerpc64__ */
|
|
|
|
|
|
|
|
#define STACK_TOP TASK_SIZE
|
|
|
|
#define STACK_TOP_MAX STACK_TOP
|
|
|
|
|
|
|
|
#endif /* __powerpc64__ */
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Segment descriptor used by get_fs()/set_fs() to record the current
 * user/kernel address-limit for uaccess checking (stored in
 * thread_struct.fs below).
 */
typedef struct {
	unsigned long seg;
} mm_segment_t;
|
|
|
|
|
2013-09-10 18:20:42 +08:00
|
|
|
#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
|
|
|
|
#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
|
|
|
|
|
|
|
|
/* FP and VSX 0-31 register set */
struct thread_fp_state {
	/*
	 * Each FP register occupies TS_FPRWIDTH doublewords: one when
	 * VSX is not configured, two (FP value + VSR low half) with
	 * CONFIG_VSX.  16-byte alignment is required for VSX load/store.
	 */
	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	u64	fpscr;		/* Floating point status */
};
|
|
|
|
|
|
|
|
/* Complete AltiVec register set including VSCR */
struct thread_vr_state {
	/* 16-byte alignment is required by the AltiVec load/store insns. */
	vector128	vr[32] __attribute__((aligned(16)));
	vector128	vscr __attribute__((aligned(16)));
};
|
2008-06-26 15:07:48 +08:00
|
|
|
|
2013-07-04 14:15:46 +08:00
|
|
|
/*
 * Per-thread snapshot of the BookE/embedded debug facility registers.
 * Empty on platforms without CONFIG_PPC_ADV_DEBUG_REGS.
 */
struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * The following help to manage the use of Debug Control Registers
	 * on the BookE platforms.
	 */
	uint32_t	dbcr0;
	uint32_t	dbcr1;
#ifdef CONFIG_BOOKE
	uint32_t	dbcr2;
#endif
	/*
	 * The stored value of the DBSR register will be the value at the
	 * last debug interrupt.  This register can only be read from the
	 * user (it is never written back to hardware from here) and helps
	 * describe the reason for the last debug trap.
	 */
	uint32_t	dbsr;
	/*
	 * The following will contain addresses used by debug applications
	 * to help trace and trap on particular address locations.
	 * The bits in the Debug Control Registers above help define which
	 * of the following registers will contain valid data and/or addresses.
	 */
	unsigned long	iac1;		/* instruction address compare */
	unsigned long	iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	unsigned long	iac3;
	unsigned long	iac4;
#endif
	unsigned long	dac1;		/* data address compare */
	unsigned long	dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	unsigned long	dvc1;		/* data value compare */
	unsigned long	dvc2;
#endif
#endif
};
|
|
|
|
|
|
|
|
/*
 * Per-thread architectural state saved/restored across context switches.
 * Field order and conditional layout are ABI for asm-offsets users, so
 * nothing here may be reordered casually.
 */
struct thread_struct {
	unsigned long	ksp;		/* Kernel stack pointer */

#ifdef CONFIG_PPC64
	unsigned long	ksp_vsid;	/* VSID of the kernel stack segment */
#endif
	struct pt_regs	*regs;		/* Pointer to saved register state */
	mm_segment_t	fs;		/* for get_fs() validation */
#ifdef CONFIG_BOOKE
	/* BookE base exception scratch space; align on cacheline */
	unsigned long	normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
	void		*pgdir;		/* root of page-table tree */
	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
#endif
	/* Debug Registers */
	struct debug_reg debug;
	struct thread_fp_state	fp_state;
	struct thread_fp_state	*fp_save_area;	/* where FP regs get saved, if not fp_state */
	int		fpexc_mode;	/* floating-point exception mode */
	unsigned int	align_ctl;	/* alignment handling control */
#ifdef CONFIG_PPC64
	unsigned long	start_tb;	/* Start purr when proc switched in */
	unsigned long	accum_tb;	/* Total accumulated purr for process */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM];	/* ptrace-installed HW breakpoints */
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
	unsigned long	trap_nr;	/* last trap # on this thread */
#ifdef CONFIG_ALTIVEC
	struct thread_vr_state vr_state;
	struct thread_vr_state *vr_save_area;	/* where VMX regs get saved, if not vr_state */
	unsigned long	vrsave;
	int		used_vr;	/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* VSR status */
	int		used_vsr;	/* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
	u64		acc;		/* Accumulator */
	unsigned long	spefscr;	/* SPE & eFP status */
	unsigned long	spefscr_last;	/* SPEFSCR value on last prctl
					   call or trap return */
	int		used_spe;	/* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u64		tm_tfhar;	/* Transaction fail handler addr */
	u64		tm_texasr;	/* Transaction exception & summary */
	u64		tm_tfiar;	/* Transaction fail instr address reg */
	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
	struct pt_regs	ckpt_regs;	/* Checkpointed registers */

	unsigned long	tm_tar;
	unsigned long	tm_ppr;
	unsigned long	tm_dscr;

	/*
	 * Transactional FP and VSX 0-31 register set.
	 * NOTE: the sense of these is the opposite of the integer ckpt_regs!
	 *
	 * When a transaction is active/signalled/scheduled etc., *regs is the
	 * most recent set of/speculated GPRs with ckpt_regs being the older
	 * checkpointed regs to which we roll back if transaction aborts.
	 *
	 * However, fp_state is the checkpointed 'base state' of FP regs, and
	 * transact_fp is the new set of transactional values.
	 * VRs work the same way.
	 */
	struct thread_fp_state transact_fp;
	struct thread_vr_state transact_vr;
	unsigned long	transact_vrsave;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	void		*kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu	*kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
	unsigned long	dscr;		/* data stream control register */
	int		dscr_inherit;	/* non-zero if dscr was set explicitly */
	unsigned long	ppr;		/* used to save/restore SMT priority */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long	tar;		/* target address register */
	/* Event-based branching state */
	unsigned long	ebbrr;
	unsigned long	ebbhr;
	unsigned long	bescr;
	/* Performance-monitor sampling registers */
	unsigned long	siar;
	unsigned long	sdar;
	unsigned long	sier;
	unsigned long	mmcr2;
	unsigned 	mmcr0;
	unsigned 	used_ebb;	/* set if process has used EBB */
#endif
};
|
|
|
|
|
|
|
|
#define ARCH_MIN_TASKALIGN 16
|
|
|
|
|
|
|
|
#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
|
2008-04-28 14:21:22 +08:00
|
|
|
#define INIT_SP_LIMIT \
|
|
|
|
(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-10-28 11:50:21 +08:00
|
|
|
#ifdef CONFIG_SPE
|
powerpc: fix exception clearing in e500 SPE float emulation
The e500 SPE floating-point emulation code clears existing exceptions
(__FPU_FPSCR &= ~FP_EX_MASK;) before ORing in the exceptions from the
emulated operation. However, these exception bits are the "sticky",
cumulative exception bits, and should only be cleared by the user
program setting SPEFSCR, not implicitly by any floating-point
instruction (whether executed purely by the hardware or emulated).
The spurious clearing of these bits shows up as missing exceptions in
glibc testing.
Fixing this, however, is not as simple as just not clearing the bits,
because while the bits may be from previous floating-point operations
(in which case they should not be cleared), the processor can also set
the sticky bits itself before the interrupt for an exception occurs,
and this can happen in cases when IEEE 754 semantics are that the
sticky bit should not be set. Specifically, the "invalid" sticky bit
is set in various cases with non-finite operands, where IEEE 754
semantics do not involve raising such an exception, and the
"underflow" sticky bit is set in cases of exact underflow, whereas
IEEE 754 semantics are that this flag is set only for inexact
underflow. Thus, for correct emulation the kernel needs to know the
setting of these two sticky bits before the instruction being
emulated.
When a floating-point operation raises an exception, the kernel can
note the state of the sticky bits immediately afterwards. Some
<fenv.h> functions that affect the state of these bits, such as
fesetenv and feholdexcept, need to use prctl with PR_GET_FPEXC and
PR_SET_FPEXC anyway, and so it is natural to record the state of those
bits during that call into the kernel and so avoid any need for a
separate call into the kernel to inform it of a change to those bits.
Thus, the interface I chose to use (in this patch and the glibc port)
is that one of those prctl calls must be made after any userspace
change to those sticky bits, other than through a floating-point
operation that traps into the kernel anyway. feclearexcept and
fesetexceptflag duly make those calls, which would not be required
were it not for this issue.
The previous EGLIBC port, and the uClibc code copied from it, is
fundamentally broken as regards any use of prctl for floating-point
exceptions because it didn't use the PR_FP_EXC_SW_ENABLE bit in its
prctl calls (and did various worse things, such as passing a pointer
when prctl expected an integer). If you avoid anything where prctl is
used, the clearing of sticky bits still means it will never give
anything approximating correct exception semantics with existing
kernels. I don't believe the patch makes things any worse for
existing code that doesn't try to inform the kernel of changes to
sticky bits - such code may get incorrect exceptions in some cases,
but it would have done so anyway in other cases.
Signed-off-by: Joseph Myers <joseph@codesourcery.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
2013-12-11 07:07:45 +08:00
|
|
|
#define SPEFSCR_INIT \
|
|
|
|
.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
|
|
|
|
.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
|
2008-10-28 11:50:21 +08:00
|
|
|
#else
|
|
|
|
#define SPEFSCR_INIT
|
|
|
|
#endif
|
2005-10-10 12:19:43 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_PPC32
|
2005-04-17 06:20:36 +08:00
|
|
|
#define INIT_THREAD { \
|
|
|
|
.ksp = INIT_SP, \
|
2008-04-28 14:21:22 +08:00
|
|
|
.ksp_limit = INIT_SP_LIMIT, \
|
2005-04-17 06:20:36 +08:00
|
|
|
.fs = KERNEL_DS, \
|
|
|
|
.pgdir = swapper_pg_dir, \
|
|
|
|
.fpexc_mode = MSR_FE0 | MSR_FE1, \
|
2008-10-28 11:50:21 +08:00
|
|
|
SPEFSCR_INIT \
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2005-10-10 12:19:43 +08:00
|
|
|
#else
|
|
|
|
#define INIT_THREAD { \
|
|
|
|
.ksp = INIT_SP, \
|
|
|
|
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
|
|
|
|
.fs = KERNEL_DS, \
|
[POWERPC] disable floating point exceptions for init
Floating point exceptions should not be enabled by default,
as this setting impacts the performance on some CPUs, in
particular the Cell BE. Since the bits are inherited from
parent processes, the place to change the default is the
thread struct used for init.
glibc sets this up correctly per thread in its fesetenv
function, so user space should not be impacted by this
setting. None of the other common libc implementations
(uClibc, dietlibc, newlib, klibc) has support for fp
exceptions, so they are unlikely to be hit by this either.
There is a small risk that somebody wrote their own
application that manually sets the fpscr bits instead
of calling fesetenv, without changing the MSR bits as well.
Those programs will break with this change.
It probably makes sense to change glibc in the future
to be more clever about FE bits, so that when running
on a CPU where this is expensive, it disables exceptions
ASAP, while it keeps them enabled on CPUs where running
with exceptions on is cheaper than changing the state
often.
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-06-20 08:30:33 +08:00
|
|
|
.fpexc_mode = 0, \
|
2012-12-07 05:49:56 +08:00
|
|
|
.ppr = INIT_PPR, \
|
2005-10-10 12:19:43 +08:00
|
|
|
}
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Return saved PC of a blocked thread. For now, this is the "user" PC
|
|
|
|
*/
|
2005-10-10 12:19:43 +08:00
|
|
|
#define thread_saved_pc(tsk) \
|
|
|
|
((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-07-07 22:22:27 +08:00
|
|
|
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
unsigned long get_wchan(struct task_struct *p);
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
|
|
|
|
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Get/set floating-point exception mode */
|
2005-10-10 12:19:43 +08:00
|
|
|
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
|
|
|
|
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
|
|
|
|
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
|
|
|
|
|
2006-06-07 14:14:40 +08:00
|
|
|
#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
|
|
|
|
#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))
|
|
|
|
|
|
|
|
extern int get_endian(struct task_struct *tsk, unsigned long adr);
|
|
|
|
extern int set_endian(struct task_struct *tsk, unsigned int val);
|
|
|
|
|
2006-06-07 14:15:39 +08:00
|
|
|
#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
|
|
|
|
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
|
|
|
|
|
|
|
|
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
|
|
|
|
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
|
|
|
|
|
powerpc: Don't corrupt transactional state when using FP/VMX in kernel
Currently, when we have a process using the transactional memory
facilities on POWER8 (that is, the processor is in transactional
or suspended state), and the process enters the kernel and the
kernel then uses the floating-point or vector (VMX/Altivec) facility,
we end up corrupting the user-visible FP/VMX/VSX state. This
happens, for example, if a page fault causes a copy-on-write
operation, because the copy_page function will use VMX to do the
copy on POWER8. The test program below demonstrates the bug.
The bug happens because when FP/VMX state for a transactional process
is stored in the thread_struct, we store the checkpointed state in
.fp_state/.vr_state and the transactional (current) state in
.transact_fp/.transact_vr. However, when the kernel wants to use
FP/VMX, it calls enable_kernel_fp() or enable_kernel_altivec(),
which saves the current state in .fp_state/.vr_state. Furthermore,
when we return to the user process we return with FP/VMX/VSX
disabled. The next time the process uses FP/VMX/VSX, we don't know
which set of state (the current register values, .fp_state/.vr_state,
or .transact_fp/.transact_vr) we should be using, since we have no
way to tell if we are still in the same transaction, and if not,
whether the previous transaction succeeded or failed.
Thus it is necessary to strictly adhere to the rule that if FP has
been enabled at any point in a transaction, we must keep FP enabled
for the user process with the current transactional state in the
FP registers, until we detect that it is no longer in a transaction.
Similarly for VMX; once enabled it must stay enabled until the
process is no longer transactional.
In order to keep this rule, we add a new thread_info flag which we
test when returning from the kernel to userspace, called TIF_RESTORE_TM.
This flag indicates that there is FP/VMX/VSX state to be restored
before entering userspace, and when it is set the .tm_orig_msr field
in the thread_struct indicates what state needs to be restored.
The restoration is done by restore_tm_state(). The TIF_RESTORE_TM
bit is set by new giveup_fpu/altivec_maybe_transactional helpers,
which are called from enable_kernel_fp/altivec, giveup_vsx, and
flush_fp/altivec_to_thread instead of giveup_fpu/altivec.
The other thing to be done is to get the transactional FP/VMX/VSX
state from .fp_state/.vr_state when doing reclaim, if that state
has been saved there by giveup_fpu/altivec_maybe_transactional.
Having done this, we set the FP/VMX bit in the thread's MSR after
reclaim to indicate that that part of the state is now valid
(having been reclaimed from the processor's checkpointed state).
Finally, in the signal handling code, we move the clearing of the
transactional state bits in the thread's MSR a bit earlier, before
calling flush_fp_to_thread(), so that we don't unnecessarily set
the TIF_RESTORE_TM bit.
This is the test program:
/* Michael Neuling 4/12/2013
*
* See if the altivec state is leaked out of an aborted transaction due to
* kernel vmx copy loops.
*
* gcc -m64 htm_vmxcopy.c -o htm_vmxcopy
*
*/
/* We don't use all of these, but for reference: */
int main(int argc, char *argv[])
{
long double vecin = 1.3;
long double vecout;
unsigned long pgsize = getpagesize();
int i;
int fd;
int size = pgsize*16;
char tmpfile[] = "/tmp/page_faultXXXXXX";
char buf[pgsize];
char *a;
uint64_t aborted = 0;
fd = mkstemp(tmpfile);
assert(fd >= 0);
memset(buf, 0, pgsize);
for (i = 0; i < size; i += pgsize)
assert(write(fd, buf, pgsize) == pgsize);
unlink(tmpfile);
a = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
assert(a != MAP_FAILED);
asm __volatile__(
"lxvd2x 40,0,%[vecinptr] ; " // set 40 to initial value
TBEGIN
"beq 3f ;"
TSUSPEND
"xxlxor 40,40,40 ; " // set 40 to 0
"std 5, 0(%[map]) ;" // cause kernel vmx copy page
TABORT
TRESUME
TEND
"li %[res], 0 ;"
"b 5f ;"
"3: ;" // Abort handler
"li %[res], 1 ;"
"5: ;"
"stxvd2x 40,0,%[vecoutptr] ; "
: [res]"=r"(aborted)
: [vecinptr]"r"(&vecin),
[vecoutptr]"r"(&vecout),
[map]"r"(a)
: "memory", "r0", "r3", "r4", "r5", "r6", "r7");
if (aborted && (vecin != vecout)){
printf("FAILED: vector state leaked on abort %f != %f\n",
(double)vecin, (double)vecout);
exit(1);
}
munmap(a, size);
close(fd);
printf("PASSED!\n");
return 0;
}
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2014-01-13 12:56:29 +08:00
|
|
|
extern void fp_enable(void);
|
|
|
|
extern void vec_enable(void);
|
2013-09-10 18:21:10 +08:00
|
|
|
extern void load_fp_state(struct thread_fp_state *fp);
|
|
|
|
extern void store_fp_state(struct thread_fp_state *fp);
|
|
|
|
extern void load_vr_state(struct thread_vr_state *vr);
|
|
|
|
extern void store_vr_state(struct thread_vr_state *vr);
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
|
|
|
|
}
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
static inline unsigned long __pack_fe01(unsigned int fpmode)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
|
|
|
|
}
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
#ifdef CONFIG_PPC64
/*
 * On 64-bit, briefly drop the SMT thread priority (HMT_low) and restore
 * it (HMT_medium) so a busy-wait loop yields execution resources to
 * sibling hardware threads, plus a compiler barrier so the spin
 * condition is re-read each iteration.
 */
#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)
#else
/* No SMT priority control on 32-bit; just a compiler barrier. */
#define cpu_relax()	barrier()
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
arch, locking: Ciao arch_mutex_cpu_relax()
The arch_mutex_cpu_relax() function, introduced by 34b133f, is
hacky and ugly. It was added a few years ago to address the fact
that common cpu_relax() calls include yielding on s390, and thus
impact the optimistic spinning functionality of mutexes. Nowadays
we use this function well beyond mutexes: rwsem, qrwlock, mcs and
lockref. Since the macro that defines the call is in the mutex header,
any users must include mutex.h and the naming is misleading as well.
This patch (i) renames the call to cpu_relax_lowlatency ("relax, but
only if you can do it with very low latency") and (ii) defines it in
each arch's asm/processor.h local header, just like for regular cpu_relax
functions. On all archs, except s390, cpu_relax_lowlatency is simply cpu_relax,
and thus we can take it out of mutex.h. While this can seem redundant,
I believe it is a good choice as it allows us to move out arch specific
logic from generic locking primitives and enables future(?) archs to
transparently define it, similarly to System Z.
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Bharat Bhushan <r65777@freescale.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Liqin <liqin.linux@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Howells <dhowells@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Cc: Dominik Dingel <dingel@linux.vnet.ibm.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Joe Perches <joe@perches.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Joseph Myers <joseph@codesourcery.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Lennox Wu <lennox.wu@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Nicolas Pitre <nico@linaro.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Qais Yousef <qais.yousef@imgtec.com>
Cc: Qiaowei Ren <qiaowei.ren@intel.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Stratos Karafotis <stratosk@semaphore.gr>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vasily Kulikov <segoon@openwall.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Wolfram Sang <wsa@the-dreams.de>
Cc: adi-buildroot-devel@lists.sourceforge.net
Cc: linux390@de.ibm.com
Cc: linux-alpha@vger.kernel.org
Cc: linux-am33-list@redhat.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-c6x-dev@linux-c6x.org
Cc: linux-cris-kernel@axis.com
Cc: linux-hexagon@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux@lists.openrisc.net
Cc: linux-m32r-ja@ml.linux-m32r.org
Cc: linux-m32r@ml.linux-m32r.org
Cc: linux-m68k@lists.linux-m68k.org
Cc: linux-metag@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: linux-parisc@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1404079773.2619.4.camel@buesod1.americas.hpqcorp.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-06-30 06:09:33 +08:00
|
|
|
/*
 * "Relax, but only if it can be done with very low latency" — used by
 * optimistic-spinning locking primitives.  On powerpc this is simply
 * cpu_relax(); only s390 needs a distinct implementation.
 */
#define cpu_relax_lowlatency() cpu_relax()
|
|
|
|
|
2006-03-27 08:46:18 +08:00
|
|
|
/*
 * Check that a certain kernel stack pointer is valid in task_struct p:
 * i.e. that an nbytes-sized access at sp falls within p's kernel stack.
 * NOTE(review): return convention (non-zero == valid) not visible here —
 * confirm against the definition in process.c.
 */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Prefetch macros.  These tell the generic code that this architecture
 * provides its own prefetch(), prefetchw() and spin_lock_prefetch()
 * implementations (defined below).
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
/*
 * Hint that the cache line containing x will soon be read, using the
 * dcbt (data cache block touch) instruction.  A NULL pointer is
 * silently ignored rather than prefetched.
 */
static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
/*
 * Hint that the cache line containing x will soon be written, using the
 * dcbtst (data cache block touch for store) instruction.  A NULL
 * pointer is silently ignored rather than prefetched.
 */
static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
|
|
|
|
|
|
|
|
/* A lock about to be taken will be written, so prefetch it for write. */
#define spin_lock_prefetch(x) prefetchw(x)
|
|
|
|
|
2005-10-10 12:19:43 +08:00
|
|
|
/* This arch supplies its own arch_pick_mmap_layout() (defined elsewhere). */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-03-25 14:23:59 +08:00
|
|
|
#ifdef CONFIG_PPC64
/*
 * Sanitize a user stack pointer: a 32-bit task on a 64-bit kernel may
 * carry garbage in the upper 32 bits of its SP, so mask those off.
 */
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	return is_32 ? (sp & 0x0ffffffffUL) : sp;
}
#else
/* On a 32-bit kernel the stack pointer is already the right width. */
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	return sp;
}
#endif
|
|
|
|
|
2011-11-30 10:47:03 +08:00
|
|
|
/* Non-zero disables the cpuidle framework (boot-time override below). */
extern unsigned long cpuidle_disable;
/* Boot-time idle policy override values stored in cpuidle_disable. */
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap;	/* set if nap mode can be used in idle loop */
/* Enter POWER7 nap; check_irq requests an interrupt re-check first. */
extern void power7_nap(int check_irq);
/* Enter the deeper POWER7 sleep state. */
extern void power7_sleep(void);
|
2012-03-29 01:30:02 +08:00
|
|
|
/* Flush the CPU instruction cache (implemented in low-level asm). */
extern void flush_instruction_cache(void);
/* Immediately hard-reset the machine. */
extern void hard_reset_now(void);
/* Immediately power the machine off. */
extern void poweroff_now(void);
/* Emulate/fix up an unaligned access that trapped; regs from the fault. */
extern int fix_alignment(struct pt_regs *);
/* Convert single-precision *from to double-precision *to. */
extern void cvt_fd(float *from, double *to);
/* Convert double-precision *from to single-precision *to. */
extern void cvt_df(double *from, float *to);
/* Atomically clear nmask bits and set or_val bits in the MSR. */
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
|
|
|
|
|
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
/*
|
|
|
|
* We handle most unaligned accesses in hardware. On the other hand
|
|
|
|
* unaligned DMA can be very expensive on some ppc64 IO chips (it does
|
|
|
|
* powers of 2 writes until it reaches sufficient alignment).
|
|
|
|
*
|
|
|
|
* Based on this we disable the IP header alignment in network drivers.
|
|
|
|
*/
|
|
|
|
#define NET_IP_ALIGN 0
|
|
|
|
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* __KERNEL__ */
|
2005-10-10 12:19:43 +08:00
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif /* _ASM_POWERPC_PROCESSOR_H */
|