mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from H. Peter Anvin.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x32, siginfo: Provide proper overrides for x32 siginfo_t
  asm-generic: Allow overriding clock_t and add attributes to siginfo_t
  x32: Check __ILP32__ instead of __LP64__ for x32
  x86, acpi: Call acpi_enter_sleep_state via an asmlinkage C function from assembler
  ACPI: Convert wake_sleep_flags to a value instead of function
  x86, apic: APIC code touches invalid MSR on P5 class machines
  i387: ptrace breaks the lazy-fpu-restore logic
  x86/platform: Remove incorrect error message in x86_default_fixup_cpu_id()
  x86, efi: Add dedicated EFI stub entry point
  x86/amd: Remove broken links from comment and kernel message
  x86, microcode: Ensure that module is only loaded on supported AMD CPUs
  x86, microcode: Fix sysfs warning during module unload on unsupported CPUs
commit 86ec090e58
@@ -33,6 +33,9 @@
 	__HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
+	jmp	preferred_addr
+
+	.balign	0x10
 	/*
 	 * We don't need the return address, so set up the stack so
 	 * efi_main() can find its arguments.
@@ -41,12 +44,17 @@ ENTRY(startup_32)
 
 	call	efi_main
 	cmpl	$0, %eax
-	je	preferred_addr
 	movl	%eax, %esi
-	call	1f
+	jne	2f
 1:
+	/* EFI init failed, so hang. */
+	hlt
+	jmp	1b
+2:
+	call	3f
+3:
 	popl	%eax
-	subl	$1b, %eax
+	subl	$3b, %eax
 	subl	BP_pref_address(%esi), %eax
 	add	BP_code32_start(%esi), %eax
 	leal	preferred_addr(%eax), %eax
@@ -200,18 +200,28 @@ ENTRY(startup_64)
 	 * entire text+data+bss and hopefully all of memory.
 	 */
 #ifdef CONFIG_EFI_STUB
-	pushq	%rsi
+	/*
+	 * The entry point for the PE/COFF executable is 0x210, so only
+	 * legacy boot loaders will execute this jmp.
+	 */
+	jmp	preferred_addr
+
+	.org 0x210
 	mov	%rcx, %rdi
 	mov	%rdx, %rsi
 	call	efi_main
-	popq	%rsi
-	cmpq	$0,%rax
-	je	preferred_addr
 	movq	%rax,%rsi
-	call	1f
+	cmpq	$0,%rax
+	jne	2f
 1:
+	/* EFI init failed, so hang. */
+	hlt
+	jmp	1b
+2:
+	call	3f
+3:
 	popq	%rax
-	subq	$1b, %rax
+	subq	$3b, %rax
 	subq	BP_pref_address(%rsi), %rax
 	add	BP_code32_start(%esi), %eax
 	leaq	preferred_addr(%rax), %rax
@@ -205,8 +205,13 @@ int main(int argc, char ** argv)
 	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
 
 #ifdef CONFIG_X86_32
-	/* Address of entry point */
-	put_unaligned_le32(i, &buf[pe_header + 0x28]);
+	/*
+	 * Address of entry point.
+	 *
+	 * The EFI stub entry point is +16 bytes from the start of
+	 * the .text section.
+	 */
+	put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
 
 	/* .text size */
 	put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
@@ -217,9 +222,11 @@ int main(int argc, char ** argv)
 	/*
 	 * Address of entry point. startup_32 is at the beginning and
 	 * the 64-bit entry point (startup_64) is always 512 bytes
-	 * after.
+	 * after. The EFI stub entry point is 16 bytes after that, as
+	 * the first instruction allows legacy loaders to jump over
+	 * the EFI stub initialisation
 	 */
-	put_unaligned_le32(i + 512, &buf[pe_header + 0x28]);
+	put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
 
 	/* .text size */
 	put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
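For the 64-bit image the arithmetic is: startup_64 sits 512 bytes into .text and the EFI stub entry 16 bytes after that, so i + 528 is exactly the .org 0x210 location added in the head_64.S hunk above. Offset 0x28 is the AddressOfEntryPoint field of the PE optional header, which begins 24 bytes past the PE signature that pe_header points at. The sketch below only illustrates what put_unaligned_le32() does to that field; store_le32 and the numeric values are stand-ins, not the real build.c:

	#include <stdint.h>
	#include <stdio.h>

	/* Store a 32-bit value at an unaligned position, least-significant byte first. */
	static void store_le32(uint32_t val, uint8_t *p)
	{
		p[0] = val & 0xff;
		p[1] = (val >> 8) & 0xff;
		p[2] = (val >> 16) & 0xff;
		p[3] = (val >> 24) & 0xff;
	}

	int main(void)
	{
		uint8_t pe_opt_hdr[0x60] = { 0 };	/* stand-in for the bzImage header buffer */
		uint32_t text_start = 0x4000;		/* hypothetical value of "i" (start of .text) */
		uint32_t entry = text_start + 512 + 16;	/* startup_64 + 16 = the .org 0x210 stub */

		store_le32(entry, &pe_opt_hdr[0x28]);	/* AddressOfEntryPoint */
		printf("entry RVA 0x%x -> bytes %02x %02x %02x %02x\n",
		       entry, pe_opt_hdr[0x28], pe_opt_hdr[0x29],
		       pe_opt_hdr[0x2a], pe_opt_hdr[0x2b]);
		return 0;
	}

Storing the least-significant byte first is what keeps the field little-endian regardless of the host running the build tool.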
@@ -7,9 +7,9 @@
 #else
 # ifdef __i386__
 #  include "posix_types_32.h"
-# elif defined(__LP64__)
-#  include "posix_types_64.h"
-# else
+# elif defined(__ILP32__)
 #  include "posix_types_x32.h"
+# else
+#  include "posix_types_64.h"
 # endif
 #endif
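The __ILP32__ and __LP64__ macros the header now tests are ordinary compiler-predefined macros, so the same three-way ABI distinction can be made in user code. A small hedged example of that check (standard GCC/Clang behaviour, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		/* On x86-64 the compiler defines __x86_64__ for both ABIs;
		 * __ILP32__ marks the x32 ABI (32-bit longs and pointers),
		 * __LP64__ marks the classic 64-bit ABI. */
	#if defined(__x86_64__) && defined(__ILP32__)
		puts("x32 ABI");
	#elif defined(__x86_64__) && defined(__LP64__)
		puts("x86-64 ABI");
	#elif defined(__i386__)
		puts("ia32 ABI");
	#else
		puts("other");
	#endif
		printf("sizeof(long) = %zu, sizeof(void *) = %zu\n",
		       sizeof(long), sizeof(void *));
		return 0;
	}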
@@ -257,7 +257,7 @@ struct sigcontext {
 	__u64 oldmask;
 	__u64 cr2;
 	struct _fpstate __user *fpstate;	/* zero when no FPU context */
-#ifndef __LP64__
+#ifdef __ILP32__
 	__u32 __fpstate_pad;
 #endif
 	__u64 reserved1[8];
@@ -2,7 +2,13 @@
 #define _ASM_X86_SIGINFO_H
 
 #ifdef __x86_64__
-# define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+# ifdef __ILP32__ /* x32 */
+typedef long long __kernel_si_clock_t __attribute__((aligned(4)));
+#  define __ARCH_SI_CLOCK_T		__kernel_si_clock_t
+#  define __ARCH_SI_ATTRIBUTES		__attribute__((aligned(8)))
+# else /* x86-64 */
+#  define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+# endif
 #endif
 
 #include <asm-generic/siginfo.h>
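The typedef pins the 64-bit clock_t fields at 4-byte alignment inside siginfo_t, while aligned(8) on the whole type keeps the overall object 8-byte aligned. A quick hedged illustration of how such an aligned typedef changes layout (plain GCC/Clang C with hypothetical names, not kernel code):

	#include <stdio.h>

	typedef long long clock64_a4 __attribute__((aligned(4)));	/* mirrors __kernel_si_clock_t */

	struct natural { int tag; long long  val; };
	struct reduced { int tag; clock64_a4 val; };

	int main(void)
	{
		printf("_Alignof(long long)  = %zu\n", _Alignof(long long));
		printf("_Alignof(clock64_a4) = %zu\n", _Alignof(clock64_a4));
		printf("sizeof(struct natural) = %zu, sizeof(struct reduced) = %zu\n",
		       sizeof(struct natural), sizeof(struct reduced));
		return 0;
	}

On an LP64 build the reduced struct drops the padding that the naturally aligned 64-bit member would otherwise force after the leading int.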
@@ -63,10 +63,10 @@
 #else
 # ifdef __i386__
 #  include <asm/unistd_32.h>
-# elif defined(__LP64__)
-#  include <asm/unistd_64.h>
-# else
+# elif defined(__ILP32__)
 #  include <asm/unistd_x32.h>
+# else
+#  include <asm/unistd_64.h>
 # endif
 #endif
 
@@ -195,6 +195,5 @@ extern struct x86_msi_ops x86_msi;
 
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
-extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
 
 #endif
@@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags;
 static char temp_stack[4096];
 #endif
 
+asmlinkage void acpi_enter_s3(void)
+{
+	acpi_enter_sleep_state(3, wake_sleep_flags);
+}
 /**
  * acpi_suspend_lowlevel - save kernel state
  *
@@ -3,12 +3,16 @@
  */
 
 #include <asm/trampoline.h>
+#include <linux/linkage.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
 
 extern int wakeup_pmode_return;
 
+extern u8 wake_sleep_flags;
+extern asmlinkage void acpi_enter_s3(void);
+
 extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 extern void wakeup_long64(void);
 
@@ -74,9 +74,7 @@ restore_registers:
 ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
-	pushl	$3
-	call	acpi_enter_sleep_state
-	addl	$4, %esp
+	call	acpi_enter_s3
 
 #	In case of S3 failure, we'll emerge here.  Jump
 #	to ret_point to recover
@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
 	movq	%rsi, saved_rsi
 
 	addq	$8, %rsp
-	movl	$3, %edi
-	xorl	%eax, %eax
-	call	acpi_enter_sleep_state
+	call	acpi_enter_s3
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point
 
@@ -1637,9 +1637,11 @@ static int __init apic_verify(void)
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
 	/* The BIOS may have set up the APIC at some other address */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (l & MSR_IA32_APICBASE_ENABLE)
-		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (l & MSR_IA32_APICBASE_ENABLE)
+			mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	}
 
 	pr_info("Found and enabled local APIC!\n");
 	return 0;
@@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr)
 	 * MSR. This can only be done in software for Intel P6 or later
 	 * and AMD K7 (Model > 1) or later.
 	 */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-		pr_info("Local APIC disabled by BIOS -- reenabling.\n");
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
-		enabled_via_apicbase = 1;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+			pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+			enabled_via_apicbase = 1;
+		}
 	}
 	return apic_verify();
 }
@@ -2209,10 +2213,12 @@ static void lapic_resume(void)
 		 * FIXME! This will be wrong if we ever support suspend on
 		 * SMP! We'll need to do this as part of the CPU restore!
 		 */
-		rdmsr(MSR_IA32_APICBASE, l, h);
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
+		if (boot_cpu_data.x86 >= 6) {
+			rdmsr(MSR_IA32_APICBASE, l, h);
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+		}
 	}
 
 	maxlvt = lapic_get_maxlvt();
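All three hunks wrap the same MSR access in a family check because IA32_APICBASE (MSR 0x1B) only exists from the P6 family onwards; on a P5-class part the rdmsr can fault, which is what the commit message calls touching an invalid MSR. A hedged userspace sketch of reading the family number these checks rely on, using CPUID via GCC's <cpuid.h> (illustrative only, not kernel code):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return 1;

		/* Family is bits 11:8, extended by bits 27:20 when the base family is 0xf. */
		unsigned int family = (eax >> 8) & 0xf;
		if (family == 0xf)
			family += (eax >> 20) & 0xff;

		printf("CPU family: %u (%s)\n", family,
		       family >= 6 ? "has IA32_APICBASE MSR" : "P5 class or older");
		return 0;
	}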
@@ -207,8 +207,11 @@ static void __init map_csrs(void)
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-	c->phys_proc_id = node;
-	per_cpu(cpu_llc_id, smp_processor_id()) = node;
+
+	if (c->phys_proc_id != node) {
+		c->phys_proc_id = node;
+		per_cpu(cpu_llc_id, smp_processor_id()) = node;
+	}
 }
 
 static int __init numachip_system_init(void)
@@ -26,7 +26,8 @@
  * contact AMD for precise details and a CPU swap.
  *
  * See	http://www.multimania.com/poulot/k6bug.html
- *	http://www.amd.com/K6/k6docs/revgd.html
+ *	and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
+ *	(Publication # 21266 Issue Date: August 1998)
  *
  * The following test is erm.. interesting. AMD neglected to up
  * the chip setting when fixing the bug but they also tweaked some
@@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 				"system stability may be impaired when more than 32 MB are used.\n");
 		else
 			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
-		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 	}
 
 	/* K6 with old style WHCR */
@@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = per_cpu(cpu_llc_id, cpu);
 
 	/*
-	 * If core numbers are inconsistent, it's likely a multi-fabric platform,
-	 * so invoke platform-specific handler
+	 * On multi-fabric platform (e.g. Numascale NumaChip) a
+	 * platform-specific handler needs to be called to fixup some
+	 * IDs of the CPU.
 	 */
-	if (c->phys_proc_id != node)
+	if (x86_cpuinit.fixup_cpu_id)
 		x86_cpuinit.fixup_cpu_id(c, node);
 
 	if (!node_online(node)) {
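With the default x86_default_fixup_cpu_id() removed in the hunks that follow, fixup_cpu_id becomes an optional hook that only NumaChip-style platforms install, so the caller now tests the function pointer before invoking it. A minimal hedged sketch of that optional-callback pattern (userspace C with hypothetical names, not from the patch):

	#include <stdio.h>

	struct cpu_ops {
		/* optional platform hook; stays NULL unless a platform installs one */
		void (*fixup_cpu_id)(int *proc_id, int node);
	};

	static void numachip_like_fixup(int *proc_id, int node)
	{
		if (*proc_id != node)
			*proc_id = node;
	}

	static struct cpu_ops ops = { .fixup_cpu_id = numachip_like_fixup };

	int main(void)
	{
		int proc_id = 3;

		if (ops.fixup_cpu_id)		/* mirrors: if (x86_cpuinit.fixup_cpu_id) */
			ops.fixup_cpu_id(&proc_id, 7);
		printf("proc_id = %d\n", proc_id);
		return 0;
	}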
@@ -1162,15 +1162,6 @@ static void dbg_restore_debug_regs(void)
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */
 
-/*
- * Prints an error where the NUMA and configured core-number mismatch and the
- * platform didn't override this to fix it up
- */
-void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
-{
-	pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
@@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk)
 	if (tsk_used_math(tsk)) {
 		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
+		tsk->thread.fpu.last_cpu = ~0;
 		return 0;
 	}
 
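last_cpu records which CPU's registers still hold the task's FPU state; lazy restore is only legal when the task resumes on that same CPU, so writing ~0 after init_fpu() has touched the saved image (e.g. on behalf of ptrace) forces the next context switch to do a full reload. A simplified hedged sketch of that tag-and-invalidate idea (not the kernel's real structures):

	#include <stdio.h>

	struct fpu_state {
		unsigned int last_cpu;	/* CPU whose registers still hold this state; ~0u = none */
		double regs[8];		/* stand-in for the real FPU/XMM save area */
	};

	/* Restore can be skipped only if this CPU still holds the task's registers. */
	static int fpu_lazy_restore_ok(const struct fpu_state *fpu, unsigned int cpu)
	{
		return fpu->last_cpu == cpu;
	}

	static void invalidate(struct fpu_state *fpu)
	{
		fpu->last_cpu = ~0u;	/* what the added line above does: force a real reload */
	}

	int main(void)
	{
		struct fpu_state fpu = { .last_cpu = 2 };

		printf("resume on cpu 2, lazy ok: %d\n", fpu_lazy_restore_ok(&fpu, 2));
		invalidate(&fpu);	/* e.g. after the saved state was rewritten */
		printf("after invalidate, lazy ok: %d\n", fpu_lazy_restore_ok(&fpu, 2));
		return 0;
	}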
@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-		pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
-		return -1;
-	}
-
 	csig->rev = c->microcode;
 	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
 
@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {
 
 struct microcode_ops * __init init_amd_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
+		pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	patch = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!patch)
 		return NULL;
@@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
 	if (err)
 		return err;
 
-	if (microcode_init_cpu(cpu) == UCODE_ERROR) {
-		sysfs_remove_group(&dev->kobj, &mc_attr_group);
+	if (microcode_init_cpu(cpu) == UCODE_ERROR)
 		return -EINVAL;
-	}
 
 	return err;
 }
@@ -528,11 +526,11 @@ static int __init microcode_init(void)
 		microcode_ops = init_intel_microcode();
 	else if (c->x86_vendor == X86_VENDOR_AMD)
 		microcode_ops = init_amd_microcode();
-
-	if (!microcode_ops) {
+	else
 		pr_err("no support for this CPU vendor\n");
+
+	if (!microcode_ops)
 		return -ENODEV;
-	}
 
 	microcode_pdev = platform_device_register_simple("microcode", -1,
 							  NULL, 0);
@@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = {
 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 	.early_percpu_clock_init	= x86_init_noop,
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
-	.fixup_cpu_id			= x86_default_fixup_cpu_id,
 };
 
 static void default_nmi_init(void) { };
@@ -28,24 +28,34 @@
 #include "internal.h"
 #include "sleep.h"
 
+u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
 static unsigned int gts, bfs;
-module_param(gts, uint, 0644);
-module_param(bfs, uint, 0644);
+static int set_param_wake_flag(const char *val, struct kernel_param *kp)
+{
+	int ret = param_set_int(val, kp);
+
+	if (ret)
+		return ret;
+
+	if (kp->arg == (const char *)&gts) {
+		if (gts)
+			wake_sleep_flags |= ACPI_EXECUTE_GTS;
+		else
+			wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
+	}
+	if (kp->arg == (const char *)&bfs) {
+		if (bfs)
+			wake_sleep_flags |= ACPI_EXECUTE_BFS;
+		else
+			wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
+	}
+	return ret;
+}
+module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
+module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
 MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
 MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
 
-static u8 wake_sleep_flags(void)
-{
-	u8 flags = ACPI_NO_OPTIONAL_METHODS;
-
-	if (gts)
-		flags |= ACPI_EXECUTE_GTS;
-	if (bfs)
-		flags |= ACPI_EXECUTE_BFS;
-
-	return flags;
-}
-
 static u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -263,7 +273,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 {
 	acpi_status status = AE_OK;
 	u32 acpi_state = acpi_target_sleep_state;
-	u8 flags = wake_sleep_flags();
 	int error;
 
 	ACPI_FLUSH_CPU_CACHE();
@@ -271,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 	switch (acpi_state) {
 	case ACPI_STATE_S1:
 		barrier();
-		status = acpi_enter_sleep_state(acpi_state, flags);
+		status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
 		break;
 
 	case ACPI_STATE_S3:
@@ -286,7 +295,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
 
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(acpi_state, flags);
+	acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
 
 	/* ACPI 3.0 specs (P62) says that it's the responsibility
 	 * of the OSPM to clear the status bit [ implying that the
@@ -550,30 +559,27 @@ static int acpi_hibernation_begin(void)
 
 static int acpi_hibernation_enter(void)
 {
-	u8 flags = wake_sleep_flags();
 	acpi_status status = AE_OK;
 
 	ACPI_FLUSH_CPU_CACHE();
 
 	/* This shouldn't return.  If it returns, we have a problem */
-	status = acpi_enter_sleep_state(ACPI_STATE_S4, flags);
+	status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
 
 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 }
 
 static void acpi_hibernation_leave(void)
 {
-	u8 flags = wake_sleep_flags();
-
 	/*
 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
 	 * enable it here.
 	 */
 	acpi_enable();
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
 	/* Check the hardware signature */
 	if (facs && s4_hardware_signature != facs->hardware_signature) {
 		printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -828,12 +834,10 @@ static void acpi_power_off_prepare(void)
 
 static void acpi_power_off(void)
 {
-	u8 flags = wake_sleep_flags();
-
 	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
 	printk(KERN_DEBUG "%s called\n", __func__);
 	local_irq_disable();
-	acpi_enter_sleep_state(ACPI_STATE_S5, flags);
+	acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
 }
 
 /*
@@ -35,6 +35,14 @@ typedef union sigval {
 #define __ARCH_SI_BAND_T long
 #endif
 
+#ifndef __ARCH_SI_CLOCK_T
+#define __ARCH_SI_CLOCK_T __kernel_clock_t
+#endif
+
+#ifndef __ARCH_SI_ATTRIBUTES
+#define __ARCH_SI_ATTRIBUTES
+#endif
+
 #ifndef HAVE_ARCH_SIGINFO_T
 
 typedef struct siginfo {
@@ -72,8 +80,8 @@ typedef struct siginfo {
 			__kernel_pid_t _pid;	/* which child */
 			__ARCH_SI_UID_T _uid;	/* sender's uid */
 			int _status;		/* exit code */
-			__kernel_clock_t _utime;
-			__kernel_clock_t _stime;
+			__ARCH_SI_CLOCK_T _utime;
+			__ARCH_SI_CLOCK_T _stime;
 		} _sigchld;
 
 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
@@ -91,7 +99,7 @@ typedef struct siginfo {
 			int _fd;
 		} _sigpoll;
 	} _sifields;
-} siginfo_t;
+} __ARCH_SI_ATTRIBUTES siginfo_t;
 
 #endif
 
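The #ifndef blocks give asm-generic defaults that an architecture header can override simply by defining the macro before the include, which is exactly what the x86 siginfo.h hunk above does for x32. A compact hedged illustration of that default-with-override pattern (generic C, the macro and "header" roles are hypothetical and shown inline):

	/* arch header, included first: */
	#define ARCH_CLOCK_T long long		/* override the generic default */

	/* generic header: */
	#ifndef ARCH_CLOCK_T
	#define ARCH_CLOCK_T long		/* used only when no arch override exists */
	#endif

	#ifndef ARCH_ATTRIBUTES
	#define ARCH_ATTRIBUTES			/* expands to nothing by default */
	#endif

	typedef struct event {
		ARCH_CLOCK_T timestamp;
	} ARCH_ATTRIBUTES event_t;

	#include <stdio.h>

	int main(void)
	{
		printf("sizeof(event_t) = %zu\n", sizeof(event_t));
		return 0;
	}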