linux/arch/x86/power/cpu.c

/*
* Suspend support specific for i386/x86-64.
*
* Distribute under GPLv2
*
* Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
* Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
* Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
*/
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;
/**
* __save_processor_state - save CPU registers before creating a
* hibernation image and before restoring the memory state from it
* @ctxt: structure to store the register contents in
*
* NOTE: If there is a CPU register the modification of which by the
* boot kernel (i.e. the kernel used for loading the hibernation image)
* might affect the operations of the restored target kernel (i.e. the one
* saved in the hibernation image), then its contents must be saved by this
* function. In other words, if kernel A is hibernated and a different
* kernel B is used for loading the hibernation image into memory, then
* kernel A's __save_processor_state() must save all registers needed by
* kernel A, so that kernel A can operate correctly after the resume
* regardless of what kernel B does in the meantime.
*/
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
mtrr_save_fixed_ranges(NULL);
#endif
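/*
 * kernel_fpu_begin() saves the current FPU/XMM register state, so the
 * floating-point context does not need to be saved by hand below; the
 * matching kernel_fpu_end() runs from do_fpu_end() on the restore side.
 */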
kernel_fpu_begin();
/*
* descriptor tables
*/
#ifdef CONFIG_X86_32
store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
/*
* We save it here, but restore it only in the hibernate case.
* For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
* mode in "secondary_startup_64". In 32-bit mode it is done via
* 'pmode_gdt' in wakeup_start.
*/
ctxt->gdt_desc.size = GDT_SIZE - 1;
ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id());
store_tr(ctxt->tr);
/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
/*
* segment registers
*/
#ifdef CONFIG_X86_32
savesegment(es, ctxt->es);
savesegment(fs, ctxt->fs);
savesegment(gs, ctxt->gs);
savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
rdmsrl(MSR_FS_BASE, ctxt->fs_base);
rdmsrl(MSR_GS_BASE, ctxt->gs_base);
rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
mtrr_save_fixed_ranges(NULL);
rdmsrl(MSR_EFER, ctxt->efer);
#endif
/*
* control registers
*/
ctxt->cr0 = read_cr0();
ctxt->cr2 = read_cr2();
ctxt->cr3 = read_cr3();
ctxt->cr4 = __read_cr4_safe();
#ifdef CONFIG_X86_64
ctxt->cr8 = read_cr8();
#endif
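/*
 * rdmsrl_safe() returns 0 on success, so misc_enable_saved records
 * whether MSR_IA32_MISC_ENABLE actually exists on this CPU.
 */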
ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
&ctxt->misc_enable);
}
/* Needed by apm.c */
void save_processor_state(void)
{
__save_processor_state(&saved_context);
x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif
static void do_fpu_end(void)
{
/*
* Restore FPU regs if necessary.
*/
kernel_fpu_end();
}
static void fix_processor_context(void)
{
int cpu = smp_processor_id();
struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
struct desc_struct *desc = get_cpu_gdt_table(cpu);
tss_desc tss;
#endif
set_tss_desc(cpu, t); /*
* This just modifies memory; it should not be
* necessary. But it is, because the hardware
* has the concept of a busy TSS: ltr faults if
* the descriptor is still marked busy.
*/
#ifdef CONFIG_X86_64
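/*
 * The previous ltr marked the TSS descriptor busy; rewrite it with
 * type 0x9 (available 64-bit TSS) so that load_TR_desc() below can
 * reload TR without faulting.
 */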
memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
tss.type = 0x9; /* The available 64-bit TSS (see AMD vol. 2, pg. 91) */
write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
syscall_init(); /* This sets MSR_*STAR and related */
#endif
load_TR_desc(); /* This does ltr */
load_LDT(&current->active_mm->context); /* This does lldt */
fpu__resume_cpu();
}
/**
* __restore_processor_state - restore the contents of CPU registers saved
* by __save_processor_state()
* @ctxt: structure to load the register contents from
*/
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
if (ctxt->misc_enable_saved)
wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
* control registers
*/
/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
if (ctxt->cr4)
__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
wrmsrl(MSR_EFER, ctxt->efer);
write_cr8(ctxt->cr8);
__write_cr4(ctxt->cr4);
#endif
write_cr3(ctxt->cr3);
write_cr2(ctxt->cr2);
write_cr0(ctxt->cr0);
/*
* now restore the descriptor tables to their proper values
* ltr is done in fix_processor_context().
*/
#ifdef CONFIG_X86_32
load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif
/*
* segment registers
*/
#ifdef CONFIG_X86_32
loadsegment(es, ctxt->es);
loadsegment(fs, ctxt->fs);
loadsegment(gs, ctxt->gs);
loadsegment(ss, ctxt->ss);
/*
* sysenter MSRs
*/
if (boot_cpu_has(X86_FEATURE_SEP))
enable_sep_cpu();
#else
/* CONFIG_X86_64 */
asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
load_gs_index(ctxt->gs);
asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
wrmsrl(MSR_GS_BASE, ctxt->gs_base);
wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif
fix_processor_context();
do_fpu_end();
x86_platform.restore_sched_clock_state();
mtrr_bp_restore();
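/* Re-point the DS area MSR so PEBS/BTS debug-store buffers work after resume. */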
perf_restore_debug_store();
}
/* Needed by apm.c */
void notrace restore_processor_state(void)
{
__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif
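/*
 * A minimal sketch (not part of the original file) of how the
 * hibernation core drives the pair above; the function name and the
 * steps in the body are simplified placeholders, not real kernel APIs.
 */
#if 0
static int example_processor_state_cycle(void)
{
	save_processor_state();		/* snapshot registers into saved_context */
	/*
	 * ... the image is written out, the machine powers off, and the
	 * boot kernel later copies the image back into place ...
	 */
	restore_processor_state();	/* runs in the restored kernel on resume */
	return 0;
}
#endif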
/*
* When bsp_check() is called in hibernate and suspend, cpu hotplug
* is already disabled, so there is no need to handle races between
* the cpumask query and cpu hotplug.
*/
static int bsp_check(void)
{
if (cpumask_first(cpu_online_mask) != 0) {
pr_warn("CPU0 is offline.\n");
return -ENODEV;
}
return 0;
}
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
void *ptr)
{
int ret = 0;
switch (action) {
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
ret = bsp_check();
break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
case PM_RESTORE_PREPARE:
/*
* When the system resumes from hibernation, online CPU0 because
* 1. it's required for resume and
* 2. the CPU was online before hibernation
*/
if (!cpu_online(0))
_debug_hotplug_cpu(0, 1);
break;
case PM_POST_RESTORE:
/*
* When a resume really happens, this code won't be called.
*
* This code is called only when user-space hibernation software
* prepares the snapshot device during boot. So we just call
* _debug_hotplug_cpu() to restore CPU0 to its state prior to
* preparing the snapshot device.
*
* This works for the normal boot case in our CPU0 hotplug debug
* mode, i.e. CPU0 is offline and user-mode hibernation software
* initializes during boot.
*
* If CPU0 is online and a user application accesses the snapshot
* device after boot, this will offline CPU0 and the user may see
* a different CPU0 state before and after accessing the snapshot
* device. But hopefully that is not the case when a user is
* debugging CPU0 hotplug, and even then CPU0 can easily be brought
* back online.
*
* To keep this debug code simple, we only consider the normal boot
* case. Otherwise we would need to remember CPU0's state, restore
* it, resolve races, etc.
*/
_debug_hotplug_cpu(0, 0);
break;
#endif
default:
break;
}
return notifier_from_errno(ret);
}
static int __init bsp_pm_check_init(void)
{
/*
* Register bsp_pm_callback with a lower priority than
* cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
* earlier and disables cpu hotplug before the BSP online check.
*/
pm_notifier(bsp_pm_callback, -INT_MAX);
return 0;
}
core_initcall(bsp_pm_check_init);