Merge branch 'pm-x86'

* pm-x86:
  x86/power: Fix 'nosmt' vs hibernation triple fault during resume
  x86: intel_epb: Do not build when CONFIG_PM is unset

commit a964d23c94
@@ -28,7 +28,10 @@ obj-y += cpuid-deps.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o

-obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o intel_epb.o
+ifdef CONFIG_CPU_SUP_INTEL
+obj-y += intel.o intel_pconfig.o
+obj-$(CONFIG_PM) += intel_epb.o
+endif
 obj-$(CONFIG_CPU_SUP_AMD) += amd.o
 obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
@@ -97,7 +97,6 @@ static void intel_epb_restore(void)
        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
 }

-#ifdef CONFIG_PM
 static struct syscore_ops intel_epb_syscore_ops = {
        .suspend = intel_epb_save,
        .resume = intel_epb_restore,
@@ -194,25 +193,6 @@ static int intel_epb_offline(unsigned int cpu)
        return 0;
 }

-static inline void register_intel_ebp_syscore_ops(void)
-{
-       register_syscore_ops(&intel_epb_syscore_ops);
-}
-#else /* !CONFIG_PM */
-static int intel_epb_online(unsigned int cpu)
-{
-       intel_epb_restore();
-       return 0;
-}
-
-static int intel_epb_offline(unsigned int cpu)
-{
-       return intel_epb_save();
-}
-
-static inline void register_intel_ebp_syscore_ops(void) {}
-#endif
-
 static __init int intel_epb_init(void)
 {
        int ret;
@@ -226,7 +206,7 @@ static __init int intel_epb_init(void)
        if (ret < 0)
                goto err_out_online;

-       register_intel_ebp_syscore_ops();
+       register_syscore_ops(&intel_epb_syscore_ops);
        return 0;

 err_out_online:
@@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
         * address in its instruction pointer may not be possible to resolve
         * any more at that point (the page tables used by it previously may
         * have been overwritten by hibernate image data).
+        *
+        * First, make sure that we wake up all the potentially disabled SMT
+        * threads which have been initially brought up and then put into
+        * mwait/cpuidle sleep.
+        * Those will be put to proper (not interfering with hibernation
+        * resume) sleep afterwards, and the resumed kernel will decide itself
+        * what to do with them.
         */
+       ret = cpuhp_smt_enable();
+       if (ret)
+               return ret;
        smp_ops.play_dead = resume_play_dead;
        ret = disable_nonboot_cpus();
        smp_ops.play_dead = play_dead;
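Note: the comment added above is the heart of the first fix. An SMT sibling parked in mwait can be woken by an ordinary memory write to the monitored cacheline (for example, the hibernation image being restored over that memory) and would then resume at an instruction pointer whose page tables may already be gone, triple-faulting the machine; hlt, by contrast, only resumes on an interrupt. The following is a minimal illustrative sketch of an hlt-based parking loop, not code from this patch; example_hlt_play_dead() is a made-up name.

/*
 * Illustrative sketch only -- not part of this patch. Parking a CPU with
 * hlt means it wakes solely on interrupts/NMIs, so copying the hibernation
 * image over memory cannot resume it at a stale address.
 */
static void example_hlt_play_dead(void)
{
        for (;;)
                native_halt();  /* executes the hlt instruction */
}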
@@ -11,6 +11,7 @@
 #include <linux/suspend.h>
 #include <linux/scatterlist.h>
 #include <linux/kdebug.h>
+#include <linux/cpu.h>

 #include <crypto/hash.h>

@@ -245,3 +246,35 @@ int relocate_restore_code(void)
        __flush_tlb_all();
        return 0;
 }
+
+int arch_resume_nosmt(void)
+{
+       int ret = 0;
+       /*
+        * We reached this while coming out of hibernation. This means
+        * that SMT siblings are sleeping in hlt, as mwait is not safe
+        * against control transition during resume (see comment in
+        * hibernate_resume_nonboot_cpu_disable()).
+        *
+        * If the resumed kernel has SMT disabled, we have to take all the
+        * SMT siblings out of hlt, and offline them again so that they
+        * end up in mwait proper.
+        *
+        * Called with hotplug disabled.
+        */
+       cpu_hotplug_enable();
+       if (cpu_smt_control == CPU_SMT_DISABLED ||
+           cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
+               enum cpuhp_smt_control old = cpu_smt_control;
+
+               ret = cpuhp_smt_enable();
+               if (ret)
+                       goto out;
+               ret = cpuhp_smt_disable(old);
+               if (ret)
+                       goto out;
+       }
+out:
+       cpu_hotplug_disable();
+       return ret;
+}
@@ -201,10 +201,14 @@ enum cpuhp_smt_control {
 extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
 extern void cpu_smt_check_topology(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
 #else
 # define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
 static inline void cpu_smt_disable(bool force) { }
 static inline void cpu_smt_check_topology(void) { }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
 #endif

 /*
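Note: together with the static inline stubs in the #else branch, these new declarations let callers use cpuhp_smt_enable()/cpuhp_smt_disable() without wrapping the calls in #ifdef when SMT hotplug support is not built in. A hedged sketch of such a caller follows; example_resmt_cycle() is hypothetical and not part of this patch.

#include <linux/cpu.h>

/* Hypothetical caller: compiles whether or not SMT hotplug support is
 * configured, because the stubs above simply return 0 when it is not. */
static int example_resmt_cycle(void)
{
        enum cpuhp_smt_control old = cpu_smt_control;
        int ret;

        ret = cpuhp_smt_enable();       /* bring SMT siblings back online */
        if (ret)
                return ret;
        return cpuhp_smt_disable(old);  /* restore the previous SMT state */
}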
@@ -2061,7 +2061,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
 }

-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 {
        int cpu, ret = 0;

@@ -2093,7 +2093,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
        return ret;
 }

-static int cpuhp_smt_enable(void)
+int cpuhp_smt_enable(void)
 {
        int cpu, ret = 0;

@@ -257,6 +257,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
                (kps % 1000) / 10);
 }

+__weak int arch_resume_nosmt(void)
+{
+       return 0;
+}
+
 /**
  * create_image - Create a hibernation image.
  * @platform_mode: Whether or not to use the platform driver.
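Note: __weak is the kernel's shorthand for __attribute__((weak)); it provides a default definition that the linker discards whenever a strong definition of the same symbol exists, which is how the x86 arch_resume_nosmt() added earlier in this diff overrides this generic no-op. A stand-alone sketch of the pattern, using made-up names (example_arch_hook and two hypothetical files), is:

/* default_hook.c -- weak default, used only if nothing overrides it */
int __attribute__((weak)) example_arch_hook(void)
{
        return 0;       /* nothing to do by default */
}

/* arch_hook.c -- strong definition; the linker prefers this one */
int example_arch_hook(void)
{
        /* architecture-specific post-resume work would go here */
        return 0;
}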
@@ -324,6 +329,10 @@ static int create_image(int platform_mode)
 Enable_cpus:
        suspend_enable_secondary_cpus();

+       /* Allow architectures to do nosmt-specific post-resume dances */
+       if (!in_suspend)
+               error = arch_resume_nosmt();
+
 Platform_finish:
        platform_finish(platform_mode);
