x86/cpu: Load microcode during restore_processor_state()
When resuming from a system sleep state, restore_processor_state()
restores the boot CPU MSRs. These MSRs could be emulated by microcode.
If the microcode is not loaded yet, writing to emulated MSRs leads to
an unchecked MSR access error:
...
PM: Calling lapic_suspend+0x0/0x210
unchecked MSR access error: WRMSR to 0x10f (tried to write 0x0...0) at rIP: ... (native_write_msr)
Call Trace:
<TASK>
? restore_processor_state
x86_acpi_suspend_lowlevel
acpi_suspend_enter
suspend_devices_and_enter
pm_suspend.cold
state_store
kobj_attr_store
sysfs_kf_write
kernfs_fop_write_iter
new_sync_write
vfs_write
ksys_write
__x64_sys_write
do_syscall_64
entry_SYSCALL_64_after_hwframe
RIP: 0033:0x7fda13c260a7
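For illustration only (this helper is hypothetical and not part of the patch):
the "unchecked MSR access error" above is printed by the kernel's MSR exception
fixup when a plain WRMSR faults; a checked accessor such as wrmsrl_safe()
instead returns the failure to the caller:

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/msr.h>

/* Hypothetical helper, shown only to illustrate the failure mode above. */
static void write_msr_checked(u32 msr, u64 val)
{
	/* wrmsrl_safe() returns non-zero if the WRMSR faults (#GP). */
	if (wrmsrl_safe(msr, val))
		pr_warn("WRMSR 0x%x rejected, microcode not loaded yet?\n", msr);
}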
To ensure the microcode-emulated MSRs are available for restoration, load
the microcode on the boot CPU before restoring these MSRs.
[ Pawan: write commit message and productize it. ]
Fixes: e2a1256b17 ("x86/speculation: Restore speculation related MSRs during S3 resume")
Reported-by: Kyle D. Pelton <kyle.d.pelton@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Tested-by: Kyle D. Pelton <kyle.d.pelton@intel.com>
Cc: stable@vger.kernel.org
Link: https://bugzilla.kernel.org/show_bug.cgi?id=215841
Link: https://lore.kernel.org/r/4350dfbf785cd482d3fafa72b2b49c83102df3ce.1650386317.git.pawan.kumar.gupta@linux.intel.com
commit f9e14dbbd4 (parent 3ccce93403)
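The net effect on the resume path, as a condensed sketch of
__restore_processor_state() after this change (unrelated restore steps are
elided; the names and the comment come from the arch/x86/power/cpu.c hunk
below):

static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	/* ... MTRR, perf debug store and other restore steps elided ... */

	/* Load the boot CPU's microcode first ... */
	microcode_bsp_resume();

	/*
	 * This needs to happen after the microcode has been updated upon
	 * resume because some of the MSRs are "emulated" in microcode.
	 */
	msr_restore_context(ctxt);
}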
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -131,10 +131,12 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool initrd_gone;
+void microcode_bsp_resume(void);
 #else
 static inline void __init load_ucode_bsp(void) { }
 static inline void load_ucode_ap(void) { }
 static inline void reload_early_microcode(void) { }
+static inline void microcode_bsp_resume(void) { }
 #endif

 #endif /* _ASM_X86_MICROCODE_H */
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -758,9 +758,9 @@ static struct subsys_interface mc_cpu_interface = {
 };

 /**
- * mc_bp_resume - Update boot CPU microcode during resume.
+ * microcode_bsp_resume - Update boot CPU microcode during resume.
  */
-static void mc_bp_resume(void)
+void microcode_bsp_resume(void)
 {
 	int cpu = smp_processor_id();
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -772,7 +772,7 @@ static void mc_bp_resume(void)
 }

 static struct syscore_ops mc_syscore_ops = {
-	.resume			= mc_bp_resume,
+	.resume			= microcode_bsp_resume,
 };

 static int mc_cpu_starting(unsigned int cpu)
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -25,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
 #include <asm/cpu_device_id.h>
+#include <asm/microcode.h>

 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -262,11 +263,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	x86_platform.restore_sched_clock_state();
 	mtrr_bp_restore();
 	perf_restore_debug_store();
-	msr_restore_context(ctxt);

 	c = &cpu_data(smp_processor_id());
 	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
 		init_ia32_feat_ctl(c);
+
+	microcode_bsp_resume();
+
+	/*
+	 * This needs to happen after the microcode has been updated upon resume
+	 * because some of the MSRs are "emulated" in microcode.
+	 */
+	msr_restore_context(ctxt);
 }

 /* Needed by apm.c */