Merge tag 'x86_urgent_for_v5.18_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - A fix to disable PCI/MSI[-X] masking for XEN_HVM guests as that is
   solely controlled by the hypervisor

 - A build fix to make the function prototype (__warn()) as visible as
   the definition itself

 - A bunch of objtool annotation fixes which have accumulated over time

 - An ORC unwinder fix to handle bad input gracefully

 - Well, we thought the microcode gets loaded in time in order to
   restore the microcode-emulated MSRs but we thought wrong. So there's
   a fix for that to have the ordering done properly

 - Add new Intel model numbers

 - A spelling fix

* tag 'x86_urgent_for_v5.18_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/pci/xen: Disable PCI/MSI[-X] masking for XEN_HVM guests
  bug: Have __warn() prototype defined unconditionally
  x86/Kconfig: fix the spelling of 'becoming' in X86_KERNEL_IBT config
  objtool: Use offstr() to print address of missing ENDBR
  objtool: Print data address for "!ENDBR" data warnings
  x86/xen: Add ANNOTATE_NOENDBR to startup_xen()
  x86/uaccess: Add ENDBR to __put_user_nocheck*()
  x86/retpoline: Add ANNOTATE_NOENDBR for retpolines
  x86/static_call: Add ANNOTATE_NOENDBR to static call trampoline
  objtool: Enable unreachable warnings for CLANG LTO
  x86,objtool: Explicitly mark idtentry_body()s tail REACHABLE
  x86,objtool: Mark cpu_startup_entry() __noreturn
  x86,xen,objtool: Add UNWIND hint
  lib/strn*,objtool: Enforce user_access_begin() rules
  MAINTAINERS: Add x86 unwinding entry
  x86/unwind/orc: Recheck address range after stack info was updated
  x86/cpu: Load microcode during restore_processor_state()
  x86/cpu: Add new Alderlake and Raptorlake CPU model numbers
commit b2da7df52e
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -21443,6 +21443,15 @@ F:	arch/x86/include/asm/uv/
 F:	arch/x86/kernel/apic/x2apic_uv_x.c
 F:	arch/x86/platform/uv/
 
+X86 STACK UNWINDING
+M:	Josh Poimboeuf <jpoimboe@redhat.com>
+M:	Peter Zijlstra <peterz@infradead.org>
+S:	Supported
+F:	arch/x86/include/asm/unwind*.h
+F:	arch/x86/kernel/dumpstack.c
+F:	arch/x86/kernel/stacktrace.c
+F:	arch/x86/kernel/unwind_*.c
+
 X86 VDSO
 M:	Andy Lutomirski <luto@kernel.org>
 L:	linux-kernel@vger.kernel.org
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1866,7 +1866,7 @@ config X86_KERNEL_IBT
	  code with them to make this happen.

	  In addition to building the kernel with IBT, seal all functions that
-	  are not indirect call targets, avoiding them ever becomming one.
+	  are not indirect call targets, avoiding them ever becoming one.

	  This requires LTO like objtool runs and will slow down the build. It
	  does significantly reduce the number of ENDBR instructions in the
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -337,6 +337,9 @@ SYM_CODE_END(ret_from_fork)
 
	call	\cfunc
 
+	/* For some configurations \cfunc ends up being a noreturn. */
+	REACHABLE
+
	jmp	error_return
 .endm
 
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -26,6 +26,7 @@
  * _G	- parts with extra graphics on
  * _X	- regular server parts
  * _D	- micro server parts
+ * _N,_P	- other mobile parts
  *
  * Historical OPTDIFFs:
  *
@@ -107,8 +108,10 @@
 
 #define INTEL_FAM6_ALDERLAKE		0x97	/* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L		0x9A	/* Golden Cove / Gracemont */
+#define INTEL_FAM6_ALDERLAKE_N		0xBE
 
 #define INTEL_FAM6_RAPTORLAKE		0xB7
+#define INTEL_FAM6_RAPTORLAKE_P		0xBA
 
 /* "Small Core" Processors (Atom) */
 
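For orientation only (not part of the patch): the INTEL_FAM6_* values added above are family-6 display model numbers as read from CPUID leaf 1. A small userspace sketch, assuming GCC/Clang's <cpuid.h> on an x86 host:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int base_family = (eax >> 8) & 0xf;
	unsigned int family = base_family;
	unsigned int model = (eax >> 4) & 0xf;

	/* Extended family only applies to family 0xf; extended model to 6/0xf. */
	if (base_family == 0xf)
		family += (eax >> 20) & 0xff;
	if (base_family == 0x6 || base_family == 0xf)
		model |= ((eax >> 16) & 0xf) << 4;

	/* 0x97/0x9a/0xbe are Alder Lake variants, 0xb7/0xba are Raptor Lake. */
	printf("family 0x%x, model 0x%x\n", family, model);
	return 0;
}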
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -131,10 +131,12 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool initrd_gone;
+void microcode_bsp_resume(void);
 #else
 static inline void __init load_ucode_bsp(void)		{ }
 static inline void load_ucode_ap(void)			{ }
 static inline void reload_early_microcode(void)	{ }
+static inline void microcode_bsp_resume(void)		{ }
 #endif
 
 #endif /* _ASM_X86_MICROCODE_H */
--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -26,6 +26,7 @@
	    ".align 4						\n"	\
	    ".globl " STATIC_CALL_TRAMP_STR(name) "		\n"	\
	    STATIC_CALL_TRAMP_STR(name) ":			\n"	\
+	    ANNOTATE_NOENDBR					\
	    insns "						\n"	\
	    ".byte 0x53, 0x43, 0x54				\n"	\
	    ".type " STATIC_CALL_TRAMP_STR(name) ", @function	\n"	\
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -758,9 +758,9 @@ static struct subsys_interface mc_cpu_interface = {
 };
 
 /**
- * mc_bp_resume - Update boot CPU microcode during resume.
+ * microcode_bsp_resume - Update boot CPU microcode during resume.
  */
-static void mc_bp_resume(void)
+void microcode_bsp_resume(void)
 {
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -772,7 +772,7 @@ static void mc_bp_resume(void)
 }
 
 static struct syscore_ops mc_syscore_ops = {
-	.resume		= mc_bp_resume,
+	.resume		= microcode_bsp_resume,
 };
 
 static int mc_cpu_starting(unsigned int cpu)
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -339,11 +339,11 @@ static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;
 
-	if (!on_stack(info, addr, len) &&
-	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
-		return false;
+	if (on_stack(info, addr, len))
+		return true;
 
-	return true;
+	return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
+		on_stack(info, addr, len);
 }
 
 static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
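A note on the ORC change above ("x86/unwind/orc: Recheck address range after stack info was updated"): the old code treated a successful get_stack_info() as proof that the whole [addr, addr + len) range was accessible, even though the lookup only validated the start address; the new code rechecks on_stack() against the updated stack info. Below is a minimal, stand-alone C sketch of the two behaviors; struct stack_range, range_on_stack() and lookup_stack() are simplified stand-ins, not the kernel's API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stack_range { uintptr_t begin, end; };

static const struct stack_range stacks[] = {
	{ 0x1000, 0x2000 },	/* "task" stack */
	{ 0x3000, 0x3400 },	/* "irq" stack  */
};

static bool range_on_stack(const struct stack_range *s, uintptr_t addr, size_t len)
{
	return addr >= s->begin && addr + len <= s->end;
}

/* Finds the stack containing 'addr' only; says nothing about 'addr + len'. */
static bool lookup_stack(uintptr_t addr, struct stack_range *out)
{
	for (size_t i = 0; i < sizeof(stacks) / sizeof(stacks[0]); i++) {
		if (addr >= stacks[i].begin && addr < stacks[i].end) {
			*out = stacks[i];
			return true;
		}
	}
	return false;
}

/* Old logic: a successful lookup was treated as "whole range accessible". */
static bool access_ok_old(struct stack_range cur, uintptr_t addr, size_t len)
{
	if (!range_on_stack(&cur, addr, len) && !lookup_stack(addr, &cur))
		return false;
	return true;
}

/* New logic: recheck the full range against the stack the lookup returned. */
static bool access_ok_new(struct stack_range cur, uintptr_t addr, size_t len)
{
	if (range_on_stack(&cur, addr, len))
		return true;
	return lookup_stack(addr, &cur) && range_on_stack(&cur, addr, len);
}

int main(void)
{
	/* A 0x100-byte access starting 8 bytes before the "irq" stack ends. */
	uintptr_t addr = 0x33f8;

	printf("old: %d, new: %d\n",
	       access_ok_old(stacks[0], addr, 0x100),	/* 1: bad input accepted */
	       access_ok_new(stacks[0], addr, 0x100));	/* 0: rejected */
	return 0;
}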
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -48,6 +48,7 @@ SYM_FUNC_START(__put_user_1)
	cmp %_ASM_BX,%_ASM_CX
	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
+	ENDBR
	ASM_STAC
 1:	movb %al,(%_ASM_CX)
	xor %ecx,%ecx
@@ -62,6 +63,7 @@ SYM_FUNC_START(__put_user_2)
	cmp %_ASM_BX,%_ASM_CX
	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
+	ENDBR
	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
	xor %ecx,%ecx
@@ -76,6 +78,7 @@ SYM_FUNC_START(__put_user_4)
	cmp %_ASM_BX,%_ASM_CX
	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
+	ENDBR
	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
	xor %ecx,%ecx
@@ -90,6 +93,7 @@ SYM_FUNC_START(__put_user_8)
	cmp %_ASM_BX,%_ASM_CX
	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL)
+	ENDBR
	ASM_STAC
 4:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
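Background for the ENDBR additions above, as a hedged aside: with IBT enabled, the CPU requires any indirect branch to land on an ENDBR instruction, so entry points that can be reached indirectly need one. A tiny userspace analogue; the function names are invented, and the effect is only visible if you build with gcc -fcf-protection=branch and disassemble the result:

#include <stdio.h>

/* Any function whose address is taken can become an indirect-call target;
 * with -fcf-protection=branch the compiler places endbr64 at its entry so
 * the CPU's indirect-branch tracking accepts the landing site. */
static int indirect_target(int x)
{
	return x * 2;
}

int main(void)
{
	int (*fp)(int) = indirect_target;	/* address taken */

	printf("%d\n", fp(21));			/* prints 42 */
	return 0;
}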
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -31,6 +31,7 @@
	.align RETPOLINE_THUNK_SIZE
 SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
@@ -55,7 +56,6 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 
	.align RETPOLINE_THUNK_SIZE
 SYM_CODE_START(__x86_indirect_thunk_array)
-	ANNOTATE_NOENDBR // apply_retpolines
 
 #define GEN(reg) THUNK reg
 #include <asm/GEN-for-each-reg.h>
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -467,7 +467,6 @@ static __init void xen_setup_pci_msi(void)
		else
			xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
		xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
-		pci_msi_ignore_mask = 1;
	} else if (xen_hvm_domain()) {
		xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
		xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
@@ -481,6 +480,11 @@ static __init void xen_setup_pci_msi(void)
	 * in allocating the native domain and never use it.
	 */
	x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
+	/*
+	 * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
+	 * controlled by the hypervisor.
+	 */
+	pci_msi_ignore_mask = 1;
 }
 
 #else /* CONFIG_PCI_MSI */
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -50,6 +50,7 @@
 #define PVH_DS_SEL		(PVH_GDT_ENTRY_DS * 8)
 
 SYM_CODE_START_LOCAL(pvh_start_xen)
+	UNWIND_HINT_EMPTY
	cld
 
	lgdt (_pa(gdt))
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -25,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
 #include <asm/cpu_device_id.h>
+#include <asm/microcode.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -262,11 +263,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
-	msr_restore_context(ctxt);
 
	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);
+
+	microcode_bsp_resume();
+
+	/*
+	 * This needs to happen after the microcode has been updated upon resume
+	 * because some of the MSRs are "emulated" in microcode.
+	 */
+	msr_restore_context(ctxt);
 }
 
 /* Needed by apm.c */
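The resume reordering above exists because some MSRs are emulated by microcode and only become restorable once the microcode has been reloaded. A toy userspace model of the ordering problem; microcode_loaded, write_emulated_msr() and load_microcode() are illustrative stand-ins, not kernel interfaces, and MSR 0x48 (IA32_SPEC_CTRL) is only used as a familiar example:

#include <stdbool.h>
#include <stdio.h>

static bool microcode_loaded;

static bool write_emulated_msr(unsigned int msr, unsigned long long val)
{
	if (!microcode_loaded) {
		printf("MSR 0x%x write dropped: feature not present yet\n", msr);
		return false;
	}
	printf("MSR 0x%x = 0x%llx restored\n", msr, val);
	return true;
}

static void load_microcode(void)
{
	microcode_loaded = true;
}

int main(void)
{
	/* Broken order: restore the MSR first, reload microcode second. */
	write_emulated_msr(0x48, 0x1);		/* lost */
	load_microcode();

	/* Fixed order, matching the reordered resume path above. */
	microcode_loaded = false;
	load_microcode();
	write_emulated_msr(0x48, 0x1);		/* sticks */
	return 0;
}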
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -45,6 +45,7 @@ SYM_CODE_END(hypercall_page)
	__INIT
 SYM_CODE_START(startup_xen)
	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
	cld
 
	/* Clear .bss */
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -21,6 +21,12 @@
 #include <linux/panic.h>
 #include <linux/printk.h>
 
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+	    struct pt_regs *regs, struct warn_args *args);
+
 #ifdef CONFIG_BUG
 
 #ifdef CONFIG_GENERIC_BUG
@@ -110,11 +116,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
 #endif
 
 /* used internally by panic.c */
-struct warn_args;
-struct pt_regs;
-
-void __warn(const char *file, int line, void *caller, unsigned taint,
-	    struct pt_regs *regs, struct warn_args *args);
 
 #ifndef WARN_ON
 #define WARN_ON(condition) ({					\
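The bug.h change hoists the __warn() declaration out of the #ifdef CONFIG_BUG block so the prototype is visible wherever the definition is built. A sketch of the same pattern with made-up names (CONFIG_MYBUG, my_warn()); with the declaration unconditional, -Wmissing-prototypes stays quiet and callers compile whether or not the feature is enabled:

#include <stdio.h>

/* "Header" part: the prototype sits outside any #ifdef, so the definition
 * below always has a declaration in scope. */
void my_warn(const char *file, int line);

#ifdef CONFIG_MYBUG
#define MY_WARN()	my_warn(__FILE__, __LINE__)
#else
#define MY_WARN()	do { } while (0)
#endif

/* "panic.c" part: the definition exists regardless of CONFIG_MYBUG. */
void my_warn(const char *file, int line)
{
	fprintf(stderr, "WARNING at %s:%d\n", file, line);
}

int main(void)
{
	MY_WARN();	/* no-op unless compiled with -DCONFIG_MYBUG */
	return 0;
}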
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -167,7 +167,7 @@ static inline int suspend_disable_secondary_cpus(void) { return 0; }
 static inline void suspend_enable_secondary_cpus(void) { }
 #endif /* !CONFIG_PM_SLEEP_SMP */
 
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
 
 void cpu_idle_poll_ctrl(bool enable);
 
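Annotating cpu_startup_entry() as __noreturn tells the compiler, and tools like objtool, that control never comes back past a call to it (objtool's dead-end list gains the same function further down). A stand-alone illustration of the attribute with an invented function name:

#include <stdio.h>
#include <stdlib.h>

/* A noreturn function: tools can prune the code paths that would otherwise
 * follow the call site. */
__attribute__((noreturn)) static void enter_idle_forever(void)
{
	puts("idling (exiting in this sketch instead of looping)");
	exit(0);
}

int main(void)
{
	enter_idle_forever();
	/* Unreachable: no "missing return value" warning here, because the
	 * compiler knows the call above never returns. */
}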
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -25,7 +25,7 @@
  * hit it), 'max' is the address space maximum (and we return
  * -EFAULT if we hit it).
  */
-static inline long do_strncpy_from_user(char *dst, const char __user *src,
+static __always_inline long do_strncpy_from_user(char *dst, const char __user *src,
					unsigned long count, unsigned long max)
 {
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -20,7 +20,7 @@
  * if it fits in a aligned 'long'. The caller needs to check
  * the return value against "> max".
  */
-static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long align, res = 0;
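Both lib/ hunks ("lib/strn*,objtool: Enforce user_access_begin() rules") force inlining because objtool forbids ordinary function calls inside a user_access_begin()/user_access_end() window, and plain inline still lets the compiler emit a real call. A userspace sketch of the forcing part only; the uaccess checking itself is objtool's job and is not modeled here:

#include <stdio.h>

/* The body is guaranteed to be expanded at the call site, so the caller's
 * "no function calls in this region" invariant holds even at -O0. */
static inline __attribute__((always_inline)) long copy_word(char *dst, const char *src)
{
	long n = 0;

	while ((dst[n] = src[n]) != '\0')
		n++;
	return n;
}

int main(void)
{
	char buf[16];

	printf("%ld\n", copy_word(buf, "hello"));	/* prints 5 */
	return 0;
}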
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -231,7 +231,7 @@ objtool_args =						\
	$(if $(part-of-module), --module)			\
	$(if $(CONFIG_X86_KERNEL_IBT), --lto --ibt)		\
	$(if $(CONFIG_FRAME_POINTER),, --no-fp)			\
-	$(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\
+	$(if $(CONFIG_GCOV_KERNEL), --no-unreachable)		\
	$(if $(CONFIG_RETPOLINE), --retpoline)			\
	$(if $(CONFIG_X86_SMAP), --uaccess)			\
	$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount)	\
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -140,7 +140,7 @@ objtool_link()
		if ! is_enabled CONFIG_FRAME_POINTER; then
			objtoolopt="${objtoolopt} --no-fp"
		fi
-		if is_enabled CONFIG_GCOV_KERNEL || is_enabled CONFIG_LTO_CLANG; then
+		if is_enabled CONFIG_GCOV_KERNEL; then
			objtoolopt="${objtoolopt} --no-unreachable"
		fi
		if is_enabled CONFIG_RETPOLINE; then
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -184,6 +184,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
		"do_group_exit",
		"stop_this_cpu",
		"__invalid_creds",
+		"cpu_startup_entry",
	};
 
	if (!func)
@@ -3217,9 +3218,8 @@ validate_ibt_reloc(struct objtool_file *file, struct reloc *reloc)
 static void warn_noendbr(const char *msg, struct section *sec, unsigned long offset,
			 struct instruction *dest)
 {
-	WARN_FUNC("%srelocation to !ENDBR: %s+0x%lx", sec, offset, msg,
-		  dest->func ? dest->func->name : dest->sec->name,
-		  dest->func ? dest->offset - dest->func->offset : dest->offset);
+	WARN_FUNC("%srelocation to !ENDBR: %s", sec, offset, msg,
+		  offstr(dest->sec, dest->offset));
 }
 
 static void validate_ibt_dest(struct objtool_file *file, struct instruction *insn,
@@ -3823,11 +3823,8 @@ static int validate_ibt(struct objtool_file *file)
			struct instruction *dest;
 
			dest = validate_ibt_reloc(file, reloc);
-			if (is_data && dest && !dest->noendbr) {
-				warn_noendbr("data ", reloc->sym->sec,
-					     reloc->sym->offset + reloc->addend,
-					     dest);
-			}
+			if (is_data && dest && !dest->noendbr)
+				warn_noendbr("data ", sec, reloc->offset, dest);
		}
	}
 