Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "Speculation:

   - Make the microcode check more robust

   - Make the L1TF memory limit depend on the internal cache physical
     address space and not on the CPUID advertised physical address
     space, which might be significantly smaller. This avoids disabling
     L1TF on machines which utilize the full physical address space.

   - Fix the GDT mapping for EFI calls on 32bit PTI

   - Fix the MCE nospec implementation to prevent #GP

  Fixes and robustness:

   - Use the proper operand order for LSL in the VDSO

   - Prevent NMI uaccess race against CR3 switching

   - Add a lockdep check to verify that text_mutex is held in
     text_poke() functions

   - Repair the fallout of giving native_restore_fl() a prototype

   - Prevent kernel memory dumps based on usermode RIP

   - Wipe KASAN shadow stack before rewinding the stack to prevent
     false positives

   - Move the ASM GOTO enforcement to the actual build stage to allow
     user API header extraction without a compiler

   - Fix a section mismatch introduced by the on demand VDSO mapping
     change

  Miscellaneous:

   - Trivial typo, GCC quirk removal and CC_SET/OUT() cleanups"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/pti: Fix section mismatch warning/error
  x86/vdso: Fix lsl operand order
  x86/mce: Fix set_mce_nospec() to avoid #GP fault
  x86/efi: Load fixmap GDT in efi_call_phys_epilog()
  x86/nmi: Fix NMI uaccess race against CR3 switching
  x86: Allow generating user-space headers without a compiler
  x86/dumpstack: Don't dump kernel memory based on usermode RIP
  x86/asm: Use CC_SET()/CC_OUT() in __gen_sigismember()
  x86/alternatives: Lockdep-enforce text_mutex in text_poke*()
  x86/entry/64: Wipe KASAN stack shadow before rewind_stack_do_exit()
  x86/irqflags: Mark native_restore_fl extern inline
  x86/build: Remove jump label quirk for GCC older than 4.5.2
  x86/Kconfig: Fix trivial typo
  x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
  x86/spectre: Add missing family 6 check to microcode check
commit 899ba79553
@@ -2843,7 +2843,7 @@ config X86_SYSFB
           This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
           framebuffers so the new generic system-framebuffer drivers can be
           used on x86. If the framebuffer is not compatible with the generic
-          modes, it is adverticed as fallback platform framebuffer so legacy
+          modes, it is advertised as fallback platform framebuffer so legacy
           drivers like efifb, vesafb and uvesafb can pick it up.
           If this option is not selected, all system framebuffers are always
           marked as fallback platform framebuffers as usual.
@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
   endif
 endif
 
-ifndef CC_HAVE_ASM_GOTO
-  $(error Compiler lacks asm-goto support.)
-endif
-
-#
-# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
-# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
-# to test for this bug at compile-time because the test case needs to execute,
-# which is a no-go for cross compilers. So check the GCC version instead.
-#
-ifdef CONFIG_JUMP_LABEL
-  ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
-    ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
-  endif
-endif
-
 ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
         # This compiler flag is not supported by Clang:
         KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
@@ -312,6 +296,13 @@ PHONY += vdso_install
 vdso_install:
         $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
 
+archprepare: checkbin
+checkbin:
+ifndef CC_HAVE_ASM_GOTO
+        @echo Compiler lacks asm-goto support.
+        @exit 1
+endif
+
 archclean:
         $(Q)rm -rf $(objtree)/arch/i386
         $(Q)rm -rf $(objtree)/arch/x86_64
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
         perf_callchain_store(entry, regs->ip);
 
-        if (!current->mm)
+        if (!nmi_uaccess_okay())
                 return;
 
         if (perf_callchain_user32(regs, entry))
@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
         return flags;
 }
 
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
 {
         asm volatile("push %0 ; popf"
                      : /* no output */
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
         /* Index into per_cpu list: */
         u16 cpu_index;
         u32 microcode;
+        /* Address space bits used by the cache internally */
+        u8 x86_cache_bits;
         unsigned initialized : 1;
 } __randomize_layout;
 
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-        return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+        return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
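A quick numeric cross-check of the l1tf_pfn_limit() change above, using the same half_pa computation that l1tf_select_mitigation() performs later in this series. The standalone program below is purely illustrative and not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Mirrors l1tf_pfn_limit(): BIT_ULL(bits - 1 - PAGE_SHIFT) */
    static uint64_t pfn_limit(unsigned int bits)
    {
            return 1ULL << (bits - 1 - PAGE_SHIFT);
    }

    int main(void)
    {
            /* half_pa = l1tf_pfn_limit() << PAGE_SHIFT, as in l1tf_select_mitigation() */
            uint64_t half_pa_cpuid = pfn_limit(36) << PAGE_SHIFT;   /* CPUID-reported 36 bits */
            uint64_t half_pa_cache = pfn_limit(44) << PAGE_SHIFT;   /* 44-bit internal cache width */

            printf("36-bit limit: %llu GB\n", (unsigned long long)(half_pa_cpuid >> 30)); /* 32 GB */
            printf("44-bit limit: %llu TB\n", (unsigned long long)(half_pa_cache >> 40)); /*  8 TB */
            return 0;
    }

With only the CPUID-reported 36 bits, 32G of installed RAM plus memory holes already reaches the cut-off and the mitigation would be flagged as ineffective; using the 44 bits the cache implements internally raises the cut-off to 8 TB.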
@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs);
 
 #define __ARCH_HAS_SA_RESTORER
 
+#include <asm/asm.h>
 #include <uapi/asm/sigcontext.h>
 
 #ifdef __i386__
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
 
 static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
-        unsigned char ret;
-        asm("btl %2,%1\n\tsetc %0"
-            : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+        bool ret;
+        asm("btl %2,%1" CC_SET(c)
+            : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
         return ret;
 }
 
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
         return (unsigned long)frame;
 }
 
-void show_opcodes(u8 *rip, const char *loglvl);
+void show_opcodes(struct pt_regs *regs, const char *loglvl);
 void show_ip(struct pt_regs *regs, const char *loglvl);
 #endif /* _ASM_X86_STACKTRACE_H */
@@ -175,8 +175,16 @@ struct tlb_state {
          * are on. This means that it may not match current->active_mm,
          * which will contain the previous user mm when we're in lazy TLB
          * mode even if we've already switched back to swapper_pg_dir.
+         *
+         * During switch_mm_irqs_off(), loaded_mm will be set to
+         * LOADED_MM_SWITCHING during the brief interrupts-off window
+         * when CR3 and loaded_mm would otherwise be inconsistent. This
+         * is for nmi_uaccess_okay()'s benefit.
          */
         struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
         u16 loaded_mm_asid;
         u16 next_asid;
         /* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm. It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+        struct mm_struct *current_mm = current->mm;
+
+        VM_WARN_ON_ONCE(!loaded_mm);
+
+        /*
+         * The condition we want to check is
+         * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
+         * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+         * is supposed to be reasonably fast.
+         *
+         * Instead, we check the almost equivalent but somewhat conservative
+         * condition below, and we rely on the fact that switch_mm_irqs_off()
+         * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+         */
+        if (loaded_mm != current_mm)
+                return false;
+
+        VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+        return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
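The nmi_uaccess_okay() helper added above is meant to gate every user-memory access made from NMI context. The sketch below shows the intended calling pattern, modelled on the copy_from_user_nmi() change later in this series; it is an illustration with a made-up helper name, not code from the commit:

    /* Hypothetical NMI-path helper -- illustration only. */
    static unsigned long nmi_read_user_long(const unsigned long __user *uaddr,
                                            unsigned long *val)
    {
            unsigned long ret;

            /*
             * CR3 and cpu_tlbstate.loaded_mm may be mid-switch here;
             * touching user memory could walk the wrong page tables,
             * so bail out early and report nothing copied.
             */
            if (!nmi_uaccess_okay())
                    return sizeof(*val);

            pagefault_disable();
            ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
            pagefault_enable();

            return ret;     /* 0 on success, bytes not copied otherwise */
    }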
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
          *
          * If RDPID is available, use it.
          */
-        alternative_io ("lsl %[p],%[seg]",
+        alternative_io ("lsl %[seg],%[p]",
                         ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
                         X86_FEATURE_RDPID,
                         [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
  * It means the size must be writable atomically and the address must be aligned
  * in a way that permits an atomic write. It also makes sure we fit on a single
  * page.
- *
- * Note: Must be called under text_mutex.
  */
 void *text_poke(void *addr, const void *opcode, size_t len)
 {
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len)
          */
         BUG_ON(!after_bootmem);
 
+        lockdep_assert_held(&text_mutex);
+
         if (!core_kernel_text((unsigned long)addr)) {
                 pages[0] = vmalloc_to_page(addr);
                 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs)
  *      - replace the first byte (int3) by the first byte of
  *        replacing opcode
  *      - sync cores
- *
- * Note: must be called under text_mutex.
  */
 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
 {
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
         bp_int3_handler = handler;
         bp_int3_addr = (u8 *)addr + sizeof(int3);
         bp_patching_in_progress = true;
+
+        lockdep_assert_held(&text_mutex);
+
         /*
          * Corresponding read barrier in int3 notifier for making sure the
          * in_progress and handler are correctly ordered wrt. patching.
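With the two lockdep_assert_held() calls added above, every caller of text_poke() and text_poke_bp() must hold text_mutex. A minimal sketch of the calling pattern the assertion enforces (hypothetical wrapper, not taken from this commit):

    #include <linux/memory.h>          /* text_mutex */
    #include <linux/mutex.h>
    #include <linux/types.h>
    #include <asm/text-patching.h>     /* text_poke() */

    /* Illustration only: patch a single byte while holding text_mutex. */
    static void patch_one_byte(void *addr, u8 byte)
    {
            mutex_lock(&text_mutex);
            text_poke(addr, &byte, 1);  /* lockdep assertion is satisfied here */
            mutex_unlock(&text_mutex);
    }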
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+        if (c->x86 != 6)
+                return;
+
+        switch (c->x86_model) {
+        case INTEL_FAM6_NEHALEM:
+        case INTEL_FAM6_WESTMERE:
+        case INTEL_FAM6_SANDYBRIDGE:
+        case INTEL_FAM6_IVYBRIDGE:
+        case INTEL_FAM6_HASWELL_CORE:
+        case INTEL_FAM6_HASWELL_ULT:
+        case INTEL_FAM6_HASWELL_GT3E:
+        case INTEL_FAM6_BROADWELL_CORE:
+        case INTEL_FAM6_BROADWELL_GT3E:
+        case INTEL_FAM6_SKYLAKE_MOBILE:
+        case INTEL_FAM6_SKYLAKE_DESKTOP:
+        case INTEL_FAM6_KABYLAKE_MOBILE:
+        case INTEL_FAM6_KABYLAKE_DESKTOP:
+                if (c->x86_cache_bits < 44)
+                        c->x86_cache_bits = 44;
+                break;
+        }
+}
+
 static void __init l1tf_select_mitigation(void)
 {
         u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
         if (!boot_cpu_has_bug(X86_BUG_L1TF))
                 return;
 
+        override_cache_bits(&boot_cpu_data);
+
         switch (l1tf_mitigation) {
         case L1TF_MITIGATION_OFF:
         case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
                 return;
 #endif
 
-        /*
-         * This is extremely unlikely to happen because almost all
-         * systems have far more MAX_PA/2 than RAM can be fit into
-         * DIMM slots.
-         */
         half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
         if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
         else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                 c->x86_phys_bits = 36;
 #endif
+        c->x86_cache_bits = c->x86_phys_bits;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
         if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                 return false;
 
+        if (c->x86 != 6)
+                return false;
+
         for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                 if (c->x86_model == spectre_bad_microcodes[i].model &&
                     c->x86_stepping == spectre_bad_microcodes[i].stepping)
@@ -17,6 +17,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/kasan.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable,
  * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
  * guesstimate in attempt to achieve all of the above.
  */
-void show_opcodes(u8 *rip, const char *loglvl)
+void show_opcodes(struct pt_regs *regs, const char *loglvl)
 {
 #define PROLOGUE_SIZE 42
 #define EPILOGUE_SIZE 21
 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
         u8 opcodes[OPCODE_BUFSIZE];
+        unsigned long prologue = regs->ip - PROLOGUE_SIZE;
+        bool bad_ip;
 
-        if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+        /*
+         * Make sure userspace isn't trying to trick us into dumping kernel
+         * memory by pointing the userspace instruction pointer at it.
+         */
+        bad_ip = user_mode(regs) &&
+                __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
+
+        if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
+                                        OPCODE_BUFSIZE)) {
                 printk("%sCode: Bad RIP value.\n", loglvl);
         } else {
                 printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
 #else
         printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
 #endif
-        show_opcodes((u8 *)regs->ip, loglvl);
+        show_opcodes(regs, loglvl);
 }
 
 void show_iret_regs(struct pt_regs *regs)
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
          * We're not going to return, but we might be on an IST stack or
          * have very little stack space left. Rewind the stack and kill
          * the task.
+         * Before we rewind the stack, we have to tell KASAN that we're going to
+         * reuse the task stack and that existing poisons are invalid.
          */
+        kasan_unpoison_task_stack(current);
         rewind_stack_do_exit(signr);
 }
 NOKPROBE_SYMBOL(oops_end);
@@ -7,6 +7,8 @@
 #include <linux/uaccess.h>
 #include <linux/export.h>
 
+#include <asm/tlbflush.h>
+
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
         if (__range_not_ok(from, n, TASK_SIZE))
                 return n;
 
+        if (!nmi_uaccess_okay())
+                return n;
+
         /*
          * Even though this function is typically called from NMI/IRQ context
          * disable pagefaults so that its behaviour is consistent even when
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 
         printk(KERN_CONT "\n");
 
-        show_opcodes((u8 *)regs->ip, loglvl);
+        show_opcodes(regs, loglvl);
 }
 
 static void
@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
         return 0;
 }
 
+/*
+ * Machine check recovery code needs to change cache mode of poisoned
+ * pages to UC to avoid speculative access logging another error. But
+ * passing the address of the 1:1 mapping to set_memory_uc() is a fine
+ * way to encourage a speculative access. So we cheat and flip the top
+ * bit of the address. This works fine for the code that updates the
+ * page tables. But at the end of the process we need to flush the cache
+ * and the non-canonical address causes a #GP fault when used by the
+ * CLFLUSH instruction.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long make_addr_canonical_again(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+        return (long)(addr << 1) >> 1;
+#else
+        return addr;
+#endif
+}
+
+
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                     pgprot_t mask_set, pgprot_t mask_clr,
                                     int force_split, int in_flag,
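The (long)(addr << 1) >> 1 expression above restores bit 63 by sign-extending from bit 62, which is set in every canonical kernel address. A small self-contained check with a made-up direct-map style value (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Made-up address with bit 63 flipped off, as the MCE path does. */
            uint64_t addr  = 0x7fff888012345000ULL;         /* non-canonical */

            /* Same expression as make_addr_canonical_again() on x86-64. */
            uint64_t fixed = (uint64_t)((int64_t)(addr << 1) >> 1);

            assert(fixed == 0xffff888012345000ULL);         /* canonical again */
            return 0;
    }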
@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                  * Save address for cache flush. *addr is modified in the call
                  * to __change_page_attr_set_clr() below.
                  */
-                baddr = *addr;
+                baddr = make_addr_canonical_again(*addr);
         }
 
         /* Must avoid aliasing mappings in the highmem code */
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 {
         gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
         pmd_t *pmd;
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
                 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
 
+                /* Let nmi_uaccess_okay() know that we're changing CR3. */
+                this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+                barrier();
+
                 if (need_flush) {
                         this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                         this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         if (next != &init_mm)
                 this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
 
+        /* Make sure we write CR3 before loaded_mm. */
+        barrier();
+
         this_cpu_write(cpu_tlbstate.loaded_mm, next);
         this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 }
@@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void)
 
 void __init efi_call_phys_epilog(pgd_t *save_pgd)
 {
-        struct desc_ptr gdt_descr;
-
-        gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0);
-        gdt_descr.size = GDT_SIZE - 1;
-        load_gdt(&gdt_descr);
-
         load_cr3(save_pgd);
         __flush_tlb_all();
+
+        load_fixmap_gdt(0);
 }
 
 void __init efi_runtime_update_mappings(void)
@@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
 # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
 cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
-# cc-if-fullversion
-# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
-cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
-
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 cc-ldoption = $(call try-run,\