mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc x86 fixes from Ingo Molnar:

 - make CR4 handling irq-safe, which bug vmware guests ran into

 - don't crash on early IRQs in Xen guests

 - don't crash secondary CPU bringup if #UD assisted WARN()ings are
   triggered

 - make X86_BUG_FXSAVE_LEAK optional on newer AMD CPUs that have the fix

 - fix AMD Fam17h microcode loading

 - fix broadcom_postcore_init() if ACPI is disabled

 - fix resume regression in __restore_processor_context()

 - fix Sparse warnings

 - fix a GCC-8 warning

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Change time() prototype to match __vdso_time()
  x86: Fix Sparse warnings about non-static functions
  x86/power: Fix some ordering bugs in __restore_processor_context()
  x86/PCI: Make broadcom_postcore_init() check acpi_disabled
  x86/microcode/AMD: Add support for fam17h microcode loading
  x86/cpufeatures: Make X86_BUG_FXSAVE_LEAK detectable in CPUID on AMD
  x86/idt: Load idt early in start_secondary
  x86/xen: Support early interrupts in xen pv guests
  x86/tlb: Disable interrupts when changing CR4
  x86/tlb: Refactor CR4 setting and shadow write
This commit is contained in:
commit dd53a4214d
arch/x86/entry/vdso/vclock_gettime.c
@@ -324,5 +324,5 @@ notrace time_t __vdso_time(time_t *t)
 		*t = result;
 	return result;
 }
-int time(time_t *t)
+time_t time(time_t *t)
 	__attribute__((weak, alias("__vdso_time")));

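The one-word fix works because GCC 8 warns when a weak alias is declared with a prototype that differs from its target; time() was declared as returning int while __vdso_time() returns time_t. A minimal sketch of the pattern outside the kernel, with hypothetical names:

    /* alias-demo.c -- illustrative only; compile with gcc-8 -Wall */
    long target(long x) { return x; }

    /* matching prototype: no warning */
    long good(long x) __attribute__((weak, alias("target")));

    /* mismatched return type: gcc-8 warns that 'bad' is an alias
     * between functions of incompatible types */
    int bad(long x) __attribute__((weak, alias("target")));
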
arch/x86/include/asm/cpufeatures.h
@@ -266,6 +266,7 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF		(13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */

arch/x86/include/asm/segment.h
@@ -236,11 +236,23 @@
  */
 #define EARLY_IDT_HANDLER_SIZE 9
 
+/*
+ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
+ * early_idt_handler_array it contains a prequel in the form of
+ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
+ * max 8 bytes.
+ */
+#define XEN_EARLY_IDT_HANDLER_SIZE 8
+
 #ifndef __ASSEMBLY__
 
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
+#endif
+
 /*
  * Load a segment. Fall back on loading the zero segment if something goes
  * wrong. This variant assumes that loading zero fully clears the segment.

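The 8-byte stub size can be sanity-checked against the standard x86-64 instruction encodings (the byte counts below are background knowledge, not stated in the diff):

    pop %rcx    -> 59                1 byte
    pop %r11    -> 41 5b             2 bytes
    jmp rel32   -> e9 xx xx xx xx    5 bytes
                                     = 8 bytes, hence XEN_EARLY_IDT_HANDLER_SIZE
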
arch/x86/include/asm/tlbflush.h
@@ -173,40 +173,43 @@ static inline void cr4_init_shadow(void)
 	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
+static inline void __cr4_set(unsigned long cr4)
+{
+	lockdep_assert_irqs_disabled();
+	this_cpu_write(cpu_tlbstate.cr4, cr4);
+	__write_cr4(cr4);
+}
+
 /* Set in this cpu's CR4. */
 static inline void cr4_set_bits(unsigned long mask)
 {
-	unsigned long cr4;
+	unsigned long cr4, flags;
 
+	local_irq_save(flags);
 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
-	if ((cr4 | mask) != cr4) {
-		cr4 |= mask;
-		this_cpu_write(cpu_tlbstate.cr4, cr4);
-		__write_cr4(cr4);
-	}
+	if ((cr4 | mask) != cr4)
+		__cr4_set(cr4 | mask);
+	local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
-	unsigned long cr4;
+	unsigned long cr4, flags;
 
+	local_irq_save(flags);
 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
-	if ((cr4 & ~mask) != cr4) {
-		cr4 &= ~mask;
-		this_cpu_write(cpu_tlbstate.cr4, cr4);
-		__write_cr4(cr4);
-	}
+	if ((cr4 & ~mask) != cr4)
+		__cr4_set(cr4 & ~mask);
+	local_irq_restore(flags);
 }
 
-static inline void cr4_toggle_bits(unsigned long mask)
+static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
 {
 	unsigned long cr4;
 
 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
-	cr4 ^= mask;
-	this_cpu_write(cpu_tlbstate.cr4, cr4);
-	__write_cr4(cr4);
+	__cr4_set(cr4 ^ mask);
 }
 
 /* Read the CR4 shadow. */

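The point of the refactor: cr4_set_bits() and cr4_clear_bits() are read-modify-write sequences over both the per-cpu shadow and the real register, so an interrupt that also updates CR4 between the read and the write has its change silently overwritten. A sketch of the lost update that the local_irq_save()/restore() pair prevents:

    /*
     * Illustrative race (not from the commit; assume a hypothetical
     * interrupt handler that sets X86_CR4_PCE):
     *
     *   task:  cr4 = this_cpu_read(cpu_tlbstate.cr4);  // reads old value
     *   irq:   cr4_set_bits(X86_CR4_PCE);              // sets PCE in CR4
     *   task:  __write_cr4(cr4 | X86_CR4_TSD);         // PCE silently lost
     */

cr4_toggle_bits_irqsoff() deliberately skips the save/restore because its only caller, __switch_to_xtra() (see arch/x86/kernel/process.c below), already runs with interrupts disabled; the lockdep_assert_irqs_disabled() in __cr4_set() enforces that assumption.
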
arch/x86/kernel/apic/vector.c
@@ -542,8 +542,8 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 }
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
-void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
-			   struct irq_data *irqd, int ind)
+static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
+				  struct irq_data *irqd, int ind)
 {
 	unsigned int cpu, vector, prev_cpu, prev_vector;
 	struct apic_chip_data *apicd;

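For reference, the Sparse diagnostic that motivates the static qualifiers in this pull typically reads roughly:

    warning: symbol 'x86_vector_debug_show' was not declared. Should it be static?
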
arch/x86/kernel/cpu/amd.c
@@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 	case 0x17: init_amd_zn(c); break;
 	}
 
-	/* Enable workaround for FXSAVE leak */
-	if (c->x86 >= 6)
+	/*
+	 * Enable workaround for FXSAVE leak on CPUs
+	 * without a XSaveErPtr feature
+	 */
+	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
 		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
 	cpu_detect_cache_sizes(c);

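The new test corresponds to CPUID leaf 0x80000008, EBX bit 2 (feature word 13, bit 2, as defined in the cpufeatures.h hunk above). A user-space sketch of the same check, assuming GCC's <cpuid.h>; illustrative only, not kernel code:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* leaf 0x80000008: EBX carries AMD feature word 13 */
            if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
                    printf("XSaveErPtr (FXSAVE leak fixed in hw): %s\n",
                           (ebx & (1u << 2)) ? "yes" : "no");
            return 0;
    }
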
arch/x86/kernel/cpu/microcode/amd.c
@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
 
 	switch (family) {
 	case 0x14:
@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 	case 0x16:
 		max_size = F16H_MPB_MAX_SIZE;
 		break;
+	case 0x17:
+		max_size = F17H_MPB_MAX_SIZE;
+		break;
 	default:
 		max_size = F1XH_MPB_MAX_SIZE;
 		break;

arch/x86/kernel/process.c
@@ -299,7 +299,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	}
 
 	if ((tifp ^ tifn) & _TIF_NOTSC)
-		cr4_toggle_bits(X86_CR4_TSD);
+		cr4_toggle_bits_irqsoff(X86_CR4_TSD);
 
 	if ((tifp ^ tifn) & _TIF_NOCPUID)
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

arch/x86/kernel/smpboot.c
@@ -237,7 +237,7 @@ static void notrace start_secondary(void *unused)
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 #endif
-
+	load_current_idt();
 	cpu_init();
 	x86_cpuinit.early_percpu_clock_init();
 	preempt_disable();

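Background on why the early IDT load matters (this follows from the merge summary's "#UD assisted WARN()ings", not from the diff itself): on x86, WARN() is implemented as a ud2 instruction whose #UD exception the trap handler converts into the warning report. If such a WARN fires during secondary-CPU bringup before load_current_idt(), the #UD has no handler and the CPU crashes instead of merely warning.
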
arch/x86/mm/extable.c
@@ -1,6 +1,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
+#include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/traps.h>
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
 	 * Old CPUs leave the high bits of CS on the stack
 	 * undefined. I'm not sure which CPUs do this, but at least
 	 * the 486 DX works this way.
+	 * Xen pv domains are not using the default __KERNEL_CS.
 	 */
-	if (regs->cs != __KERNEL_CS)
+	if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
 		goto fail;
 
 	/*

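The reasoning behind the relaxed check: Xen pv domains run with a hypervisor-supplied kernel code segment rather than the default __KERNEL_CS, so the old CS sanity check would reject legitimate early faults in such guests, as the added comment line notes.
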
|
@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
|
|||
* We should get host bridge information from ACPI unless the BIOS
|
||||
* doesn't support it.
|
||||
*/
|
||||
if (acpi_os_get_root_pointer())
|
||||
if (!acpi_disabled && acpi_os_get_root_pointer())
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
|
|
|
arch/x86/platform/uv/uv_nmi.c
@@ -905,7 +905,7 @@ static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 /*
  * UV NMI handler
  */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
 	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
 	int cpu = smp_processor_id();
@@ -1013,7 +1013,7 @@ void uv_nmi_init(void)
 }
 
 /* Setup HUB NMI info */
-void __init uv_nmi_setup_common(bool hubbed)
+static void __init uv_nmi_setup_common(bool hubbed)
 {
 	int size = sizeof(void *) * (1 << NODES_SHIFT);
 	int cpu;

arch/x86/power/cpu.c
@@ -226,8 +226,20 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
 #endif
 
+#ifdef CONFIG_X86_64
+	/*
+	 * We need GSBASE restored before percpu access can work.
+	 * percpu access can happen in exception handlers or in complicated
+	 * helpers like load_gs_index().
+	 */
+	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+#endif
+
+	fix_processor_context();
+
 	/*
-	 * segment registers
+	 * Restore segment registers. This happens after restoring the GDT
+	 * and LDT, which happen in fix_processor_context().
 	 */
 #ifdef CONFIG_X86_32
 	loadsegment(es, ctxt->es);
@@ -248,13 +260,14 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	load_gs_index(ctxt->gs);
 	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
+	/*
+	 * Restore FSBASE and user GSBASE after reloading the respective
+	 * segment selectors.
+	 */
 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
 	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 #endif
 
-	fix_processor_context();
-
 	do_fpu_end();
 	tsc_verify_tsc_adjust(true);
 	x86_platform.restore_sched_clock_state();

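A summary of the ordering the patch establishes on x86-64, inferred from the moved lines and the new comments:

    /*
     * 1. load_idt()                 -- exception handling becomes safe
     * 2. wrmsrl(MSR_GS_BASE)        -- percpu accesses work from here on
     * 3. fix_processor_context()    -- GDT and LDT are reloaded
     * 4. segment selector writes    -- require the restored GDT
     * 5. wrmsrl(MSR_FS_BASE, MSR_KERNEL_GS_BASE)
     *                               -- last, since loading a selector
     *                                  resets the cached segment base
     */
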
arch/x86/xen/enlighten_pv.c
@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = {
 	{ simd_coprocessor_error, xen_simd_coprocessor_error, false },
 };
 
-static bool get_trap_addr(void **addr, unsigned int ist)
+static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
 	unsigned int nr;
 	bool ist_okay = false;
@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
 		}
 	}
 
+	if (nr == ARRAY_SIZE(trap_array) &&
+	    *addr >= (void *)early_idt_handler_array[0] &&
+	    *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
+		nr = (*addr - (void *)early_idt_handler_array[0]) /
+		     EARLY_IDT_HANDLER_SIZE;
+		*addr = (void *)xen_early_idt_handler_array[nr];
+	}
+
 	if (WARN_ON(ist != 0 && !ist_okay))
 		return false;
 
@@ -1262,6 +1270,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	xen_setup_gdt(0);
 
 	xen_init_irq_ops();
+
+	/* Let's presume PV guests always boot on vCPU with id 0. */
+	per_cpu(xen_vcpu_id, 0) = 0;
+
+	/*
+	 * Setup xen_vcpu early because idt_setup_early_handler needs it for
+	 * local_irq_disable(), irqs_disabled().
+	 *
+	 * Don't do the full vcpu_info placement stuff until we have
+	 * the cpu_possible_mask and a non-dummy shared_info.
+	 */
+	xen_vcpu_info_reset(0);
+
+	idt_setup_early_handler();
+
 	xen_init_capabilities();
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1295,18 +1318,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	 */
 	acpi_numa = -1;
 #endif
-	/* Let's presume PV guests always boot on vCPU with id 0. */
-	per_cpu(xen_vcpu_id, 0) = 0;
-
-	/*
-	 * Setup xen_vcpu early because start_kernel needs it for
-	 * local_irq_disable(), irqs_disabled().
-	 *
-	 * Don't do the full vcpu_info placement stuff until we have
-	 * the cpu_possible_mask and a non-dummy shared_info.
-	 */
-	xen_vcpu_info_reset(0);
-
 	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
 
 	local_irq_disable();

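A worked example of the new redirection in get_trap_addr(), with illustrative numbers: entries in early_idt_handler_array are EARLY_IDT_HANDLER_SIZE (9) bytes apart, so a registered address 117 bytes into the array gives nr = 117 / 9 = 13, i.e. vector 13 (#GP). The handler address is then swapped for xen_early_idt_handler_array[13], whose stub first pops the %rcx/%r11 that the Xen hypervisor pushes on exception entry before jumping to the native handler.
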
arch/x86/xen/xen-asm_64.S
@@ -15,6 +15,7 @@
 
 #include <xen/interface/xen.h>
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
 #endif
 xen_pv_trap hypervisor_callback
 
+	__INIT
+ENTRY(xen_early_idt_handler_array)
+	i = 0
+	.rept NUM_EXCEPTION_VECTORS
+	pop %rcx
+	pop %r11
+	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+	i = i + 1
+	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+	.endr
+END(xen_early_idt_handler_array)
+	__FINIT
+
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame:

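A note on the .fill line (a reading of the assembler directives, not from the commit message): after each stub's pop/pop/jmp, it pads with 0xcc (int3) bytes up to the next XEN_EARLY_IDT_HANDLER_SIZE boundary, so the array consists of NUM_EXCEPTION_VECTORS fixed 8-byte entries that get_trap_addr() can index with plain arithmetic, mirroring the fixed 9-byte entries of early_idt_handler_array.
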