mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 updates and fixes from Thomas Gleixner:

 - Fix the (late) fallout from the vector management rework causing
   hlist corruption and irq descriptor reference leaks caused by a
   missing sanity check.

   The straightforward fix triggered another long-standing issue to
   surface. The pre-rework code hid the issue because it was way
   slower, but now the chance that user space sees an EBUSY error
   return when updating irq affinities is way higher. Quite a few
   userspace tools do not handle it properly, despite the fact that
   EBUSY has been a possible return value for at least 10 years.

   It turned out that the EBUSY return can be avoided completely by
   utilizing the existing delayed affinity update mechanism for irq
   remapped scenarios as well. That's a bit more error handling in the
   kernel, but avoids fruitless fingerpointing discussions with tool
   developers.

 - Decouple PHYSICAL_MASK from AMD SME as it's going to be required
   for the upcoming Intel memory encryption support as well.

 - Handle legacy device ACPI detection properly for newer platforms.

 - Fix the wrong argument ordering in the vector allocation tracepoint.

 - Simplify the IDT setup code for the APIC=n case.

 - Use the proper string helpers in the MTRR code.

 - Remove a stale unused VDSO source file.

 - Convert the microcode update lock to a raw spinlock as it's used in
   atomic context.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/intel_rdt: Enable CMT and MBM on new Skylake stepping
  x86/apic/vector: Print APIC control bits in debugfs
  genirq/affinity: Defer affinity setting if irq chip is busy
  x86/platform/uv: Use apic_ack_irq()
  x86/ioapic: Use apic_ack_irq()
  irq_remapping: Use apic_ack_irq()
  x86/apic: Provide apic_ack_irq()
  genirq/migration: Avoid out of line call if pending is not set
  genirq/generic_pending: Do not lose pending affinity update
  x86/apic/vector: Prevent hlist corruption and leaks
  x86/vector: Fix the args of vector_alloc tracepoint
  x86/idt: Simplify the idt_setup_apic_and_irq_gates()
  x86/platform/uv: Remove extra parentheses
  x86/mm: Decouple dynamic __PHYSICAL_MASK from AMD SME
  x86: Mark native_set_p4d() as __always_inline
  x86/microcode: Make the late update update_lock a raw lock for RT
  x86/mtrr: Convert to use strncpy_from_user() helper
  x86/mtrr: Convert to use match_string() helper
  x86/vdso: Remove unused file
  x86/i8237: Register device based on FADT legacy boot flag
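The EBUSY case discussed above reaches ordinary tooling through the /proc/irq interface. As a hedged illustration (the IRQ number and mask are made-up examples, not taken from this series), an affinity-setting tool looks roughly like the sketch below; before these changes the write could fail with EBUSY while a previous vector move was still pending:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Example only: IRQ 30, mask 0x4 (CPU 2); adjust for a real system. */
	FILE *f = fopen("/proc/irq/30/smp_affinity", "w");

	if (!f)
		return 1;
	if (fprintf(f, "4\n") < 0 || fflush(f) == EOF)
		fprintf(stderr, "affinity update failed: %s\n", strerror(errno));
	fclose(f);
	return 0;
}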
This commit is contained in:
commit f4e5b30d80
arch/x86/Kconfig
@@ -334,6 +334,9 @@ config ARCH_SUPPORTS_UPROBES
 config FIX_EARLYCON_MEM
 	def_bool y
 
+config DYNAMIC_PHYSICAL_MASK
+	bool
+
 config PGTABLE_LEVELS
 	int
 	default 5 if X86_5LEVEL
@@ -1486,6 +1489,7 @@ config ARCH_HAS_MEM_ENCRYPT
 config AMD_MEM_ENCRYPT
 	bool "AMD Secure Memory Encryption (SME) support"
 	depends on X86_64 && CPU_SUP_AMD
+	select DYNAMIC_PHYSICAL_MASK
 	---help---
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory
arch/x86/boot/compressed/kaslr_64.c
@@ -69,6 +69,8 @@ static struct alloc_pgt_data pgt_data;
 /* The top level page table entry pointer. */
 static unsigned long top_level_pgt;
 
+phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+
 /*
  * Mapping information structure passed to kernel_ident_mapping_init().
  * Due to relocation, pointers must be assigned at run time not build time.
@@ -81,6 +83,9 @@ void initialize_identity_maps(void)
 	/* If running as an SEV guest, the encryption mask is required. */
 	set_sev_encryption_mask();
 
+	/* Exclude the encryption mask from __PHYSICAL_MASK */
+	physical_mask &= ~sme_me_mask;
+
 	/* Init mapping_info with run-time function/buffer pointers. */
 	mapping_info.alloc_pgt_page = alloc_pgt_page;
 	mapping_info.context = &pgt_data;
arch/x86/include/asm/apic.h
@@ -436,6 +436,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
+extern void apic_ack_irq(struct irq_data *data);
+
 static inline void ack_APIC_irq(void)
 {
 	/*
arch/x86/include/asm/page_types.h
@@ -17,7 +17,6 @@
 #define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
 #define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
 
-#define __PHYSICAL_MASK		((phys_addr_t)(__sme_clr((1ULL << __PHYSICAL_MASK_SHIFT) - 1)))
 #define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
 /* Cast *PAGE_MASK to a signed type so that it is sign-extended if
@@ -55,6 +54,13 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
+extern phys_addr_t physical_mask;
+#define __PHYSICAL_MASK		physical_mask
+#else
+#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#endif
+
 extern int devmem_is_allowed(unsigned long pagenr);
 
 extern unsigned long max_low_pfn_mapped;
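To make the split concrete, here is a minimal user-space sketch (not kernel code; the C-bit position is a made-up example) of what the dynamic mask buys: start from the architectural maximum and clear any bit claimed by memory encryption, which is what sme_enable() and initialize_identity_maps() now do with physical_mask:

#include <stdint.h>
#include <stdio.h>

#define PHYSICAL_MASK_SHIFT	52	/* x86-64 architectural maximum */

int main(void)
{
	uint64_t physical_mask = (1ULL << PHYSICAL_MASK_SHIFT) - 1;
	uint64_t sme_me_mask = 1ULL << 47;	/* example C-bit position */

	/* What the boot code does once the encryption mask is known. */
	physical_mask &= ~sme_me_mask;
	printf("usable physical address mask: %#llx\n",
	       (unsigned long long)physical_mask);
	return 0;
}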
arch/x86/include/asm/pgtable_64.h
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 }
 #endif
 
-static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
 	pgd_t pgd;
 
@@ -230,7 +230,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 	*p4dp = native_make_p4d(native_pgd_val(pgd));
 }
 
-static inline void native_p4d_clear(p4d_t *p4d)
+static __always_inline void native_p4d_clear(p4d_t *p4d)
 {
 	native_set_p4d(p4d, native_make_p4d(0));
 }
arch/x86/include/asm/trace/irq_vectors.h
@@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
 	TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
 		 int ret),
 
-	TP_ARGS(irq, vector, ret, reserved),
+	TP_ARGS(irq, vector, reserved, ret),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	irq	)
arch/x86/include/asm/x86_init.h
@@ -301,5 +301,6 @@ extern struct x86_apic_ops x86_apic_ops;
 extern void x86_early_init_platform_quirks(void);
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
+extern bool x86_pnpbios_disabled(void);
 
 #endif
arch/x86/kernel/apic/io_apic.c
@@ -1851,7 +1851,7 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
 	 * intr-remapping table entry. Hence for the io-apic
 	 * EOI we use the pin number.
 	 */
-	ack_APIC_irq();
+	apic_ack_irq(irq_data);
 	eoi_ioapic_pin(data->entry.vector, data);
 }
 
arch/x86/kernel/apic/vector.c
@@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
 	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
 		return 0;
 
+	/*
+	 * Careful here. @apicd might either have move_in_progress set or
+	 * be enqueued for cleanup. Assigning a new vector would either
+	 * leave a stale vector on some CPU around or in case of a pending
+	 * cleanup corrupt the hlist.
+	 */
+	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
+		return -EBUSY;
+
 	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
 	if (vector > 0)
 		apic_update_vector(irqd, vector, cpu);
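The hlist_unhashed() test can stand in for "no cleanup queued" only because the node is set up with INIT_HLIST_NODE() and taken off the list with hlist_del_init(). A minimal sketch of that contract (example names, not from this diff):

#include <linux/list.h>

struct example_entry {
	struct hlist_node clist;
};

static void example_init(struct example_entry *e)
{
	INIT_HLIST_NODE(&e->clist);	/* hlist_unhashed() is now true */
}

static bool example_cleanup_queued(struct example_entry *e)
{
	/* True while the entry sits on a cleanup list. */
	return !hlist_unhashed(&e->clist);
}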
@@ -579,8 +588,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
 				  struct irq_data *irqd, int ind)
 {
-	unsigned int cpu, vector, prev_cpu, prev_vector;
-	struct apic_chip_data *apicd;
+	struct apic_chip_data apicd;
 	unsigned long flags;
 	int irq;
 
@@ -596,24 +604,26 @@ static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
 		return;
 	}
 
-	apicd = irqd->chip_data;
-	if (!apicd) {
+	if (!irqd->chip_data) {
 		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
 		return;
 	}
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	cpu = apicd->cpu;
-	vector = apicd->vector;
-	prev_cpu = apicd->prev_cpu;
-	prev_vector = apicd->prev_vector;
+	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
-	seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
-	seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
-	if (prev_vector) {
-		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
-		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
+
+	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
+	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
+	if (apicd.prev_vector) {
+		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
+		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
 	}
+	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
+	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
+	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
+	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
+	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
 }
 #endif
 
@@ -800,11 +810,16 @@ static int apic_retrigger_irq(struct irq_data *irqd)
 	return 1;
 }
 
+void apic_ack_irq(struct irq_data *irqd)
+{
+	irq_move_irq(irqd);
+	ack_APIC_irq();
+}
+
 void apic_ack_edge(struct irq_data *irqd)
 {
 	irq_complete_move(irqd_cfg(irqd));
-	irq_move_irq(irqd);
-	ack_APIC_irq();
+	apic_ack_irq(irqd);
 }
 
 static struct irq_chip lapic_controller = {
arch/x86/kernel/cpu/intel_rdt.c
@@ -845,6 +845,8 @@ static __init void rdt_quirks(void)
 	case INTEL_FAM6_SKYLAKE_X:
 		if (boot_cpu_data.x86_stepping <= 4)
 			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
+		else
+			set_rdt_options("!l3cat");
 	}
 }
 
arch/x86/kernel/cpu/microcode/core.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex);
 /*
  * Serialize late loading so that CPUs get updated one-by-one.
  */
-static DEFINE_SPINLOCK(update_lock);
+static DEFINE_RAW_SPINLOCK(update_lock);
 
 struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
 
@@ -560,9 +560,9 @@ static int __reload_late(void *info)
 	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
 		return -1;
 
-	spin_lock(&update_lock);
+	raw_spin_lock(&update_lock);
 	apply_microcode_local(&err);
-	spin_unlock(&update_lock);
+	raw_spin_unlock(&update_lock);
 
 	/* siblings return UCODE_OK because their engine got updated already */
 	if (err > UCODE_NFOUND) {
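The conversion matters on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock and therefore must not be taken in the hard-atomic context of the late microcode load, while raw_spinlock_t always stays a busy-waiting lock. A minimal sketch of the pattern (example names, not from this diff):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* never sleeps, even on RT */

static void example_atomic_update(int *shared, int val)
{
	/* Short, bounded critical section safe in atomic context. */
	raw_spin_lock(&example_lock);
	*shared = val;
	raw_spin_unlock(&example_lock);
}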
arch/x86/kernel/cpu/mtrr/if.c
@@ -106,17 +106,9 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 
 	memset(line, 0, LINE_SIZE);
 
-	length = len;
-	length--;
-
-	if (length > LINE_SIZE - 1)
-		length = LINE_SIZE - 1;
-
+	length = strncpy_from_user(line, buf, LINE_SIZE - 1);
 	if (length < 0)
-		return -EINVAL;
-
-	if (copy_from_user(line, buf, length))
-		return -EFAULT;
+		return length;
 
 	linelen = strlen(line);
 	ptr = line + linelen - 1;
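The replacement leans on the strncpy_from_user() contract: it copies a NUL-terminated string from user space and returns the number of bytes copied (excluding the terminator) or -EFAULT, collapsing the manual length clamping plus copy_from_user() into one call. A hedged sketch of the same idiom (example helper, not from this diff):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Copy a user string into @dst (size @size) and reject anything that
 * does not fit; a return of @size means the source was truncated. */
static ssize_t example_get_string(char *dst, const char __user *src,
				  size_t size)
{
	ssize_t len = strncpy_from_user(dst, src, size);

	if (len < 0)
		return len;		/* propagates -EFAULT */
	if (len == size)
		return -E2BIG;		/* user string was too long */
	return len;
}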
@@ -149,17 +141,16 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 		return -EINVAL;
 	ptr = skip_spaces(ptr + 5);
 
-	for (i = 0; i < MTRR_NUM_TYPES; ++i) {
-		if (strcmp(ptr, mtrr_strings[i]))
-			continue;
-		base >>= PAGE_SHIFT;
-		size >>= PAGE_SHIFT;
-		err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
-		if (err < 0)
-			return err;
-		return len;
-	}
-	return -EINVAL;
+	i = match_string(mtrr_strings, MTRR_NUM_TYPES, ptr);
+	if (i < 0)
+		return i;
+
+	base >>= PAGE_SHIFT;
+	size >>= PAGE_SHIFT;
+	err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
+	if (err < 0)
+		return err;
+	return len;
 }
 
 static long
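match_string() scans a fixed-size array of strings and returns the matching index or -EINVAL, which is what lets the open-coded loop collapse to a few lines. A small sketch of the helper's contract (example table, not the MTRR one):

#include <linux/kernel.h>
#include <linux/string.h>

static const char * const example_modes[] = {
	"off", "on", "auto",
};

/* Returns 0, 1 or 2 for a known mode, -EINVAL otherwise. */
static int example_parse_mode(const char *s)
{
	return match_string(example_modes, ARRAY_SIZE(example_modes), s);
}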
arch/x86/kernel/i8237.c
@@ -9,10 +9,12 @@
  * your option) any later version.
  */
 
+#include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/syscore_ops.h>
 
 #include <asm/dma.h>
+#include <asm/x86_init.h>
 
 /*
  * This module just handles suspend/resume issues with the
@@ -49,6 +51,29 @@ static struct syscore_ops i8237_syscore_ops = {
 
 static int __init i8237A_init_ops(void)
 {
+	/*
+	 * From SKL PCH onwards, the legacy DMA device is removed in which the
+	 * I/O ports (81h-83h, 87h, 89h-8Bh, 8Fh) related to it are removed
+	 * as well. All removed ports must return 0xff for a inb() request.
+	 *
+	 * Note: DMA_PAGE_2 (port 0x81) should not be checked for detecting
+	 * the presence of DMA device since it may be used by BIOS to decode
+	 * LPC traffic for POST codes. Original LPC only decodes one byte of
+	 * port 0x80 but some BIOS may choose to enhance PCH LPC port 0x8x
+	 * decoding.
+	 */
+	if (dma_inb(DMA_PAGE_0) == 0xFF)
+		return -ENODEV;
+
+	/*
+	 * It is not required to load this driver as newer SoC may not
+	 * support 8237 DMA or bus mastering from LPC. Platform firmware
+	 * must announce the support for such legacy devices via
+	 * ACPI_FADT_LEGACY_DEVICES field in FADT table.
+	 */
+	if (x86_pnpbios_disabled() && dmi_get_bios_year() >= 2017)
+		return -ENODEV;
+
 	register_syscore_ops(&i8237_syscore_ops);
 	return 0;
 }
arch/x86/kernel/idt.c
@@ -317,15 +317,12 @@ void __init idt_setup_apic_and_irq_gates(void)
 		set_intr_gate(i, entry);
 	}
 
-	for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
 #ifdef CONFIG_X86_LOCAL_APIC
+	for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
 		set_bit(i, system_vectors);
 		set_intr_gate(i, spurious_interrupt);
-#else
-		entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
-		set_intr_gate(i, entry);
-#endif
 	}
+#endif
 }
 
 /**
arch/x86/kernel/platform-quirks.c
@@ -33,9 +33,14 @@ void __init x86_early_init_platform_quirks(void)
 		x86_platform.set_legacy_features();
 }
 
-#if defined(CONFIG_PNPBIOS)
-bool __init arch_pnpbios_disabled(void)
+bool __init x86_pnpbios_disabled(void)
 {
 	return x86_platform.legacy.devices.pnpbios == 0;
 }
+
+#if defined(CONFIG_PNPBIOS)
+bool __init arch_pnpbios_disabled(void)
+{
+	return x86_pnpbios_disabled();
+}
 #endif
arch/x86/mm/mem_encrypt.c
@@ -527,6 +527,7 @@ void __init sme_enable(struct boot_params *bp)
 		/* SEV state cannot be controlled by a command line option */
 		sme_me_mask = me_mask;
 		sev_enabled = true;
+		physical_mask &= ~sme_me_mask;
 		return;
 	}
 
@@ -561,4 +562,6 @@ void __init sme_enable(struct boot_params *bp)
 		sme_me_mask = 0;
 	else
 		sme_me_mask = active_by_default ? me_mask : 0;
+
+	physical_mask &= ~sme_me_mask;
 }
arch/x86/mm/pgtable.c
@@ -8,6 +8,11 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
+#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
+phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+EXPORT_SYMBOL(physical_mask);
+#endif
+
 #define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 
 #ifdef CONFIG_HIGHPTE
arch/x86/platform/uv/tlb_uv.c
@@ -615,7 +615,7 @@ static int uv2_3_wait_completion(struct bau_desc *bau_desc,
 
 	/* spin on the status MMR, waiting for it to go idle */
 	while (descriptor_stat != UV2H_DESC_IDLE) {
-		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
+		if (descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) {
 			/*
 			 * A h/w bug on the destination side may
 			 * have prevented the message being marked
arch/x86/platform/uv/uv_irq.c
@@ -47,11 +47,6 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
 
 static void uv_noop(struct irq_data *data) { }
 
-static void uv_ack_apic(struct irq_data *data)
-{
-	ack_APIC_irq();
-}
-
 static int
 uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 		    bool force)
@@ -73,7 +68,7 @@ static struct irq_chip uv_irq_chip = {
 	.name			= "UV-CORE",
 	.irq_mask		= uv_noop,
 	.irq_unmask		= uv_noop,
-	.irq_eoi		= uv_ack_apic,
+	.irq_eoi		= apic_ack_irq,
 	.irq_set_affinity	= uv_set_irq_affinity,
 };
 
drivers/iommu/amd_iommu.c
@@ -4385,7 +4385,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 
 static struct irq_chip amd_ir_chip = {
 	.name			= "AMD-IR",
-	.irq_ack		= ir_ack_apic_edge,
+	.irq_ack		= apic_ack_irq,
 	.irq_set_affinity	= amd_ir_set_affinity,
 	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
 	.irq_compose_msi_msg	= ir_compose_msi_msg,
drivers/iommu/intel_irq_remapping.c
@@ -1223,7 +1223,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
 
 static struct irq_chip intel_ir_chip = {
 	.name			= "INTEL-IR",
-	.irq_ack		= ir_ack_apic_edge,
+	.irq_ack		= apic_ack_irq,
 	.irq_set_affinity	= intel_ir_set_affinity,
 	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
 	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
drivers/iommu/irq_remapping.c
@@ -156,11 +156,6 @@ void panic_if_irq_remap(const char *msg)
 		panic(msg);
 }
 
-void ir_ack_apic_edge(struct irq_data *data)
-{
-	ack_APIC_irq();
-}
-
 /**
  * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
  *				     device serving request @info
drivers/iommu/irq_remapping.h
@@ -65,8 +65,6 @@ struct irq_remap_ops {
 extern struct irq_remap_ops intel_irq_remap_ops;
 extern struct irq_remap_ops amd_iommu_irq_ops;
 
-extern void ir_ack_apic_edge(struct irq_data *data);
-
 #else  /* CONFIG_IRQ_REMAP */
 
 #define irq_remapping_enabled 0
include/linux/irq.h
@@ -552,7 +552,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
 #endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
-void irq_move_irq(struct irq_data *data);
+void __irq_move_irq(struct irq_data *data);
+static inline void irq_move_irq(struct irq_data *data)
+{
+	if (unlikely(irqd_is_setaffinity_pending(data)))
+		__irq_move_irq(data);
+}
 void irq_move_masked_irq(struct irq_data *data);
 void irq_force_complete_move(struct irq_desc *desc);
 #else
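The header change is the classic inline fast-path idiom: test the cheap predicate inline and take the out-of-line call only when a move is actually pending, so the common interrupt path pays for a flag test instead of a function call. A generic sketch of the idiom (example names, not kernel API):

#include <linux/compiler.h>
#include <linux/types.h>

struct example_state {
	bool pending;
};

void example_slow_path(struct example_state *s);	/* out of line, rare */

static inline void example_fast_path(struct example_state *s)
{
	if (unlikely(s->pending))	/* common case: one flag test, no call */
		example_slow_path(s);
}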
kernel/irq/manage.c
@@ -205,6 +205,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+
+	irqd_set_move_pending(data);
+	irq_copy_pending(desc, dest);
+	return 0;
+}
+#else
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	return -EBUSY;
+}
+#endif
+
+static int irq_try_set_affinity(struct irq_data *data,
+				const struct cpumask *dest, bool force)
+{
+	int ret = irq_do_set_affinity(data, dest, force);
+
+	/*
+	 * In case that the underlying vector management is busy and the
+	 * architecture supports the generic pending mechanism then utilize
+	 * this to avoid returning an error to user space.
+	 */
+	if (ret == -EBUSY && !force)
+		ret = irq_set_affinity_pending(data, dest);
+	return ret;
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 			    bool force)
 {
@@ -215,8 +248,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, force);
+	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
+		ret = irq_try_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
kernel/irq/migration.c
@@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
 void irq_move_masked_irq(struct irq_data *idata)
 {
 	struct irq_desc *desc = irq_data_to_desc(idata);
-	struct irq_chip *chip = desc->irq_data.chip;
+	struct irq_data *data = &desc->irq_data;
+	struct irq_chip *chip = data->chip;
 
-	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
+	if (likely(!irqd_is_setaffinity_pending(data)))
 		return;
 
-	irqd_clr_move_pending(&desc->irq_data);
+	irqd_clr_move_pending(data);
 
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (irqd_is_per_cpu(&desc->irq_data)) {
+	if (irqd_is_per_cpu(data)) {
 		WARN_ON(1);
 		return;
 	}
@@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
-		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
+		int ret;
+
+		ret = irq_do_set_affinity(data, desc->pending_mask, false);
+		/*
+		 * If the there is a cleanup pending in the underlying
+		 * vector management, reschedule the move for the next
+		 * interrupt. Leave desc->pending_mask intact.
+		 */
+		if (ret == -EBUSY) {
+			irqd_set_move_pending(data);
+			return;
+		}
+	}
 	cpumask_clear(desc->pending_mask);
 }
 
-void irq_move_irq(struct irq_data *idata)
+void __irq_move_irq(struct irq_data *idata)
 {
 	bool masked;
 
@@ -90,9 +102,6 @@ void __irq_move_irq(struct irq_data *idata)
 	 */
 	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
 
-	if (likely(!irqd_is_setaffinity_pending(idata)))
-		return;
-
 	if (unlikely(irqd_irq_disabled(idata)))
 		return;
 