x86/MSI: Support multiple MSIs in presence of IRQ remapping

The MSI specification has several constraints compared with MSI-X,
the most notable being the inability to configure MSIs
independently. As a result, it is impossible to dispatch
interrupts from different queues to different CPUs, which
largely devalues multiple-MSI support on SMP systems.
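
A schematic sketch of why that is (illustration only, not kernel code):
with plain MSI the OS programs a single address/data pair for the whole
function, and a device granted 2^m messages derives message k by
replacing the low m bits of the data value, so every message shares the
one destination encoded in the address. The struct and helper names
below are made up for the example.

/* Illustration only -- not the kernel's implementation. */
struct msi_pair_sketch {
	unsigned int address;	/* encodes one destination (APIC ID) shared by all messages */
	unsigned int data;	/* low byte holds the base interrupt vector */
};

static unsigned int msi_vector_of(const struct msi_pair_sketch *p,
				  unsigned int k, unsigned int m)
{
	/* Message k only replaces the low m bits of the base vector ... */
	return ((p->data & 0xffU) & ~((1U << m) - 1U)) | k;
	/* ... while p->address, and hence the target CPU, stays the same. */
}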

Also, the need to allocate a contiguous block of vector
numbers for devices capable of multiple MSIs can put
considerable pressure on the x86 interrupt vector allocator
and lead to fragmentation of the interrupt vector space.
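
Without remapping this means the allocator must find, on one CPU, 2^m
free vectors that are both contiguous and aligned on their own size. A
hedged sketch of such a search (hypothetical helper, standalone
illustration only):

/* Hypothetical illustration of the contiguity/alignment requirement. */
#include <stdbool.h>

#define NR_VECTORS_SKETCH 256

static bool vector_used[NR_VECTORS_SKETCH];	/* stand-in for per-CPU allocator state */

static int find_aligned_vector_block(unsigned int count)	/* count is a power of two */
{
	unsigned int base, i;

	for (base = 0; base + count <= NR_VECTORS_SKETCH; base += count) {
		for (i = 0; i < count; i++)
			if (vector_used[base + i])
				break;
		if (i == count)
			return base;	/* found a contiguous, size-aligned block */
	}
	return -1;	/* free vectors may exist, yet no usable block -- fragmentation */
}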

This patch overcomes both drawbacks in the presence of IRQ
remapping and lets devices take advantage of multiple queues
and per-IRQ affinity assignments.
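
For reference, a hypothetical driver fragment showing how the result
could be consumed (a sketch only; it assumes pci_enable_msi_block(),
request_irq() and irq_set_affinity_hint() as they exist around this
kernel version, and the "example_*" names are made up):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t example_queue_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_queue_irqs(struct pci_dev *pdev, unsigned int nvec)
{
	unsigned int i;
	int rc;

	/* 0 on success; a positive return suggests a smaller feasible count. */
	rc = pci_enable_msi_block(pdev, nvec);
	if (rc)
		return rc < 0 ? rc : -ENOSPC;

	for (i = 0; i < nvec; i++) {
		/* Each MSI in the block is a separate Linux IRQ: pdev->irq + i ... */
		rc = request_irq(pdev->irq + i, example_queue_isr, 0,
				 "example-queue", pdev);
		if (rc)
			goto err;
		/* ... so per-queue affinity hints now make sense. */
		irq_set_affinity_hint(pdev->irq + i,
				      cpumask_of(i % num_online_cpus()));
	}
	return 0;

err:
	while (i--) {
		irq_set_affinity_hint(pdev->irq + i, NULL);
		free_irq(pdev->irq + i, pdev);
	}
	pci_disable_msi(pdev);
	return rc;
}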

Signed-off-by: Alexander Gordeev <agordeev@redhat.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/c8bd86ff56b5fc118257436768aaa04489ac0a4c.1353324359.git.agordeev@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 51906e779f (parent 4cca6ea04d)
Author: Alexander Gordeev, 2012-11-19 16:01:29 +01:00
Committed by: Ingo Molnar
3 changed files with 162 additions and 42 deletions

arch/x86/kernel/apic/io_apic.c

@@ -300,9 +300,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
return cfg;
}
static int alloc_irq_from(unsigned int from, int node)
static int alloc_irqs_from(unsigned int from, unsigned int count, int node)
{
return irq_alloc_desc_from(from, node);
return irq_alloc_descs_from(from, count, node);
}
static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
@@ -2982,37 +2982,58 @@ device_initcall(ioapic_init_ops);
/*
* Dynamic irq allocate and deallocation
*/
unsigned int create_irq_nr(unsigned int from, int node)
unsigned int __create_irqs(unsigned int from, unsigned int count, int node)
{
struct irq_cfg *cfg;
struct irq_cfg **cfg;
unsigned long flags;
unsigned int ret = 0;
int irq;
int irq, i;
if (from < nr_irqs_gsi)
from = nr_irqs_gsi;
irq = alloc_irq_from(from, node);
cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node);
if (!cfg)
return 0;
irq = alloc_irqs_from(from, count, node);
if (irq < 0)
return 0;
cfg = alloc_irq_cfg(irq, node);
if (!cfg) {
free_irq_at(irq, NULL);
return 0;
goto out_cfgs;
for (i = 0; i < count; i++) {
cfg[i] = alloc_irq_cfg(irq + i, node);
if (!cfg[i])
goto out_irqs;
}
raw_spin_lock_irqsave(&vector_lock, flags);
if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
ret = irq;
for (i = 0; i < count; i++)
if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus()))
goto out_vecs;
raw_spin_unlock_irqrestore(&vector_lock, flags);
if (ret) {
irq_set_chip_data(irq, cfg);
irq_clear_status_flags(irq, IRQ_NOREQUEST);
} else {
free_irq_at(irq, cfg);
for (i = 0; i < count; i++) {
irq_set_chip_data(irq + i, cfg[i]);
irq_clear_status_flags(irq + i, IRQ_NOREQUEST);
}
return ret;
kfree(cfg);
return irq;
out_vecs:
for (i--; i >= 0; i--)
__clear_irq_vector(irq + i, cfg[i]);
raw_spin_unlock_irqrestore(&vector_lock, flags);
out_irqs:
for (i = 0; i < count; i++)
free_irq_at(irq + i, cfg[i]);
out_cfgs:
kfree(cfg);
return 0;
}
unsigned int create_irq_nr(unsigned int from, int node)
{
return __create_irqs(from, 1, node);
}
int create_irq(void)
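
In short, __create_irqs() above returns the first of "count" consecutive
IRQ numbers, each already set up with its own irq_cfg and vector, or 0
with everything rolled back on failure. A hypothetical caller (not part
of the patch) to illustrate the contract:

/* Hypothetical illustration of the __create_irqs() contract. */
static int example_grab_irq_block(unsigned int count, int node)
{
	unsigned int irq = __create_irqs(nr_irqs_gsi, count, node);

	if (!irq)
		return -ENOSPC;		/* nothing was allocated */

	/* irq .. irq + count - 1 are now usable; undo with destroy_irqs(irq, count), added below. */
	return irq;
}
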
@@ -3045,6 +3066,14 @@ void destroy_irq(unsigned int irq)
free_irq_at(irq, cfg);
}
static inline void destroy_irqs(unsigned int irq, unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++)
destroy_irq(irq + i);
}
/*
* MSI message composition
*/
@@ -3071,7 +3100,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
if (irq_remapped(cfg)) {
compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
return err;
return 0;
}
if (x2apic_enabled())
@@ -3098,7 +3127,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
MSI_DATA_DELIVERY_LOWPRI) |
MSI_DATA_VECTOR(cfg->vector);
return err;
return 0;
}
static int
@@ -3136,18 +3165,26 @@ static struct irq_chip msi_chip = {
.irq_retrigger = ioapic_retrigger_irq,
};
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
unsigned int irq_base, unsigned int irq_offset)
{
struct irq_chip *chip = &msi_chip;
struct msi_msg msg;
unsigned int irq = irq_base + irq_offset;
int ret;
ret = msi_compose_msg(dev, irq, &msg, -1);
if (ret < 0)
return ret;
irq_set_msi_desc(irq, msidesc);
write_msi_msg(irq, &msg);
irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
/*
* MSI-X message is written per-IRQ, the offset is always 0.
* MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
*/
if (!irq_offset)
write_msi_msg(irq, &msg);
if (irq_remapped(irq_get_chip_data(irq))) {
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
@@ -3161,23 +3198,19 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
return 0;
}
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
int setup_msix_irqs(struct pci_dev *dev, int nvec)
{
int node, ret, sub_handle, index = 0;
unsigned int irq, irq_want;
struct msi_desc *msidesc;
/* x86 doesn't support multiple MSI yet */
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
node = dev_to_node(&dev->dev);
irq_want = nr_irqs_gsi;
sub_handle = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
irq = create_irq_nr(irq_want, node);
if (irq == 0)
return -1;
return -ENOSPC;
irq_want = irq + 1;
if (!irq_remapping_enabled)
goto no_ir;
@@ -3199,7 +3232,7 @@ int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
goto error;
}
no_ir:
ret = setup_msi_irq(dev, msidesc, irq);
ret = setup_msi_irq(dev, msidesc, irq, 0);
if (ret < 0)
goto error;
sub_handle++;
@@ -3211,6 +3244,74 @@ int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return ret;
}
int setup_msi_irqs(struct pci_dev *dev, int nvec)
{
int node, ret, sub_handle, index = 0;
unsigned int irq;
struct msi_desc *msidesc;
if (nvec > 1 && !irq_remapping_enabled)
return 1;
nvec = __roundup_pow_of_two(nvec);
WARN_ON(!list_is_singular(&dev->msi_list));
msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
WARN_ON(msidesc->irq);
WARN_ON(msidesc->msi_attrib.multiple);
node = dev_to_node(&dev->dev);
irq = __create_irqs(nr_irqs_gsi, nvec, node);
if (irq == 0)
return -ENOSPC;
if (!irq_remapping_enabled) {
ret = setup_msi_irq(dev, msidesc, irq, 0);
if (ret < 0)
goto error;
return 0;
}
msidesc->msi_attrib.multiple = ilog2(nvec);
for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
if (!sub_handle) {
index = msi_alloc_remapped_irq(dev, irq, nvec);
if (index < 0) {
ret = index;
goto error;
}
} else {
ret = msi_setup_remapped_irq(dev, irq + sub_handle,
index, sub_handle);
if (ret < 0)
goto error;
}
ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
if (ret < 0)
goto error;
}
return 0;
error:
destroy_irqs(irq, nvec);
/*
* Restore altered MSI descriptor fields and prevent just destroyed
* IRQs from tearing down again in default_teardown_msi_irqs()
*/
msidesc->irq = 0;
msidesc->msi_attrib.multiple = 0;
return ret;
}
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
if (type == PCI_CAP_ID_MSI)
return setup_msi_irqs(dev, nvec);
return setup_msix_irqs(dev, nvec);
}
void native_teardown_msi_irq(unsigned int irq)
{
destroy_irq(irq);
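
One note on the new setup_msi_irqs() above: the MSI capability can only
grant 1, 2, 4, 8, 16 or 32 messages and encodes the granted count as a
power of two, which is why nvec is rounded up with
__roundup_pow_of_two() and ilog2(nvec) is stored in
msi_attrib.multiple. A standalone sketch of that arithmetic
(illustration only, helper names made up):

/* Illustration only: MSI's power-of-two message-count encoding. */
#include <assert.h>

static unsigned int roundup_pow_of_two_sketch(unsigned int n)
{
	unsigned int v = 1;

	while (v < n)
		v <<= 1;
	return v;
}

static unsigned int ilog2_sketch(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* A request for 5 vectors is granted as a block of 8; "multiple" = 3. */
	assert(roundup_pow_of_two_sketch(5) == 8);
	assert(ilog2_sketch(8) == 3);
	return 0;
}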

include/linux/irq.h

@@ -528,6 +528,8 @@ extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);
static inline struct irq_chip *irq_get_chip(unsigned int irq)
@@ -590,6 +592,9 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
#define irq_alloc_desc_from(from, node) \
irq_alloc_descs(-1, from, 1, node)
#define irq_alloc_descs_from(from, cnt, node) \
irq_alloc_descs(-1, from, cnt, node)
void irq_free_descs(unsigned int irq, unsigned int cnt);
int irq_reserve_irqs(unsigned int from, unsigned int cnt);

kernel/irq/chip.c

@@ -89,6 +89,29 @@ int irq_set_handler_data(unsigned int irq, void *data)
}
EXPORT_SYMBOL(irq_set_handler_data);
/**
* irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
* @irq_base: Interrupt number base
* @irq_offset: Interrupt number offset
* @entry: Pointer to MSI descriptor data
*
* Set the MSI descriptor entry for an irq at offset
*/
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
struct msi_desc *entry)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
desc->irq_data.msi_desc = entry;
if (entry && !irq_offset)
entry->irq = irq_base;
irq_put_desc_unlock(desc, flags);
return 0;
}
/**
* irq_set_msi_desc - set MSI descriptor data for an irq
* @irq: Interrupt number
@@ -98,16 +121,7 @@ EXPORT_SYMBOL(irq_set_handler_data);
*/
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
desc->irq_data.msi_desc = entry;
if (entry)
entry->irq = irq;
irq_put_desc_unlock(desc, flags);
return 0;
return irq_set_msi_desc_off(irq, 0, entry);
}
/**