iommu/amd: Support multiple PCI DMA aliases in IRQ Remapping

Non-Transparent Bridge (NTB) devices (among others) may have many DMA
aliases, because the hardware will send requests with different device ids
depending on their origin across the bridged hardware.

See commit ad281ecf1c ("PCI: Add DMA alias quirk for Microsemi Switchtec
NTB") for more information on this.

The AMD IOMMU IRQ remapping functionality ignores all PCI aliases for
IRQs so if devices send an interrupt from one of their aliases they
will be blocked on AMD hardware with the IOMMU enabled.

To fix this, ensure IRQ remapping is enabled for all aliases with
MSI interrupts.

This is analogous to the functionality added to the Intel IRQ remapping
code in commit 3f0c625c6a ("iommu/vt-d: Allow interrupts from the entire
bus for aliased devices")

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
Logan Gunthorpe 2019-10-22 16:01:21 -06:00 committed by Joerg Roedel
parent 3332364e4e
commit 3c124435e8
1 changed file with 30 additions and 7 deletions

View File

@ -3174,7 +3174,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
iommu_flush_dte(iommu, devid);
}
static struct irq_remap_table *alloc_irq_table(u16 devid)
static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
void *data)
{
struct irq_remap_table *table = data;
irq_lookup_table[alias] = table;
set_dte_irq_entry(alias, table);
iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
return 0;
}
static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
@ -3220,7 +3233,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
table = new_table;
new_table = NULL;
set_remap_table_entry(iommu, devid, table);
if (pdev)
pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
table);
else
set_remap_table_entry(iommu, devid, table);
if (devid != alias)
set_remap_table_entry(iommu, alias, table);
@ -3237,7 +3255,8 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
return table;
}
static int alloc_irq_index(u16 devid, int count, bool align)
static int alloc_irq_index(u16 devid, int count, bool align,
struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
@ -3247,7 +3266,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (!iommu)
return -ENODEV;
table = alloc_irq_table(devid);
table = alloc_irq_table(devid, pdev);
if (!table)
return -ENODEV;
@ -3680,7 +3699,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_remap_table *table;
struct amd_iommu *iommu;
table = alloc_irq_table(devid);
table = alloc_irq_table(devid, NULL);
if (table) {
if (!table->min_index) {
/*
@ -3697,11 +3716,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
} else {
index = -ENOMEM;
}
} else {
} else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
index = alloc_irq_index(devid, nr_irqs, align);
index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
} else {
index = alloc_irq_index(devid, nr_irqs, false, NULL);
}
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
ret = index;