mirror of https://gitee.com/openkylin/linux.git
dmaengine: idxd: cleanup pci interrupt vector allocation management
The devm managed lifetime is incompatible with 'struct device' objects that
reside in the idxd context. This is one of a series of changes that clean up the
idxd driver 'struct device' lifetime. Remove devm managed pci interrupt vectors
and replace with unmanaged allocators.
Reported-by: Jason Gunthorpe <jgg@nvidia.com>
Fixes: bfe1d56091
("dmaengine: idxd: Init and probe for Intel data accelerators")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Link: https://lore.kernel.org/r/161852983563.2203940.8116028229124776669.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
This commit is contained in:
parent
3978628556
commit
5fc8e85ff1
|
@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
|
||||||
/* Interrupt control bits */
|
/* Interrupt control bits */
|
||||||
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
|
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
|
||||||
{
|
{
|
||||||
struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
|
struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
|
||||||
|
|
||||||
pci_msi_mask_irq(data);
|
pci_msi_mask_irq(data);
|
||||||
}
|
}
|
||||||
|
@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
|
||||||
|
|
||||||
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
|
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
|
||||||
{
|
{
|
||||||
struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
|
struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
|
||||||
|
|
||||||
pci_msi_unmask_irq(data);
|
pci_msi_unmask_irq(data);
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,6 +36,7 @@ struct idxd_device_driver {
|
||||||
struct idxd_irq_entry {
|
struct idxd_irq_entry {
|
||||||
struct idxd_device *idxd;
|
struct idxd_device *idxd;
|
||||||
int id;
|
int id;
|
||||||
|
int vector;
|
||||||
struct llist_head pending_llist;
|
struct llist_head pending_llist;
|
||||||
struct list_head work_list;
|
struct list_head work_list;
|
||||||
/*
|
/*
|
||||||
|
@ -219,7 +220,6 @@ struct idxd_device {
|
||||||
|
|
||||||
union sw_err_reg sw_err;
|
union sw_err_reg sw_err;
|
||||||
wait_queue_head_t cmd_waitq;
|
wait_queue_head_t cmd_waitq;
|
||||||
struct msix_entry *msix_entries;
|
|
||||||
int num_wq_irqs;
|
int num_wq_irqs;
|
||||||
struct idxd_irq_entry *irq_entries;
|
struct idxd_irq_entry *irq_entries;
|
||||||
|
|
||||||
|
|
|
@ -61,7 +61,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
|
||||||
{
|
{
|
||||||
struct pci_dev *pdev = idxd->pdev;
|
struct pci_dev *pdev = idxd->pdev;
|
||||||
struct device *dev = &pdev->dev;
|
struct device *dev = &pdev->dev;
|
||||||
struct msix_entry *msix;
|
|
||||||
struct idxd_irq_entry *irq_entry;
|
struct idxd_irq_entry *irq_entry;
|
||||||
int i, msixcnt;
|
int i, msixcnt;
|
||||||
int rc = 0;
|
int rc = 0;
|
||||||
|
@ -69,23 +68,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
|
||||||
msixcnt = pci_msix_vec_count(pdev);
|
msixcnt = pci_msix_vec_count(pdev);
|
||||||
if (msixcnt < 0) {
|
if (msixcnt < 0) {
|
||||||
dev_err(dev, "Not MSI-X interrupt capable.\n");
|
dev_err(dev, "Not MSI-X interrupt capable.\n");
|
||||||
goto err_no_irq;
|
return -ENOSPC;
|
||||||
}
|
}
|
||||||
|
|
||||||
idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
|
rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
|
||||||
msixcnt, GFP_KERNEL);
|
if (rc != msixcnt) {
|
||||||
if (!idxd->msix_entries) {
|
dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
|
||||||
rc = -ENOMEM;
|
return -ENOSPC;
|
||||||
goto err_no_irq;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < msixcnt; i++)
|
|
||||||
idxd->msix_entries[i].entry = i;
|
|
||||||
|
|
||||||
rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
|
|
||||||
if (rc) {
|
|
||||||
dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
|
|
||||||
goto err_no_irq;
|
|
||||||
}
|
}
|
||||||
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
|
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
|
||||||
|
|
||||||
|
@ -98,58 +87,57 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
if (!idxd->irq_entries) {
|
if (!idxd->irq_entries) {
|
||||||
rc = -ENOMEM;
|
rc = -ENOMEM;
|
||||||
goto err_no_irq;
|
goto err_irq_entries;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < msixcnt; i++) {
|
for (i = 0; i < msixcnt; i++) {
|
||||||
idxd->irq_entries[i].id = i;
|
idxd->irq_entries[i].id = i;
|
||||||
idxd->irq_entries[i].idxd = idxd;
|
idxd->irq_entries[i].idxd = idxd;
|
||||||
|
idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
|
||||||
spin_lock_init(&idxd->irq_entries[i].list_lock);
|
spin_lock_init(&idxd->irq_entries[i].list_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
msix = &idxd->msix_entries[0];
|
|
||||||
irq_entry = &idxd->irq_entries[0];
|
irq_entry = &idxd->irq_entries[0];
|
||||||
rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
|
rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
|
||||||
idxd_misc_thread, 0, "idxd-misc",
|
0, "idxd-misc", irq_entry);
|
||||||
irq_entry);
|
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
dev_err(dev, "Failed to allocate misc interrupt.\n");
|
dev_err(dev, "Failed to allocate misc interrupt.\n");
|
||||||
goto err_no_irq;
|
goto err_misc_irq;
|
||||||
}
|
}
|
||||||
|
|
||||||
dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
|
dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
|
||||||
msix->vector);
|
|
||||||
|
|
||||||
/* first MSI-X entry is not for wq interrupts */
|
/* first MSI-X entry is not for wq interrupts */
|
||||||
idxd->num_wq_irqs = msixcnt - 1;
|
idxd->num_wq_irqs = msixcnt - 1;
|
||||||
|
|
||||||
for (i = 1; i < msixcnt; i++) {
|
for (i = 1; i < msixcnt; i++) {
|
||||||
msix = &idxd->msix_entries[i];
|
|
||||||
irq_entry = &idxd->irq_entries[i];
|
irq_entry = &idxd->irq_entries[i];
|
||||||
|
|
||||||
init_llist_head(&idxd->irq_entries[i].pending_llist);
|
init_llist_head(&idxd->irq_entries[i].pending_llist);
|
||||||
INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
|
INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
|
||||||
rc = devm_request_threaded_irq(dev, msix->vector,
|
rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
|
||||||
idxd_irq_handler,
|
idxd_wq_thread, 0, "idxd-portal", irq_entry);
|
||||||
idxd_wq_thread, 0,
|
|
||||||
"idxd-portal", irq_entry);
|
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
dev_err(dev, "Failed to allocate irq %d.\n",
|
dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
|
||||||
msix->vector);
|
goto err_wq_irqs;
|
||||||
goto err_no_irq;
|
|
||||||
}
|
}
|
||||||
dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
|
dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
|
||||||
i, msix->vector);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
idxd_unmask_error_interrupts(idxd);
|
idxd_unmask_error_interrupts(idxd);
|
||||||
idxd_msix_perm_setup(idxd);
|
idxd_msix_perm_setup(idxd);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_no_irq:
|
err_wq_irqs:
|
||||||
|
while (--i >= 0) {
|
||||||
|
irq_entry = &idxd->irq_entries[i];
|
||||||
|
free_irq(irq_entry->vector, irq_entry);
|
||||||
|
}
|
||||||
|
err_misc_irq:
|
||||||
/* Disable error interrupt generation */
|
/* Disable error interrupt generation */
|
||||||
idxd_mask_error_interrupts(idxd);
|
idxd_mask_error_interrupts(idxd);
|
||||||
pci_disable_msix(pdev);
|
err_irq_entries:
|
||||||
|
pci_free_irq_vectors(pdev);
|
||||||
dev_err(dev, "No usable interrupts\n");
|
dev_err(dev, "No usable interrupts\n");
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
@ -495,7 +483,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
|
||||||
|
|
||||||
for (i = 0; i < msixcnt; i++) {
|
for (i = 0; i < msixcnt; i++) {
|
||||||
irq_entry = &idxd->irq_entries[i];
|
irq_entry = &idxd->irq_entries[i];
|
||||||
synchronize_irq(idxd->msix_entries[i].vector);
|
synchronize_irq(irq_entry->vector);
|
||||||
|
free_irq(irq_entry->vector, irq_entry);
|
||||||
if (i == 0)
|
if (i == 0)
|
||||||
continue;
|
continue;
|
||||||
idxd_flush_pending_llist(irq_entry);
|
idxd_flush_pending_llist(irq_entry);
|
||||||
|
@ -503,6 +492,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
|
||||||
}
|
}
|
||||||
|
|
||||||
idxd_msix_perm_clear(idxd);
|
idxd_msix_perm_clear(idxd);
|
||||||
|
pci_free_irq_vectors(pdev);
|
||||||
destroy_workqueue(idxd->wq);
|
destroy_workqueue(idxd->wq);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue