IB/hfi1: Replace deprecated pci functions with new API

pci_enable_msix_range() and pci_disable_msix() have been deprecated.
Update the driver to the new pci_alloc_irq_vectors() interface, looking
up per-vector IRQ numbers with pci_irq_vector() and releasing them with
pci_free_irq_vectors().

Reviewed-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author:    Michael J. Ruhl <michael.j.ruhl@intel.com>
Date:      2017-05-26 05:35:31 -07:00
Committer: Doug Ledford <dledford@redhat.com>
parent 721c462123
commit bb7dde8784
4 changed files with 78 additions and 131 deletions
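
For context, a minimal sketch of the old and new allocation styles side by side; the wrapper names and device pointer are hypothetical, and only the PCI core calls correspond to what the patch itself uses:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Old style (deprecated): caller-managed struct msix_entry table. */
static int old_style_alloc(struct pci_dev *pdev, struct msix_entry *entries,
			   int want)
{
	int nvec = pci_enable_msix_range(pdev, entries, 1, want);

	if (nvec < 0)
		return nvec;		/* MSI-X not available */
	/* entries[i].vector now holds each Linux IRQ number */
	return nvec;
}

/* New style: the PCI core owns the vector bookkeeping. */
static int new_style_alloc(struct pci_dev *pdev, int want)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, want,
					 PCI_IRQ_MSIX | PCI_IRQ_LEGACY);

	if (nvec < 0)
		return nvec;
	/* per-vector Linux IRQ numbers come from pci_irq_vector(pdev, i) */
	return nvec;
}

static void new_style_free(struct pci_dev *pdev)
{
	pci_free_irq_vectors(pdev);	/* replaces pci_disable_msix() */
}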

drivers/infiniband/hw/hfi1/affinity.c

@ -1,5 +1,5 @@
/*
* Copyright(c) 2015, 2016 Intel Corporation.
* Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@ -335,10 +335,10 @@ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
sde->cpu = cpu;
cpumask_clear(&msix->mask);
cpumask_set_cpu(cpu, &msix->mask);
dd_dev_dbg(dd, "IRQ vector: %u, type %s engine %u -> cpu: %d\n",
msix->msix.vector, irq_type_names[msix->type],
dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
msix->irq, irq_type_names[msix->type],
sde->this_idx, cpu);
irq_set_affinity_hint(msix->msix.vector, &msix->mask);
irq_set_affinity_hint(msix->irq, &msix->mask);
/*
* Set the new cpu in the hfi1_affinity_node and clean
@ -387,7 +387,7 @@ static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
struct irq_affinity_notify *notify = &msix->notify;
notify->irq = msix->msix.vector;
notify->irq = msix->irq;
notify->notify = hfi1_irq_notifier_notify;
notify->release = hfi1_irq_notifier_release;
@ -472,10 +472,10 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
}
cpumask_set_cpu(cpu, &msix->mask);
dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
msix->msix.vector, irq_type_names[msix->type],
dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
msix->irq, irq_type_names[msix->type],
extra, cpu);
irq_set_affinity_hint(msix->msix.vector, &msix->mask);
irq_set_affinity_hint(msix->irq, &msix->mask);
if (msix->type == IRQ_SDMA) {
sde->cpu = cpu;
@ -533,7 +533,7 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
}
}
irq_set_affinity_hint(msix->msix.vector, NULL);
irq_set_affinity_hint(msix->irq, NULL);
cpumask_clear(&msix->mask);
mutex_unlock(&node_affinity.lock);
}
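
With struct msix_entry gone, the affinity code keys everything off the plain Linux IRQ number that pci_irq_vector() returns. A minimal sketch of that pattern, with a hypothetical helper name and a caller-provided cpumask (not hfi1 code):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical helper: pin MSI-X vector 'nr' of 'pdev' to 'cpu'. */
static int example_pin_vector(struct pci_dev *pdev, int nr, int cpu,
			      struct cpumask *mask)
{
	int irq = pci_irq_vector(pdev, nr);	/* Linux IRQ for vector nr */

	if (irq < 0)
		return irq;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	/* same call as before the patch, now fed the looked-up IRQ number */
	return irq_set_affinity_hint(irq, mask);
}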

drivers/infiniband/hw/hfi1/chip.c

@ -12800,30 +12800,24 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
for (i = 0; i < dd->num_msix_entries; i++, me++) {
if (!me->arg) /* => no irq, no affinity */
continue;
hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
free_irq(me->msix.vector, me->arg);
hfi1_put_irq_affinity(dd, me);
free_irq(me->irq, me->arg);
}
/* clean structures */
kfree(dd->msix_entries);
dd->msix_entries = NULL;
dd->num_msix_entries = 0;
} else {
/* INTx */
if (dd->requested_intx_irq) {
free_irq(dd->pcidev->irq, dd);
dd->requested_intx_irq = 0;
}
}
/* turn off interrupts */
if (dd->num_msix_entries) {
/* MSI-X */
pci_disable_msix(dd->pcidev);
} else {
/* INTx */
disable_intx(dd->pcidev);
}
/* clean structures */
kfree(dd->msix_entries);
dd->msix_entries = NULL;
dd->num_msix_entries = 0;
pci_free_irq_vectors(dd->pcidev);
}
/*
@ -12972,13 +12966,21 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
continue;
/* make sure the name is terminated */
me->name[sizeof(me->name) - 1] = 0;
me->irq = pci_irq_vector(dd->pcidev, i);
/*
* On err return me->irq. Don't need to clear this
* because 'arg' has not been set, and cleanup will
* do the right thing.
*/
if (me->irq < 0)
return me->irq;
ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
ret = request_threaded_irq(me->irq, handler, thread, 0,
me->name, arg);
if (ret) {
dd_dev_err(dd,
"unable to allocate %s interrupt, vector %d, index %d, err %d\n",
err_info, me->msix.vector, idx, ret);
"unable to allocate %s interrupt, irq %d, index %d, err %d\n",
err_info, me->irq, idx, ret);
return ret;
}
/*
@ -12989,8 +12991,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
ret = hfi1_get_irq_affinity(dd, me);
if (ret)
dd_dev_err(dd,
"unable to pin IRQ %d\n", ret);
dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
}
return ret;
@ -13009,7 +13010,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
synchronize_irq(me->msix.vector);
synchronize_irq(me->irq);
}
}
@ -13022,7 +13023,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
return;
hfi1_put_irq_affinity(dd, me);
free_irq(me->msix.vector, me->arg);
free_irq(me->irq, me->arg);
me->arg = NULL;
}
@ -13050,14 +13051,19 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
me->name[sizeof(me->name) - 1] = 0;
me->type = IRQ_RCVCTXT;
me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
if (me->irq < 0) {
dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
idx, me->irq);
return;
}
remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
ret = request_threaded_irq(me->msix.vector, receive_context_interrupt,
ret = request_threaded_irq(me->irq, receive_context_interrupt,
receive_context_thread, 0, me->name, arg);
if (ret) {
dd_dev_err(dd, "vnic irq request (vector %d, idx %d) fail %d\n",
me->msix.vector, idx, ret);
dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
me->irq, idx, ret);
return;
}
/*
@ -13070,7 +13076,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
if (ret) {
dd_dev_err(dd,
"unable to pin IRQ %d\n", ret);
free_irq(me->msix.vector, me->arg);
free_irq(me->irq, me->arg);
}
}
@ -13093,9 +13099,8 @@ static void reset_interrupts(struct hfi1_devdata *dd)
static int set_up_interrupts(struct hfi1_devdata *dd)
{
struct hfi1_msix_entry *entries;
u32 total, request;
int i, ret;
u32 total;
int ret, request;
int single_interrupt = 0; /* we expect to have all the interrupts */
/*
@ -13107,39 +13112,31 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
*/
total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
if (!entries) {
ret = -ENOMEM;
goto fail;
}
/* 1-1 MSI-X entry assignment */
for (i = 0; i < total; i++)
entries[i].msix.entry = i;
/* ask for MSI-X interrupts */
request = total;
request_msix(dd, &request, entries);
if (request == 0) {
request = request_msix(dd, total);
if (request < 0) {
ret = request;
goto fail;
} else if (request == 0) {
/* using INTx */
/* dd->num_msix_entries already zero */
kfree(entries);
single_interrupt = 1;
dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
} else if (request < total) {
/* using MSI-X, with reduced interrupts */
dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
total, request);
ret = -EINVAL;
goto fail;
} else {
/* using MSI-X */
dd->num_msix_entries = request;
dd->msix_entries = entries;
if (request != total) {
/* using MSI-X, with reduced interrupts */
dd_dev_err(
dd,
"cannot handle reduced interrupt case, want %u, got %u\n",
total, request);
ret = -EINVAL;
dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
GFP_KERNEL);
if (!dd->msix_entries) {
ret = -ENOMEM;
goto fail;
}
/* using MSI-X */
dd->num_msix_entries = total;
dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
}
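
The reworked set_up_interrupts() hinges on the return value of the new request_msix(): negative means the vector allocation failed, zero means the PCI core fell back to a single legacy (INTx) interrupt, and a positive value is the number of MSI-X vectors granted. A condensed sketch of the caller-side handling, assuming the driver's own hfi1_devdata fields rather than being a drop-in copy of the hunk above:

#include <linux/slab.h>

static int example_set_up_interrupts(struct hfi1_devdata *dd, u32 total)
{
	int request = request_msix(dd, total);

	if (request < 0)
		return request;		/* allocation failed */
	if (request == 0)
		return 0;		/* single INTx interrupt, no MSI-X table */
	if (request < total)
		return -EINVAL;		/* reduced MSI-X is not handled */

	dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
				   GFP_KERNEL);
	if (!dd->msix_entries)
		return -ENOMEM;
	dd->num_msix_entries = total;	/* full MSI-X allocation */
	return 0;
}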

drivers/infiniband/hw/hfi1/hfi1.h

@ -521,7 +521,7 @@ static inline void incr_cntr32(u32 *cntr)
#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
enum irq_type type;
struct msix_entry msix;
int irq;
void *arg;
char name[MAX_NAME_SIZE];
cpumask_t mask;
@ -1838,9 +1838,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
void request_msix(struct hfi1_devdata *dd, u32 *nent,
struct hfi1_msix_entry *entry);
void hfi1_enable_intx(struct pci_dev *pdev);
int request_msix(struct hfi1_devdata *dd, u32 msireq);
void restore_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);

drivers/infiniband/hw/hfi1/pcie.c

@ -1,5 +1,5 @@
/*
* Copyright(c) 2015, 2016 Intel Corporation.
* Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@ -240,50 +240,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
iounmap(dd->piobase);
}
static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
struct hfi1_msix_entry *hfi1_msix_entry)
{
int ret;
int nvec = *msixcnt;
struct msix_entry *msix_entry;
int i;
/*
* We can't pass hfi1_msix_entry array to msix_setup
* so use a dummy msix_entry array and copy the allocated
* irq back to the hfi1_msix_entry array.
*/
msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL);
if (!msix_entry) {
ret = -ENOMEM;
goto do_intx;
}
for (i = 0; i < nvec; i++)
msix_entry[i] = hfi1_msix_entry[i].msix;
ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
if (ret < 0)
goto free_msix_entry;
nvec = ret;
for (i = 0; i < nvec; i++)
hfi1_msix_entry[i].msix = msix_entry[i];
kfree(msix_entry);
*msixcnt = nvec;
return;
free_msix_entry:
kfree(msix_entry);
do_intx:
dd_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
nvec, ret);
*msixcnt = 0;
hfi1_enable_intx(dd->pcidev);
}
/* return the PCIe link speed from the given link status */
static u32 extract_speed(u16 linkstat)
{
@ -364,33 +320,29 @@ int pcie_speeds(struct hfi1_devdata *dd)
}
/*
* Returns in *nent:
* - actual number of interrupts allocated
* Returns:
* - actual number of interrupts allocated or
* - 0 if fell back to INTx.
* - error
*/
void request_msix(struct hfi1_devdata *dd, u32 *nent,
struct hfi1_msix_entry *entry)
int request_msix(struct hfi1_devdata *dd, u32 msireq)
{
int pos;
int nvec;
pos = dd->pcidev->msix_cap;
if (*nent && pos) {
msix_setup(dd, pos, nent, entry);
/* did it, either MSI-X or INTx */
} else {
*nent = 0;
hfi1_enable_intx(dd->pcidev);
nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
if (nvec < 0) {
dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
return nvec;
}
tune_pcie_caps(dd);
}
void hfi1_enable_intx(struct pci_dev *pdev)
{
/* first, turn on INTx */
pci_intx(pdev, 1);
/* then turn off MSI-X */
pci_disable_msix(pdev);
/* check for legacy IRQ */
if (nvec == 1 && !dd->pcidev->msix_enabled)
return 0;
return nvec;
}
/* restore command and BARs after a reset has wiped them out */
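
The tail of the new request_msix() has to distinguish the INTx fallback from a genuine single-vector MSI-X allocation: pci_alloc_irq_vectors() returns 1 in both cases, so the code checks pcidev->msix_enabled, which the PCI core sets only when MSI-X was actually enabled. A hedged sketch of that classification in isolation (hypothetical function name):

#include <linux/pci.h>

/* Normalize the pci_alloc_irq_vectors() result: 0 = legacy INTx, >0 = MSI-X. */
static int example_classify_vectors(struct pci_dev *pdev, int nvec)
{
	if (nvec == 1 && !pdev->msix_enabled)
		return 0;	/* fell back to a legacy INTx interrupt */
	return nvec;		/* nvec MSI-X vectors were allocated */
}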