IB/ipath: Add locking for interrupt use of ipath_pd contexts vs free
Fixes timing race resulting in panic. Not a performance sensitive path.

Signed-off-by: Dave Olson <dave.olson@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 1bf7724e09
commit 3d0890985a
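The whole patch follows one pattern: any code that can run from interrupt context takes dd->ipath_uctxt_lock before touching dd->ipath_pd (or a port's port_cnt), and every free path detaches the context from the shared structure while holding that lock, freeing it only after the lock is dropped. Below is a minimal userspace sketch of that pattern, not driver code: a pthread mutex stands in for the kernel spinlock, and ports[], ctxt_lock, irq_like_path() and close_port() are illustrative names only.

/*
 * Userspace sketch of the locking pattern in this patch (not driver code).
 * A pthread mutex stands in for the kernel spinlock; ports[], ctxt_lock,
 * irq_like_path() and close_port() are illustrative names only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NPORTS 4

struct port_ctx {
    int port;
    int port_cnt;
};

static pthread_mutex_t ctxt_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port_ctx *ports[NPORTS];

/* stands in for the interrupt path: only touch a context while holding the lock */
static void irq_like_path(int i)
{
    pthread_mutex_lock(&ctxt_lock);
    if (ports[i] && ports[i]->port_cnt)
        printf("servicing port %d\n", ports[i]->port);
    pthread_mutex_unlock(&ctxt_lock);
}

/* stands in for the close/free path: detach under the lock, free after dropping it */
static void close_port(int i)
{
    struct port_ctx *pc;

    pthread_mutex_lock(&ctxt_lock);
    pc = ports[i];
    ports[i] = NULL;    /* no new users after this point */
    pthread_mutex_unlock(&ctxt_lock);

    free(pc);           /* safe: the context is no longer reachable */
}

int main(void)
{
    ports[0] = calloc(1, sizeof(*ports[0]));
    ports[0]->port_cnt = 1;

    irq_like_path(0);   /* sees the live context */
    close_port(0);
    irq_like_path(0);   /* sees NULL, does nothing */
    return 0;
}

Because the slot is cleared before the lock is released, a concurrent reader observes either the live context or NULL, never a pointer to memory that is about to be freed, which is the timing race described in the commit message.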
@@ -661,6 +661,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 static void __devexit cleanup_device(struct ipath_devdata *dd)
 {
     int port;
+    struct ipath_portdata **tmp;
+    unsigned long flags;
 
     if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
         /* can't do anything more with chip; needs re-init */
@@ -742,20 +744,21 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
 
     /*
      * free any resources still in use (usually just kernel ports)
-     * at unload; we do for portcnt, not cfgports, because cfgports
-     * could have changed while we were loaded.
+     * at unload; we do for portcnt, because that's what we allocate.
+     * We acquire lock to be really paranoid that ipath_pd isn't being
+     * accessed from some interrupt-related code (that should not happen,
+     * but best to be sure).
      */
+    spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
+    tmp = dd->ipath_pd;
+    dd->ipath_pd = NULL;
+    spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
     for (port = 0; port < dd->ipath_portcnt; port++) {
-        struct ipath_portdata *pd = dd->ipath_pd[port];
-        dd->ipath_pd[port] = NULL;
+        struct ipath_portdata *pd = tmp[port];
+        tmp[port] = NULL;   /* debugging paranoia */
         ipath_free_pddata(dd, pd);
     }
-    kfree(dd->ipath_pd);
-    /*
-     * debuggability, in case some cleanup path tries to use it
-     * after this
-     */
-    dd->ipath_pd = NULL;
+    kfree(tmp);
 }
 
 static void __devexit ipath_remove_one(struct pci_dev *pdev)
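cleanup_device() above uses a slightly different variant of the same idea: instead of clearing one slot, it steals the whole ipath_pd table into a local tmp pointer under the lock, publishes NULL, and only then walks and frees the entries. A compact userspace analogue of that idiom, again a sketch with illustrative names (table, table_lock, nports) and a pthread mutex in place of the spinlock:

/*
 * Userspace analogue of the cleanup_device() idiom above (a sketch, not
 * driver code): steal the whole table pointer while holding the lock,
 * publish NULL, then free the entries with the lock dropped.
 */
#include <pthread.h>
#include <stdlib.h>

struct ctx {
    int id;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx **table;  /* plays the role of dd->ipath_pd */
static int nports;          /* plays the role of dd->ipath_portcnt */

static void cleanup_table(void)
{
    struct ctx **tmp;
    int i;

    pthread_mutex_lock(&table_lock);
    tmp = table;
    table = NULL;           /* readers now see "no contexts at all" */
    pthread_mutex_unlock(&table_lock);

    if (!tmp)
        return;
    for (i = 0; i < nports; i++) {
        free(tmp[i]);
        tmp[i] = NULL;      /* debugging paranoia, as in the patch */
    }
    free(tmp);
}

int main(void)
{
    int i;

    nports = 2;
    table = calloc(nports, sizeof(*table));
    for (i = 0; i < nports; i++)
        table[i] = calloc(1, sizeof(**table));

    cleanup_table();        /* frees everything */
    cleanup_table();        /* second call is a harmless no-op */
    return 0;
}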
@@ -2586,6 +2589,7 @@ int ipath_reset_device(int unit)
 {
     int ret, i;
     struct ipath_devdata *dd = ipath_lookup(unit);
+    unsigned long flags;
 
     if (!dd) {
         ret = -ENODEV;
@@ -2611,9 +2615,12 @@ int ipath_reset_device(int unit)
         goto bail;
     }
 
+    spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
     if (dd->ipath_pd)
         for (i = 1; i < dd->ipath_cfgports; i++) {
-            if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
+            if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
+                continue;
+            spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
             ipath_dbg("unit %u port %d is in use "
                       "(PID %u cmd %s), can't reset\n",
                       unit, i,
@@ -2622,7 +2629,7 @@ int ipath_reset_device(int unit)
             ret = -EBUSY;
             goto bail;
-            }
         }
+    spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
 
     if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
         teardown_sdma(dd);
@@ -2656,9 +2663,12 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
 {
     int i, sub, any = 0;
     struct pid *pid;
+    unsigned long flags;
 
     if (!dd->ipath_pd)
         return 0;
+
+    spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
     for (i = 1; i < dd->ipath_cfgports; i++) {
         if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
             continue;
@@ -2682,6 +2692,7 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
             any++;
         }
     }
+    spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
     return any;
 }
 
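ipath_reset_device() and ipath_signal_procs() above show the scanning side of the same discipline: the port table is walked with ipath_uctxt_lock held, and the lock is dropped before anything slow happens (logging and bailing out, or signalling a process). A sketch of that control flow under the same assumptions as the earlier examples (illustrative names, a pthread mutex instead of the spinlock):

/*
 * Sketch of the scan pattern in ipath_reset_device()/ipath_signal_procs()
 * above (not driver code): walk the table with the lock held, and drop the
 * lock before doing anything slow such as logging and bailing out.
 */
#include <pthread.h>
#include <stdio.h>

#define NPORTS 4

struct ctx {
    int port_cnt;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *table[NPORTS];

/* returns 0 if no port is in use, -1 (after unlocking) if one is busy */
static int check_ports_idle(void)
{
    int i;

    pthread_mutex_lock(&table_lock);
    for (i = 1; i < NPORTS; i++) {
        if (!table[i] || !table[i]->port_cnt)
            continue;
        /* unlock before the slow path, mirroring the patch */
        pthread_mutex_unlock(&table_lock);
        fprintf(stderr, "port %d is in use, can't reset\n", i);
        return -1;
    }
    pthread_mutex_unlock(&table_lock);
    return 0;
}

int main(void)
{
    struct ctx busy = { .port_cnt = 1 };

    check_ports_idle();     /* all slots empty: returns 0 */
    table[2] = &busy;
    check_ports_idle();     /* reports port 2 busy, returns -1 */
    return 0;
}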
@@ -2046,7 +2046,9 @@ static int ipath_close(struct inode *in, struct file *fp)
     struct ipath_filedata *fd;
     struct ipath_portdata *pd;
     struct ipath_devdata *dd;
+    unsigned long flags;
     unsigned port;
+    struct pid *pid;
 
     ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
                (long)in->i_rdev, fp->private_data);
@@ -2079,14 +2081,13 @@ static int ipath_close(struct inode *in, struct file *fp)
         mutex_unlock(&ipath_mutex);
         goto bail;
     }
+    /* early; no interrupt users after this */
+    spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
     port = pd->port_port;
-
-    if (pd->port_hdrqfull) {
-        ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
-                   "during run\n", pd->port_comm, pid_nr(pd->port_pid),
-                   pd->port_hdrqfull);
-        pd->port_hdrqfull = 0;
-    }
+    dd->ipath_pd[port] = NULL;
+    pid = pd->port_pid;
+    pd->port_pid = NULL;
+    spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
 
     if (pd->port_rcvwait_to || pd->port_piowait_to
         || pd->port_rcvnowait || pd->port_pionowait) {
@@ -2143,13 +2144,11 @@ static int ipath_close(struct inode *in, struct file *fp)
         unlock_expected_tids(pd);
         ipath_stats.sps_ports--;
         ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-                   pd->port_comm, pid_nr(pd->port_pid),
+                   pd->port_comm, pid_nr(pid),
                    dd->ipath_unit, port);
     }
 
-    put_pid(pd->port_pid);
-    pd->port_pid = NULL;
-    dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
+    put_pid(pid);
     mutex_unlock(&ipath_mutex);
     ipath_free_pddata(dd, pd); /* after releasing the mutex */
 
@@ -229,6 +229,7 @@ static int init_chip_first(struct ipath_devdata *dd)
     spin_lock_init(&dd->ipath_kernel_tid_lock);
     spin_lock_init(&dd->ipath_user_tid_lock);
     spin_lock_init(&dd->ipath_sendctrl_lock);
+    spin_lock_init(&dd->ipath_uctxt_lock);
     spin_lock_init(&dd->ipath_sdma_lock);
     spin_lock_init(&dd->ipath_gpio_lock);
     spin_lock_init(&dd->ipath_eep_st_lock);
@@ -477,6 +477,8 @@ struct ipath_devdata {
     spinlock_t ipath_kernel_tid_lock;
     spinlock_t ipath_user_tid_lock;
     spinlock_t ipath_sendctrl_lock;
+    /* around ipath_pd and (user ports) port_cnt use (intr vs free) */
+    spinlock_t ipath_uctxt_lock;
 
     /*
      * IPATH_STATUS_*,
@@ -132,6 +132,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
      * (see ipath_get_dma_mr and ipath_dma.c).
      */
     if (sge->lkey == 0) {
+        /* always a kernel port, no locking needed */
         struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
 
         if (pd->user) {
@@ -211,6 +212,7 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
      * (see ipath_get_dma_mr and ipath_dma.c).
      */
     if (rkey == 0) {
+        /* always a kernel port, no locking needed */
         struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
 
         if (pd->user) {
@@ -348,6 +348,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
  */
 static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
 {
+    /* always a kernel port, no locking needed */
     struct ipath_portdata *pd = dd->ipath_pd[0];
 
     memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
@@ -730,6 +731,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
     int i;
     int changed = 0;
 
+    /* always a kernel port, no locking needed */
     pd = dd->ipath_pd[0];
 
     for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
@@ -1852,7 +1852,7 @@ unsigned ipath_get_npkeys(struct ipath_devdata *dd)
 }
 
 /**
- * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
+ * ipath_get_pkey - return the indexed PKEY from the port PKEY table
  * @dd: the infinipath device
  * @index: the PKEY index
  */
@@ -1860,6 +1860,7 @@ unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
 {
     unsigned ret;
 
+    /* always a kernel port, no locking needed */
     if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
         ret = 0;
     else