Additional 4.5-rc3 fixes

- One fix to ipoib multicast joins
- One fix to mlx4 error handling
- One fix to mlx5 size computation
- One fix to a thinko in core code
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJWvjoAAAoJELgmozMOVy/dxTcP/3ELxMBGStqhdIJhxf1UfnYh
 IwdMWpBrvRZo92H+/YHZstnK1kHFZyQLejvdWU24loymzcs94vzswHPMpa0baKd1
 PfBTtTKCts5CA4kTdxECiBjHlLYkrcsAv8il1x09RNF3ZzlMxrvupPa9fjulQalP
 ULbNsV+DyZr7hGXly8dX6imYQCF0433uxrYSWOY9o98IbTRYmRQhp1R31TDj/KMP
 jXdLrMYzs7glmtfIQdfA4Vup7PhPAZZ2tKNejNI3ECCnB/wCQsh86k7hl3ghlmDz
 rvrsFheIk114jgHALb+l/FMgNNjTHxSmy98s8Fcuow753oxf940YryUORUIrI/B/
 50CnjjtgCWwG6FzRPM41yYxJ2nvfGMTURKrV6LfR0hre5AxMAxb+Eu0sGl3NAe/6
 rbsdQjAUsctIpSjbo2xjmazFk3Itda7xTzMYS72ovqRQ/VvdMtH63fdUXVXStxUv
 b121ZS3tGYWh6k/4FasRyvvExqgJy3TRHsIEbwC8P+GFAnulhN11pLAc56Eug5Bw
 +U057Uzj3lm2iKZUzC/OvCBARufNZXH2P/oAPXqIDJPBnMAia+U76m8sq1N6tO83
 ct0kn3aYwZ6q1O69SzOpashnTqJCfkuwMtf+sbsDs0zHavjMzKMWhp9UVnQ73vf9
 7mRZc/Uz3+Jrl8c/K1eX
 =lzqm
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull more rdma fixes from Doug Ledford:
 "I think we are getting pretty close to done now.  There are four
  one-off fixes in this update:

   - fix ipoib multicast joins
   - fix mlx4 error handling
   - fix mlx5 size computation
   - fix a thinko in core code"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/mlx5: Fix RC transport send queue overhead computation
  IB/ipoib: fix for rare multicast join race condition
  IB/core: Fix reading capability mask of the port info class
  net/mlx4: fix some error handling in mlx4_multi_func_init()
commit 7686e3c16c
Linus Torvalds, 2016-02-13 17:35:23 -08:00
4 changed files with 27 additions and 16 deletions

drivers/infiniband/core/sysfs.c

@@ -720,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
         if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
                          &cpi, 40, sizeof(cpi)) >= 0) {
-                if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
+                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
                         /* We have extended counters */
                         return &pma_group_ext;
 
-                if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
+                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
                         /* But not the IETF ones */
                         return &pma_group_noietf;
         }
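
The sysfs.c change above swaps a logical '&&' for a bitwise '&': with '&&', any non-zero
capability_mask made the test true no matter which bit was actually set, so extended counters
could be reported on ports that never advertised them. Below is a standalone sketch of the
difference; the CAP_* bit values are made up for illustration and are not the real IB_PMA_*
definitions.

#include <stdio.h>

#define CAP_EXT_WIDTH        (1 << 9)   /* hypothetical bit, not the real IB_PMA_* value */
#define CAP_EXT_WIDTH_NOIETF (1 << 10)  /* hypothetical bit, not the real IB_PMA_* value */

int main(void)
{
        unsigned int capability_mask = CAP_EXT_WIDTH_NOIETF;  /* EXT_WIDTH bit is clear */

        /* Buggy test: '&&' only asks "are both operands non-zero?", so it
         * claims EXT_WIDTH support even though that bit is not set. */
        printf("&& says EXT_WIDTH supported: %d\n", capability_mask && CAP_EXT_WIDTH);

        /* Fixed test: '&' isolates the one bit we actually care about. */
        printf("&  says EXT_WIDTH supported: %d\n", (capability_mask & CAP_EXT_WIDTH) != 0);
        return 0;
}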

drivers/infiniband/hw/mlx5/qp.c

@@ -270,8 +270,10 @@ static int sq_overhead(enum ib_qp_type qp_type)
                 /* fall through */
         case IB_QPT_RC:
                 size += sizeof(struct mlx5_wqe_ctrl_seg) +
-                        sizeof(struct mlx5_wqe_atomic_seg) +
-                        sizeof(struct mlx5_wqe_raddr_seg);
+                        max(sizeof(struct mlx5_wqe_atomic_seg) +
+                            sizeof(struct mlx5_wqe_raddr_seg),
+                            sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+                            sizeof(struct mlx5_mkey_seg));
                 break;
 
         case IB_QPT_XRC_TGT:
@@ -279,9 +281,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
         case IB_QPT_UC:
                 size += sizeof(struct mlx5_wqe_ctrl_seg) +
-                        sizeof(struct mlx5_wqe_raddr_seg) +
-                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
-                        sizeof(struct mlx5_mkey_seg);
+                        max(sizeof(struct mlx5_wqe_raddr_seg),
+                            sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+                            sizeof(struct mlx5_mkey_seg));
                 break;
 
         case IB_QPT_UD:
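
The qp.c change above fixes how much per-WQE overhead is reserved for RC and UC send queues:
a single work request carries either the atomic/remote-address segments or the UMR-control/mkey
segments, never both, so the driver now reserves the larger of the two layouts instead of a fixed
sum that could undersize the queue for memory-registration work requests. The sketch below only
illustrates the sizing idea; the struct sizes are invented and this is not the mlx5 driver code.

#include <stddef.h>
#include <stdio.h>

/* Placeholder segment types with made-up sizes, standing in for the mlx5 WQE segments. */
struct ctrl_seg     { unsigned char data[16]; };
struct atomic_seg   { unsigned char data[16]; };
struct raddr_seg    { unsigned char data[16]; };
struct umr_ctrl_seg { unsigned char data[48]; };
struct mkey_seg     { unsigned char data[64]; };

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* Old approach: assume the atomic + raddr layout only. */
        size_t old_overhead = sizeof(struct ctrl_seg) +
                              sizeof(struct atomic_seg) +
                              sizeof(struct raddr_seg);

        /* Fixed approach: reserve room for whichever mutually exclusive layout is bigger. */
        size_t new_overhead = sizeof(struct ctrl_seg) +
                              MAX(sizeof(struct atomic_seg) + sizeof(struct raddr_seg),
                                  sizeof(struct umr_ctrl_seg) + sizeof(struct mkey_seg));

        printf("old: %zu bytes per WQE, fixed: %zu bytes per WQE\n", old_overhead, new_overhead);
        return 0;
}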

drivers/infiniband/ulp/ipoib/ipoib_multicast.c

@@ -456,7 +456,10 @@ static int ipoib_mcast_join_complete(int status,
         return status;
 }
 
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
         ib_sa_comp_mask comp_mask;
         int ret = 0;
 
+        if (!priv->broadcast ||
+            !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+                return -EINVAL;
+
         ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
 
         rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
                         rec.join_state = 4;
 #endif
         }
 
+        spin_unlock_irq(&priv->lock);
         multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
                                          &rec, comp_mask, GFP_KERNEL,
                                          ipoib_mcast_join_complete, mcast);
+        spin_lock_irq(&priv->lock);
         if (IS_ERR(multicast)) {
                 ret = PTR_ERR(multicast);
                 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
-                spin_lock_irq(&priv->lock);
                 /* Requeue this join task with a backoff delay */
                 __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
                 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
                 spin_unlock_irq(&priv->lock);
                 complete(&mcast->done);
+                spin_lock_irq(&priv->lock);
         }
+        return 0;
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
                         /* Found the next unjoined group */
                         init_completion(&mcast->done);
                         set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-                        spin_unlock_irq(&priv->lock);
-                        ipoib_mcast_join(dev, mcast);
-                        spin_lock_irq(&priv->lock);
+                        if (ipoib_mcast_join(dev, mcast)) {
+                                spin_unlock_irq(&priv->lock);
+                                return;
+                        }
                 } else if (!delay_until ||
                            time_before(mcast->delay_until, delay_until))
                         delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@ void ipoib_mcast_join_task(struct work_struct *work)
         if (mcast) {
                 init_completion(&mcast->done);
                 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+                ipoib_mcast_join(dev, mcast);
         }
         spin_unlock_irq(&priv->lock);
-        if (mcast)
-                ipoib_mcast_join(dev, mcast);
 }
 
 int ipoib_mcast_start_thread(struct net_device *dev)
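
The ipoib_multicast.c change above reworks the locking around the multicast join:
ipoib_mcast_join() now runs with priv->lock held by the caller, bails out early if the interface
is no longer operationally up, and drops the lock only around the sleeping ib_sa_join_multicast()
call before re-taking it, which closes the race between the join task and the device going down.
The following is a generic userspace sketch of that "caller holds the lock, callee drops it around
a blocking call" pattern; the names are hypothetical and a pthread mutex stands in for the kernel
spinlock.

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

struct dev_priv {
        pthread_mutex_t lock;
        int oper_up;            /* stands in for IPOIB_FLAG_OPER_UP */
};

static int blocking_sa_join(void)
{
        /* placeholder for ib_sa_join_multicast(), which may sleep */
        return 0;
}

/* Caller must hold 'priv->lock'. */
static int mcast_join(struct dev_priv *priv)
{
        int ret;

        if (!priv->oper_up)
                return -EINVAL;              /* caller unlocks and stops the scan */

        pthread_mutex_unlock(&priv->lock);   /* never block while holding the lock */
        ret = blocking_sa_join();
        pthread_mutex_lock(&priv->lock);     /* return with the lock held again */

        if (ret)
                printf("join failed: %d, requeue with backoff\n", ret);
        return 0;
}

int main(void)
{
        struct dev_priv priv;

        pthread_mutex_init(&priv.lock, NULL);
        priv.oper_up = 1;

        pthread_mutex_lock(&priv.lock);
        if (mcast_join(&priv))
                printf("device not up, abort the join task\n");
        pthread_mutex_unlock(&priv.lock);

        pthread_mutex_destroy(&priv.lock);
        return 0;
}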

drivers/net/ethernet/mellanox/mlx4/cmd.c

@@ -2429,7 +2429,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
         flush_workqueue(priv->mfunc.master.comm_wq);
         destroy_workqueue(priv->mfunc.master.comm_wq);
 err_slaves:
-        while (--i) {
+        while (i--) {
                 for (port = 1; port <= MLX4_MAX_PORTS; port++)
                         kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
         }
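
The cmd.c change above fixes the error-unwind loop: 'while (--i)' never visits index 0 and, if the
failure happens while i is still 0, decrements past zero and keeps looping, whereas 'while (i--)'
walks exactly the entries that were set up and stops cleanly. A standalone demonstration (not the
mlx4 code):

#include <stdio.h>

static void cleanup_buggy(int i)
{
        printf("while (--i), i=%d frees:", i);
        while (--i)             /* skips index 0; if i started at 0 it would underflow to -1 and keep going */
                printf(" %d", i);
        printf("\n");
}

static void cleanup_fixed(int i)
{
        printf("while (i--), i=%d frees:", i);
        while (i--)             /* walks i-1 down to 0, then stops */
                printf(" %d", i);
        printf("\n");
}

int main(void)
{
        cleanup_buggy(3);       /* frees 2 1      -> element 0 is leaked */
        cleanup_fixed(3);       /* frees 2 1 0    -> every element freed */
        cleanup_fixed(0);       /* frees nothing  -> correct when setup never ran */
        return 0;
}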