RDMA/cm: Allow ib_send_cm_sidr_rep() to be done under lock

The first thing ib_send_cm_sidr_rep() does is obtain the lock, so use the
usual unlocked wrapper, locked actor pattern here.

Get rid of the cm_reject_sidr_req() wrapper so each call site can call the
locked or unlocked version as required.

This avoids a sketchy lock/unlock sequence (which could allow state to
change) during cm_destroy_id().

Link: https://lore.kernel.org/r/20200310092545.251365-15-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Jason Gunthorpe 2020-03-10 11:25:44 +02:00
parent 81ddb41f87
commit 6a8824a74b
1 changed file with 28 additions and 30 deletions

View File

@ -83,6 +83,8 @@ EXPORT_SYMBOL(ibcm_reject_msg);
struct cm_id_private; struct cm_id_private;
static void cm_add_one(struct ib_device *device); static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data); static void cm_remove_one(struct ib_device *device, void *client_data);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
const void *private_data, u8 private_data_len); const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
@ -830,16 +832,6 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
return NULL; return NULL;
} }
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
enum ib_cm_sidr_status status)
{
struct ib_cm_sidr_rep_param param;
memset(&param, 0, sizeof param);
param.status = status;
ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device, static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
ib_cm_handler cm_handler, ib_cm_handler cm_handler,
void *context) void *context)
@ -1058,8 +1050,10 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
spin_unlock_irq(&cm_id_priv->lock); spin_unlock_irq(&cm_id_priv->lock);
break; break;
case IB_CM_SIDR_REQ_RCVD: case IB_CM_SIDR_REQ_RCVD:
cm_send_sidr_rep_locked(cm_id_priv,
&(struct ib_cm_sidr_rep_param){
.status = IB_SIDR_REJECT });
spin_unlock_irq(&cm_id_priv->lock); spin_unlock_irq(&cm_id_priv->lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
break; break;
case IB_CM_REQ_SENT: case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REQ_RCVD:
@ -3640,7 +3634,9 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_id_priv->id.service_id); cm_id_priv->id.service_id);
if (!listen_cm_id_priv) { if (!listen_cm_id_priv) {
spin_unlock_irq(&cm.lock); spin_unlock_irq(&cm.lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); ib_send_cm_sidr_rep(&cm_id_priv->id,
&(struct ib_cm_sidr_rep_param){
.status = IB_SIDR_UNSUPPORTED });
goto out; /* No match. */ goto out; /* No match. */
} }
refcount_inc(&listen_cm_id_priv->refcount); refcount_inc(&listen_cm_id_priv->refcount);
@ -3694,50 +3690,52 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
param->private_data, param->private_data_len); param->private_data, param->private_data_len);
} }
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param) struct ib_cm_sidr_rep_param *param)
{ {
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg; struct ib_mad_send_buf *msg;
unsigned long flags;
int ret; int ret;
lockdep_assert_held(&cm_id_priv->lock);
if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
(param->private_data && (param->private_data &&
param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
return -EINVAL; return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id); if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
spin_lock_irqsave(&cm_id_priv->lock, flags); return -EINVAL;
if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
ret = -EINVAL;
goto error;
}
ret = cm_alloc_msg(cm_id_priv, &msg); ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret) if (ret)
goto error; return ret;
cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
param); param);
ret = ib_post_send_mad(msg, NULL); ret = ib_post_send_mad(msg, NULL);
if (ret) { if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg); cm_free_msg(msg);
return ret; return ret;
} }
cm_id->state = IB_CM_IDLE; cm_id_priv->id.state = IB_CM_IDLE;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
spin_lock_irqsave(&cm.lock, flags);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
} }
spin_unlock_irqrestore(&cm.lock, flags);
return 0; return 0;
}
error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
struct ib_cm_sidr_rep_param *param)
{
struct cm_id_private *cm_id_priv =
container_of(cm_id, struct cm_id_private, id);
unsigned long flags;
int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = cm_send_sidr_rep_locked(cm_id_priv, param);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret; return ret;
} }
EXPORT_SYMBOL(ib_send_cm_sidr_rep); EXPORT_SYMBOL(ib_send_cm_sidr_rep);