mirror of https://gitee.com/openkylin/linux.git
IB: Improve uverbs_cleanup_ucontext algorithm
Improve uverbs_cleanup_ucontext algorithm to work properly when the topology graph of the objects cannot be determined at compile time. This is the case with objects created via the devx interface in mlx5.

Typically uverbs objects must be created in a strict topologically sorted order, so that LIFO ordering will generally cause them to be freed properly. There are only a few cases (eg memory windows) where objects can point to things out of the strict LIFO order.

Instead of using an explicit ordering scheme where the HW destroy is not allowed to fail, go over the list multiple times and allow the destroy function to fail. If progress halts then a final, desperate, cleanup is done before leaking the memory. This indicates a driver bug.

Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e620ebfc22
commit 1c77483e4c
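Before the diff, here is a minimal user-space sketch (not kernel code, and not part of the patch below) of the cleanup pattern the commit message describes: sweep the object list repeatedly while a retryable pass still makes progress, then run one final non-retryable pass and treat anything left over as a driver bug. All names here (toy_obj, toy_destroy, cleanup_sweep, cleanup_all) are hypothetical and only illustrate the control flow.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_obj {
	struct toy_obj *next;
	int id;
	int refs;		/* > 0 while another object still points at us */
	struct toy_obj *dep;	/* object we hold a reference on, may be NULL */
};

/* Destroy one object; a retryable pass refuses while the object is in use. */
static int toy_destroy(struct toy_obj *obj, bool retryable)
{
	if (obj->refs && retryable)
		return -1;		/* caller may retry on a later sweep */
	if (obj->dep)
		obj->dep->refs--;	/* drop our reference on the dependency */
	return 0;
}

/* One sweep over the list; returns true if at least one object was freed. */
static bool cleanup_sweep(struct toy_obj **head, bool retryable)
{
	struct toy_obj **pp = head;
	bool progress = false;

	while (*pp) {
		struct toy_obj *obj = *pp;

		if (toy_destroy(obj, retryable)) {
			pp = &obj->next;	/* still busy, try again later */
			continue;
		}
		*pp = obj->next;
		free(obj);
		progress = true;
	}
	return progress;
}

static void cleanup_all(struct toy_obj **head)
{
	/* Retryable sweeps: stop once a whole pass frees nothing. */
	while (*head && cleanup_sweep(head, true))
		;
	/* Final, non-retryable pass; anything still listed indicates a bug. */
	if (*head)
		cleanup_sweep(head, false);
	if (*head)
		fprintf(stderr, "objects leaked: driver bug\n");
}

int main(void)
{
	/* A PD listed before the MR that uses it, out of strict LIFO order. */
	struct toy_obj *pd = calloc(1, sizeof(*pd));
	struct toy_obj *mr = calloc(1, sizeof(*mr));

	pd->id = 1;
	pd->refs = 1;		/* the MR holds a reference on the PD */
	mr->id = 2;
	mr->dep = pd;
	pd->next = mr;

	cleanup_all(&pd);	/* frees the MR on sweep 1, the PD on sweep 2 */
	return 0;
}

The kernel code in the diff follows the same shape: __uverbs_cleanup_ucontext() is one sweep, uverbs_cleanup_ucontext() loops until a sweep makes no progress, then clears cleanup_retryable and does the final pass.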
@@ -360,9 +360,10 @@ static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
 	/*
 	 * We can only fail gracefully if the user requested to destroy the
-	 * object. In the rest of the cases, just remove whatever you can.
+	 * object or when a retry may be called upon an error.
+	 * In the rest of the cases, just remove whatever you can.
 	 */
-	if (why == RDMA_REMOVE_DESTROY && ret)
+	if (ib_is_destroy_retryable(ret, why, uobj))
 		return ret;

 	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
@@ -393,7 +394,7 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
 		container_of(uobj, struct ib_uobject_file, uobj);
 	int ret = fd_type->context_closed(uobj_file, why);

-	if (why == RDMA_REMOVE_DESTROY && ret)
+	if (ib_is_destroy_retryable(ret, why, uobj))
 		return ret;

 	if (why == RDMA_REMOVE_DURING_CLEANUP) {
@@ -422,7 +423,7 @@ static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
 	struct ib_ucontext *ucontext = uobj->context;

 	ret = uobj->type->type_class->remove_commit(uobj, why);
-	if (ret && why == RDMA_REMOVE_DESTROY) {
+	if (ib_is_destroy_retryable(ret, why, uobj)) {
 		/* We couldn't remove the object, so just unlock the uobject */
 		atomic_set(&uobj->usecnt, 0);
 		uobj->type->type_class->lookup_put(uobj, true);
@@ -645,61 +646,77 @@ void uverbs_close_fd(struct file *f)
 	kref_put(uverbs_file_ref, ib_uverbs_release_file);
 }

+static int __uverbs_cleanup_ucontext(struct ib_ucontext *ucontext,
+				     enum rdma_remove_reason reason)
+{
+	struct ib_uobject *obj, *next_obj;
+	int ret = -EINVAL;
+	int err = 0;
+
+	/*
+	 * This shouldn't run while executing other commands on this
+	 * context. Thus, the only thing we should take care of is
+	 * releasing a FD while traversing this list. The FD could be
+	 * closed and released from the _release fop of this FD.
+	 * In order to mitigate this, we add a lock.
+	 * We take and release the lock per traversal in order to let
+	 * other threads (which might still use the FDs) chance to run.
+	 */
+	mutex_lock(&ucontext->uobjects_lock);
+	ucontext->cleanup_reason = reason;
+	list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects, list) {
+		/*
+		 * if we hit this WARN_ON, that means we are
+		 * racing with a lookup_get.
+		 */
+		WARN_ON(uverbs_try_lock_object(obj, true));
+		err = obj->type->type_class->remove_commit(obj, reason);
+
+		if (ib_is_destroy_retryable(err, reason, obj)) {
+			pr_debug("ib_uverbs: failed to remove uobject id %d err %d\n",
+				 obj->id, err);
+			atomic_set(&obj->usecnt, 0);
+			continue;
+		}
+
+		if (err)
+			pr_err("ib_uverbs: unable to remove uobject id %d err %d\n",
+			       obj->id, err);
+
+		list_del(&obj->list);
+		/* put the ref we took when we created the object */
+		uverbs_uobject_put(obj);
+		ret = 0;
+	}
+	mutex_unlock(&ucontext->uobjects_lock);
+	return ret;
+}
+
 void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
 {
 	enum rdma_remove_reason reason = device_removed ?
-		RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
-	unsigned int cur_order = 0;
-
-	ucontext->cleanup_reason = reason;
+		RDMA_REMOVE_DRIVER_REMOVE :
+		RDMA_REMOVE_CLOSE;
 	/*
 	 * Waits for all remove_commit and alloc_commit to finish. Logically, We
 	 * want to hold this forever as the context is going to be destroyed,
 	 * but we'll release it since it causes a "held lock freed" BUG message.
 	 */
 	down_write(&ucontext->cleanup_rwsem);

-	while (!list_empty(&ucontext->uobjects)) {
-		struct ib_uobject *obj, *next_obj;
-		unsigned int next_order = UINT_MAX;
-
-		/*
-		 * This shouldn't run while executing other commands on this
-		 * context. Thus, the only thing we should take care of is
-		 * releasing a FD while traversing this list. The FD could be
-		 * closed and released from the _release fop of this FD.
-		 * In order to mitigate this, we add a lock.
-		 * We take and release the lock per order traversal in order
-		 * to let other threads (which might still use the FDs) chance
-		 * to run.
-		 */
-		mutex_lock(&ucontext->uobjects_lock);
-		list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
-					 list) {
-			if (obj->type->destroy_order == cur_order) {
-				int ret;
-
-				/*
-				 * if we hit this WARN_ON, that means we are
-				 * racing with a lookup_get.
-				 */
-				WARN_ON(uverbs_try_lock_object(obj, true));
-				ret = obj->type->type_class->remove_commit(obj,
-									   reason);
-				list_del(&obj->list);
-				if (ret)
-					pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
-						obj->id, cur_order);
-				/* put the ref we took when we created the object */
-				uverbs_uobject_put(obj);
-			} else {
-				next_order = min(next_order,
-						 obj->type->destroy_order);
-			}
-		}
-		mutex_unlock(&ucontext->uobjects_lock);
-		cur_order = next_order;
-	}
+	ucontext->cleanup_retryable = true;
+	while (!list_empty(&ucontext->uobjects))
+		if (__uverbs_cleanup_ucontext(ucontext, reason)) {
+			/*
+			 * No entry was cleaned-up successfully during this
+			 * iteration
+			 */
+			break;
+		}
+
+	ucontext->cleanup_retryable = false;
+	if (!list_empty(&ucontext->uobjects))
+		__uverbs_cleanup_ucontext(ucontext, reason);
+
 	up_write(&ucontext->cleanup_rwsem);
 }
@@ -230,7 +230,7 @@ void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_event_handler(struct ib_event_handler *handler,
 			     struct ib_event *event);
-int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd,
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
 			   enum rdma_remove_reason why);

 int uverbs_dealloc_mw(struct ib_mw *mw);
@@ -116,6 +116,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
 	rcu_read_unlock();
 	ucontext->closing = 0;
+	ucontext->cleanup_retryable = false;

 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	ucontext->umem_tree = RB_ROOT_CACHED;
@@ -611,12 +612,13 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
 	return ret ?: in_len;
 }

-int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
 			   struct ib_xrcd *xrcd,
 			   enum rdma_remove_reason why)
 {
 	struct inode *inode;
 	int ret;
+	struct ib_uverbs_device *dev = uobject->context->ufile->device;

 	inode = xrcd->inode;
 	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
@@ -624,9 +626,12 @@ int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,

 	ret = ib_dealloc_xrcd(xrcd);

-	if (why == RDMA_REMOVE_DESTROY && ret)
+	if (ib_is_destroy_retryable(ret, why, uobject)) {
 		atomic_inc(&xrcd->usecnt);
-	else if (inode)
+		return ret;
+	}
+
+	if (inode)
 		xrcd_table_delete(dev, inode);

 	return ret;
@@ -77,6 +77,13 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
 		container_of(uobject, struct ib_uqp_object, uevent.uobject);
 	int ret;

+	/*
+	 * If this is a user triggered destroy then do not allow destruction
+	 * until the user cleans up all the mcast bindings. Unlike in other
+	 * places we forcibly clean up the mcast attachments for !DESTROY
+	 * because the mcast attaches are not ubojects and will not be
+	 * destroyed by anything else during cleanup processing.
+	 */
 	if (why == RDMA_REMOVE_DESTROY) {
 		if (!list_empty(&uqp->mcast_list))
 			return -EBUSY;
@@ -85,7 +92,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
 	}

 	ret = ib_destroy_qp(qp);
-	if (ret && why == RDMA_REMOVE_DESTROY)
+	if (ib_is_destroy_retryable(ret, why, uobject))
 		return ret;

 	if (uqp->uxrcd)
@@ -103,8 +110,10 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
 	int ret;

 	ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
-	if (!ret || why != RDMA_REMOVE_DESTROY)
-		kfree(ind_tbl);
+	if (ib_is_destroy_retryable(ret, why, uobject))
+		return ret;
+
+	kfree(ind_tbl);
 	return ret;
 }

@@ -117,8 +126,10 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
 	int ret;

 	ret = ib_destroy_wq(wq);
-	if (!ret || why != RDMA_REMOVE_DESTROY)
-		ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
+	if (ib_is_destroy_retryable(ret, why, uobject))
+		return ret;
+
+	ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
 	return ret;
 }

@@ -132,8 +143,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
 	int ret;

 	ret = ib_destroy_srq(srq);
-
-	if (ret && why == RDMA_REMOVE_DESTROY)
+	if (ib_is_destroy_retryable(ret, why, uobject))
 		return ret;

 	if (srq_type == IB_SRQT_XRC) {
@@ -155,12 +165,12 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject,
 		container_of(uobject, struct ib_uxrcd_object, uobject);
 	int ret;

+	ret = ib_destroy_usecnt(&uxrcd->refcnt, why, uobject);
+	if (ret)
+		return ret;
+
 	mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
-	if (why == RDMA_REMOVE_DESTROY && atomic_read(&uxrcd->refcnt))
-		ret = -EBUSY;
-	else
-		ret = ib_uverbs_dealloc_xrcd(uobject->context->ufile->device,
-					     xrcd, why);
+	ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why);
 	mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);

 	return ret;
@@ -170,9 +180,11 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
 			  enum rdma_remove_reason why)
 {
 	struct ib_pd *pd = uobject->object;
+	int ret;

-	if (why == RDMA_REMOVE_DESTROY && atomic_read(&pd->usecnt))
-		return -EBUSY;
+	ret = ib_destroy_usecnt(&pd->usecnt, why, uobject);
+	if (ret)
+		return ret;

 	ib_dealloc_pd((struct ib_pd *)uobject->object);
 	return 0;
@@ -249,44 +261,42 @@ void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata)
 }

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COMP_CHANNEL,
-			    &UVERBS_TYPE_ALLOC_FD(0,
-						  sizeof(struct ib_uverbs_completion_event_file),
+			    &UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
 						  uverbs_hot_unplug_completion_event_file,
 						  &uverbs_event_fops,
 						  "[infinibandevent]", O_RDONLY));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_QP,
-			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
+			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object),
 						      uverbs_free_qp));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
-			    &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_SRQ,
-			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
+			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
 						      uverbs_free_srq));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH,
-			    &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW,
 			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
-						      0, uverbs_free_flow));
+						      uverbs_free_flow));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_WQ,
-			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
+			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object),
 						      uverbs_free_wq));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL,
-			    &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_XRCD,
-			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
+			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object),
 						      uverbs_free_xrcd));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD,
-			    /* 2 is used in order to free the PD after MRs */
-			    &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DEVICE, NULL);
@@ -38,10 +38,11 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
 				enum rdma_remove_reason why)
 {
 	struct ib_counters *counters = uobject->object;
+	int ret;

-	if (why == RDMA_REMOVE_DESTROY &&
-	    atomic_read(&counters->usecnt))
-		return -EBUSY;
+	ret = ib_destroy_usecnt(&counters->usecnt, why, uobject);
+	if (ret)
+		return ret;

 	return counters->device->destroy_counters(counters);
 }
@@ -150,7 +151,7 @@ static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_READ,
 			   UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS,
-			    &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_counters),
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_counters),
 			    &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE),
 			    &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY),
 			    &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ));
@@ -44,12 +44,16 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
 	int ret;

 	ret = ib_destroy_cq(cq);
-	if (!ret || why != RDMA_REMOVE_DESTROY)
-		ib_uverbs_release_ucq(uobject->context->ufile, ev_queue ?
-				      container_of(ev_queue,
-						   struct ib_uverbs_completion_event_file,
-						   ev_queue) : NULL,
-				      ucq);
+	if (ib_is_destroy_retryable(ret, why, uobject))
+		return ret;
+
+	ib_uverbs_release_ucq(
+		uobject->context->ufile,
+		ev_queue ? container_of(ev_queue,
+					struct ib_uverbs_completion_event_file,
+					ev_queue) :
+			   NULL,
+		ucq);
 	return ret;
 }

@@ -201,7 +205,7 @@ static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_CQ_DESTROY,
 		      UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_CQ,
-			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
+			    &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object),
 						      uverbs_free_cq),
 #if IS_ENABLED(CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI)
 			    &UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE),
@@ -37,9 +37,11 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
 			  enum rdma_remove_reason why)
 {
 	struct ib_dm *dm = uobject->object;
+	int ret;

-	if (why == RDMA_REMOVE_DESTROY && atomic_read(&dm->usecnt))
-		return -EBUSY;
+	ret = ib_destroy_usecnt(&dm->usecnt, why, uobject);
+	if (ret)
+		return ret;

 	return dm->device->dealloc_dm(dm);
 }
@@ -102,7 +104,6 @@ static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_DM_FREE,
 			    UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DM,
-			    /* 1 is used in order to free the DM after MRs */
-			    &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_dm),
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_dm),
 			    &UVERBS_METHOD(UVERBS_METHOD_DM_ALLOC),
 			    &UVERBS_METHOD(UVERBS_METHOD_DM_FREE));
@@ -37,10 +37,11 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
 				   enum rdma_remove_reason why)
 {
 	struct ib_flow_action *action = uobject->object;
+	int ret;

-	if (why == RDMA_REMOVE_DESTROY &&
-	    atomic_read(&action->usecnt))
-		return -EBUSY;
+	ret = ib_destroy_usecnt(&action->usecnt, why, uobject);
+	if (ret)
+		return ret;

 	return action->device->destroy_flow_action(action);
 }
@@ -428,7 +429,7 @@ static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_FLOW_ACTION_DESTRO
 			   UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW_ACTION,
-			    &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow_action),
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
 			    &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
 			    &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
 			    &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
@@ -142,6 +142,5 @@ static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_DM_MR_REG,
 			    UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));

 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MR,
-			    /* 1 is used in order to free the MR after all the MWs */
-			    &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr),
+			    &UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr),
 			    &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG));
@@ -675,7 +675,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
 	int ret;

 	ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
-	if (ret && why == RDMA_REMOVE_DESTROY)
+	if (ib_is_destroy_retryable(ret, why, uobject))
 		return ret;

 	kfree(obj);
@@ -976,7 +976,7 @@ static int devx_umem_cleanup(struct ib_uobject *uobject,
 	int err;

 	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
-	if (err && why == RDMA_REMOVE_DESTROY)
+	if (ib_is_destroy_retryable(err, why, uobject))
 		return err;

 	ib_umem_release(obj->umem);
@@ -1085,14 +1085,14 @@ static DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
 			&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

 static DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
-				   &UVERBS_TYPE_ALLOC_IDR(0, devx_obj_cleanup),
+				   &UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
 				   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
 				   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
 				   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
 				   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));

 static DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
-				   &UVERBS_TYPE_ALLOC_IDR(0, devx_umem_cleanup),
+				   &UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
 				   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
 				   &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
@@ -1476,7 +1476,10 @@ struct ib_fmr_attr {
 struct ib_umem;

 enum rdma_remove_reason {
-	/* Userspace requested uobject deletion. Call could fail */
+	/*
+	 * Userspace requested uobject deletion or initial try
+	 * to remove uobject via cleanup. Call could fail
+	 */
 	RDMA_REMOVE_DESTROY,
 	/* Context deletion. This call should delete the actual object itself */
 	RDMA_REMOVE_CLOSE,
@@ -1503,6 +1506,7 @@ struct ib_ucontext {
 	/* protects cleanup process from other actions */
 	struct rw_semaphore cleanup_rwsem;
 	enum rdma_remove_reason cleanup_reason;
+	bool cleanup_retryable;

 	struct pid *tgid;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -2684,6 +2688,46 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
 	return ib_is_buffer_cleared(udata->inbuf + offset, len);
 }

+/**
+ * ib_is_destroy_retryable - Check whether the uobject destruction
+ * is retryable.
+ * @ret: The initial destruction return code
+ * @why: remove reason
+ * @uobj: The uobject that is destroyed
+ *
+ * This function is a helper function that IB layer and low-level drivers
+ * can use to consider whether the destruction of the given uobject is
+ * retry-able.
+ * It checks the original return code, if it wasn't success the destruction
+ * is retryable according to the ucontext state (i.e. cleanup_retryable) and
+ * the remove reason. (i.e. why).
+ * Must be called with the object locked for destroy.
+ */
+static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
+					   struct ib_uobject *uobj)
+{
+	return ret && (why == RDMA_REMOVE_DESTROY ||
+		       uobj->context->cleanup_retryable);
+}
+
+/**
+ * ib_destroy_usecnt - Called during destruction to check the usecnt
+ * @usecnt: The usecnt atomic
+ * @why: remove reason
+ * @uobj: The uobject that is destroyed
+ *
+ * Non-zero usecnts will block destruction unless destruction was triggered by
+ * a ucontext cleanup.
+ */
+static inline int ib_destroy_usecnt(atomic_t *usecnt,
+				    enum rdma_remove_reason why,
+				    struct ib_uobject *uobj)
+{
+	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
+		return -EBUSY;
+	return 0;
+}
+
 /**
  * ib_modify_qp_is_ok - Check that the supplied attribute mask
  * contains all required attributes and no attributes not allowed for
@@ -93,7 +93,6 @@ struct uverbs_obj_type_class {
 struct uverbs_obj_type {
 	const struct uverbs_obj_type_class * const type_class;
 	size_t obj_size;
-	unsigned int destroy_order;
 };

 /*
@@ -152,10 +151,9 @@ extern const struct uverbs_obj_type_class uverbs_fd_class;

 #define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
 				   sizeof(char))
-#define UVERBS_TYPE_ALLOC_FD(_order, _obj_size, _context_closed, _fops, _name, _flags)\
+#define UVERBS_TYPE_ALLOC_FD(_obj_size, _context_closed, _fops, _name, _flags)\
 	((&((const struct uverbs_obj_fd_type) \
 	 {.type = { \
-		.destroy_order = _order, \
 		.type_class = &uverbs_fd_class, \
 		.obj_size = (_obj_size) + \
 			UVERBS_BUILD_BUG_ON((_obj_size) < sizeof(struct ib_uobject_file)), \
@@ -164,18 +162,17 @@ extern const struct uverbs_obj_type_class uverbs_fd_class;
 	  .fops = _fops, \
 	  .name = _name, \
 	  .flags = _flags}))->type)
-#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _order, _destroy_object) \
+#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _destroy_object) \
 	((&((const struct uverbs_obj_idr_type) \
 	 {.type = { \
-		.destroy_order = _order, \
 		.type_class = &uverbs_idr_class, \
 		.obj_size = (_size) + \
 			UVERBS_BUILD_BUG_ON((_size) < \
 					    sizeof(struct ib_uobject)) \
 	 }, \
 	 .destroy_object = _destroy_object,}))->type)
-#define UVERBS_TYPE_ALLOC_IDR(_order, _destroy_object) \
-	 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), _order, \
+#define UVERBS_TYPE_ALLOC_IDR(_destroy_object) \
+	 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), \
 				  _destroy_object)

 #endif