Merge branch 'odp_rework' into rdma.git for-next
Jason Gunthorpe says:

====================
In order to hoist the interval tree code out of the drivers and into the
mmu_notifiers it is necessary for the drivers to not use the interval tree
for other things.

This series replaces the interval tree with an xarray and along the way
re-aligns all the locking to use a sensible SRCU model where the 'update'
step is done by modifying an xarray.

The result is overall much simpler and with less locking in the critical
path. Many functions were reworked for clarity and small details like using
'imr' to refer to the implicit MR make the entire code flow here more
readable.

This also squashes at least two race bugs on its own, and quite possibly
more that haven't been identified.
====================

Merge conflicts with the odp statistics patch resolved.

* branch 'odp_rework':
  RDMA/odp: Remove broken debugging call to invalidate_range
  RDMA/mlx5: Do not race with mlx5_ib_invalidate_range during create and destroy
  RDMA/mlx5: Do not store implicit children in the odp_mkeys xarray
  RDMA/mlx5: Rework implicit ODP destroy
  RDMA/mlx5: Avoid double lookups on the pagefault path
  RDMA/mlx5: Reduce locking in implicit_mr_get_data()
  RDMA/mlx5: Use an xarray for the children of an implicit ODP
  RDMA/mlx5: Split implicit handling from pagefault_mr
  RDMA/mlx5: Set the HW IOVA of the child MRs to their place in the tree
  RDMA/mlx5: Lift implicit_mr_alloc() into the two routines that call it
  RDMA/mlx5: Rework implicit_mr_get_data
  RDMA/mlx5: Delete struct mlx5_priv->mkey_table
  RDMA/mlx5: Use a dedicated mkey xarray for ODP
  RDMA/mlx5: Split sig_err MR data into its own xarray
  RDMA/mlx5: Use SRCU properly in ODP prefetch

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit bb3dba3300
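The series' core idea is to treat an xarray as the registry of ODP mkeys and to pair it with SRCU so that the page-fault path never takes a lock for the lookup: objects are published with xa_store(), looked up under srcu_read_lock(), and on teardown first erased from the xarray and only freed after synchronize_srcu(). The sketch below is a minimal, self-contained illustration of that idiom, not the driver code itself; every identifier in it (example_mkeys, example_srcu, struct mkey_entry and the three helpers) is invented for the example, while the real equivalents in the diffs are dev->odp_mkeys, dev->odp_srcu and the mlx5_ib mkey structures.

/*
 * Illustrative only: the xarray-as-registry + SRCU pattern this series
 * implements.  All identifiers are made up for the example; the driver's
 * real equivalents are dev->odp_mkeys and dev->odp_srcu.
 */
#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/srcu.h>
#include <linux/slab.h>

struct mkey_entry {
	u32 key;
	/* ... per-MR state used by the page fault handler ... */
};

static DEFINE_XARRAY(example_mkeys);	/* 'update' side: published entries */
DEFINE_STATIC_SRCU(example_srcu);	/* keeps readers safe against free */

/* Create path: the entry becomes visible to readers once it is stored. */
static int publish_mkey(struct mkey_entry *ent)
{
	return xa_err(xa_store(&example_mkeys, ent->key, ent, GFP_KERNEL));
}

/* Page-fault style reader: lock-free lookup inside an SRCU section. */
static struct mkey_entry *lookup_mkey(u32 key, int *srcu_idx)
{
	struct mkey_entry *ent;

	*srcu_idx = srcu_read_lock(&example_srcu);
	ent = xa_load(&example_mkeys, key);
	if (!ent)
		srcu_read_unlock(&example_srcu, *srcu_idx);
	return ent;	/* caller drops the SRCU read lock when done */
}

/* Destroy path: unpublish first, then wait for every reader to drain. */
static void unpublish_and_free_mkey(u32 key)
{
	struct mkey_entry *ent = xa_erase(&example_mkeys, key);

	if (!ent)
		return;
	synchronize_srcu(&example_srcu);	/* no reader still sees 'ent' */
	kfree(ent);
}

The teardown paths in the diffs below, for example devx_obj_cleanup() and mlx5_ib_dealloc_mw(), follow the same unpublish-then-synchronize_srcu() ordering, which is what closes the races mentioned in the commit message.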
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -508,7 +508,6 @@ static int ib_umem_odp_map_dma_single_page(
 {
 	struct ib_device *dev = umem_odp->umem.ibdev;
 	dma_addr_t dma_addr;
-	int remove_existing_mapping = 0;
 	int ret = 0;
 
 	/*
@@ -534,28 +533,29 @@ static int ib_umem_odp_map_dma_single_page(
 	} else if (umem_odp->page_list[page_index] == page) {
 		umem_odp->dma_list[page_index] |= access_mask;
 	} else {
-		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
-		       umem_odp->page_list[page_index], page);
-		/* Better remove the mapping now, to prevent any further
-		 * damage. */
-		remove_existing_mapping = 1;
+		/*
+		 * This is a race here where we could have done:
+		 *
+		 *         CPU0                          CPU1
+		 *   get_user_pages()
+		 *                                       invalidate()
+		 *                                       page_fault()
+		 *   mutex_lock(umem_mutex)
+		 *    page from GUP != page in ODP
+		 *
+		 * It should be prevented by the retry test above as reading
+		 * the seq number should be reliable under the
+		 * umem_mutex. Thus something is really not working right if
+		 * things get here.
+		 */
+		WARN(true,
+		     "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+		     umem_odp->page_list[page_index], page);
+		ret = -EAGAIN;
 	}
 
 out:
 	put_user_page(page);
-
-	if (remove_existing_mapping) {
-		ib_umem_notifier_start_account(umem_odp);
-		dev->ops.invalidate_range(
-			umem_odp,
-			ib_umem_start(umem_odp) +
-				(page_index << umem_odp->page_shift),
-			ib_umem_start(umem_odp) +
-				((page_index + 1) << umem_odp->page_shift));
-		ib_umem_notifier_end_account(umem_odp);
-		ret = -EAGAIN;
-	}
-
 	return ret;
 }
 
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -423,9 +423,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	struct mlx5_cqe64 *cqe64;
 	struct mlx5_core_qp *mqp;
 	struct mlx5_ib_wq *wq;
-	struct mlx5_sig_err_cqe *sig_err_cqe;
-	struct mlx5_core_mkey *mmkey;
-	struct mlx5_ib_mr *mr;
 	uint8_t opcode;
 	uint32_t qpn;
 	u16 wqe_ctr;
@@ -519,27 +516,29 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 		}
 	}
 		break;
-	case MLX5_CQE_SIG_ERR:
-		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+	case MLX5_CQE_SIG_ERR: {
+		struct mlx5_sig_err_cqe *sig_err_cqe =
+			(struct mlx5_sig_err_cqe *)cqe64;
+		struct mlx5_core_sig_ctx *sig;
 
-		xa_lock(&dev->mdev->priv.mkey_table);
-		mmkey = xa_load(&dev->mdev->priv.mkey_table,
+		xa_lock(&dev->sig_mrs);
+		sig = xa_load(&dev->sig_mrs,
 			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-		mr = to_mibmr(mmkey);
-		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
-		mr->sig->sig_err_exists = true;
-		mr->sig->sigerr_count++;
+		get_sig_err_item(sig_err_cqe, &sig->err_item);
+		sig->sig_err_exists = true;
+		sig->sigerr_count++;
 
 		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
-			     cq->mcq.cqn, mr->sig->err_item.key,
-			     mr->sig->err_item.err_type,
-			     mr->sig->err_item.sig_err_offset,
-			     mr->sig->err_item.expected,
-			     mr->sig->err_item.actual);
+			     cq->mcq.cqn, sig->err_item.key,
+			     sig->err_item.err_type,
+			     sig->err_item.sig_err_offset,
+			     sig->err_item.expected,
+			     sig->err_item.actual);
 
-		xa_unlock(&dev->mdev->priv.mkey_table);
+		xa_unlock(&dev->sig_mrs);
 		goto repoll;
 	}
+	}
 
 	return 0;
 }
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1265,8 +1265,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
 	mkey->pd = MLX5_GET(mkc, mkc, pd);
 	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
 
-	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
-			       mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
+	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
+			       GFP_KERNEL));
 }
 
 static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1345,9 +1345,9 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
 		 * the mmkey, we must wait for that to stop before freeing the
 		 * mkey, as another allocation could get the same mkey #.
 		 */
-		xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+		xa_erase(&obj->ib_dev->odp_mkeys,
 			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
-		synchronize_srcu(&dev->mr_srcu);
+		synchronize_srcu(&dev->odp_srcu);
 	}
 
 	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6133,11 +6133,10 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 {
 	mlx5_ib_cleanup_multiport_master(dev);
-	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-		srcu_barrier(&dev->mr_srcu);
-		cleanup_srcu_struct(&dev->mr_srcu);
-	}
+	WARN_ON(!xa_empty(&dev->odp_mkeys));
+	cleanup_srcu_struct(&dev->odp_srcu);
 
+	WARN_ON(!xa_empty(&dev->sig_mrs));
 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
 }
 
@@ -6189,15 +6188,15 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	mutex_init(&dev->cap_mask_mutex);
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
+	xa_init(&dev->odp_mkeys);
+	xa_init(&dev->sig_mrs);
 
 	spin_lock_init(&dev->dm.lock);
 	dev->dm.dev = mdev;
 
-	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-		err = init_srcu_struct(&dev->mr_srcu);
-		if (err)
-			goto err_mp;
-	}
+	err = init_srcu_struct(&dev->odp_srcu);
+	if (err)
+		goto err_mp;
 
 	return 0;
 
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -604,7 +604,6 @@ struct mlx5_ib_mr {
 	struct mlx5_ib_dev *dev;
 	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
 	struct mlx5_core_sig_ctx *sig;
-	unsigned int live;
 	void *descs_alloc;
 	int access_flags; /* Needed for rereg MR */
 
@@ -616,12 +615,18 @@ struct mlx5_ib_mr {
 	u64 data_iova;
 	u64 pi_iova;
 
-	atomic_t num_leaf_free;
-	wait_queue_head_t q_leaf_free;
-	struct mlx5_async_work cb_work;
-	atomic_t num_pending_prefetch;
+	/* For ODP and implicit */
+	atomic_t num_deferred_work;
+	struct xarray implicit_children;
+	union {
+		struct rcu_head rcu;
+		struct list_head elm;
+		struct work_struct work;
+	} odp_destroy;
 	struct ib_odp_counters odp_stats;
 	bool is_odp_implicit;
+
+	struct mlx5_async_work cb_work;
 };
 
 static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -977,7 +982,9 @@ struct mlx5_ib_dev {
 	 * Sleepable RCU that prevents destruction of MRs while they are still
 	 * being used by a page fault handler.
 	 */
-	struct srcu_struct mr_srcu;
+	struct srcu_struct odp_srcu;
+	struct xarray odp_mkeys;
+
 	u32 null_mkey;
 	struct mlx5_ib_flow_db *flow_db;
 	/* protect resources needed as part of reset flow */
@@ -999,6 +1006,8 @@ struct mlx5_ib_dev {
 	struct mlx5_srq_table srq_table;
 	struct mlx5_async_ctx async_ctx;
 	struct mlx5_devx_event_table devx_event_table;
+
+	struct xarray sig_mrs;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1162,6 +1171,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 					     struct ib_udata *udata,
 					     int access_flags);
 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			  u64 length, u64 virt_addr, int access_flags,
 			  struct ib_pd *pd, struct ib_udata *udata);
@@ -1223,6 +1233,8 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
 
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
+
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status);
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -50,7 +50,6 @@ enum {
 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
@@ -59,13 +58,9 @@ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
 
-	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
-		/* Wait until all page fault handlers using the mr complete. */
-		synchronize_srcu(&dev->mr_srcu);
-
-	return err;
+	return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 }
 
 static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -94,8 +89,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u8 key;
 	unsigned long flags;
-	struct xarray *mkeys = &dev->mdev->priv.mkey_table;
-	int err;
 
 	spin_lock_irqsave(&ent->lock, flags);
 	ent->pending--;
@@ -122,13 +115,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
 	ent->size++;
 	spin_unlock_irqrestore(&ent->lock, flags);
 
-	xa_lock_irqsave(mkeys, flags);
-	err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
-				&mr->mmkey, GFP_ATOMIC));
-	xa_unlock_irqrestore(mkeys, flags);
-	if (err)
-		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
-
 	if (!completion_done(&ent->compl))
 		complete(&ent->compl);
 }
@@ -218,9 +204,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 	}
 
-	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
-		synchronize_srcu(&dev->mr_srcu);
-
 	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
 		list_del(&mr->list);
 		kfree(mr);
@@ -511,7 +494,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	c = order2idx(dev, mr->order);
 	WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
 
-	if (unreg_umr(dev, mr)) {
+	if (mlx5_mr_cache_invalidate(mr)) {
 		mr->allocated_from_cache = false;
 		destroy_mkey(dev, mr);
 		ent = &cache->ent[c];
@@ -555,10 +538,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 	}
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	synchronize_srcu(&dev->mr_srcu);
-#endif
-
 	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
 		list_del(&mr->list);
 		kfree(mr);
@@ -1335,10 +1314,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	if (is_odp_mr(mr)) {
 		to_ib_umem_odp(mr->umem)->private = mr;
-		atomic_set(&mr->num_pending_prefetch, 0);
+		atomic_set(&mr->num_deferred_work, 0);
+		err = xa_err(xa_store(&dev->odp_mkeys,
+				      mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
+				      GFP_KERNEL));
+		if (err) {
+			dereg_mr(dev, mr);
+			return ERR_PTR(err);
+		}
 	}
-	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
-		smp_store_release(&mr->live, 1);
 
 	return &mr->ibmr;
 error:
@@ -1346,22 +1330,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	return ERR_PTR(err);
 }
 
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+/**
+ * mlx5_mr_cache_invalidate - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA inprogress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
 {
-	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_umr_wr umrwr = {};
 
-	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+	if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
 		return 0;
 
 	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
 			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
 	umrwr.wr.opcode = MLX5_IB_WR_UMR;
-	umrwr.pd = dev->umrc.pd;
+	umrwr.pd = mr->dev->umrc.pd;
 	umrwr.mkey = mr->mmkey.key;
 	umrwr.ignore_free_state = 1;
 
-	return mlx5_ib_post_send_wait(dev, &umrwr);
+	return mlx5_ib_post_send_wait(mr->dev, &umrwr);
 }
 
 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
@@ -1445,7 +1436,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
 		if (mr->allocated_from_cache)
-			err = unreg_umr(dev, mr);
+			err = mlx5_mr_cache_invalidate(mr);
 		else
 			err = destroy_mkey(dev, mr);
 		if (err)
@@ -1558,6 +1549,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 				     mr->sig->psv_wire.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
 				     mr->sig->psv_wire.psv_idx);
+		xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
 		kfree(mr->sig);
 		mr->sig = NULL;
 	}
@@ -1573,54 +1565,20 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	int npages = mr->npages;
 	struct ib_umem *umem = mr->umem;
 
-	if (is_odp_mr(mr)) {
-		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
-
-		/* Prevent new page faults and
-		 * prefetch requests from succeeding
-		 */
-		WRITE_ONCE(mr->live, 0);
-
-		/* Wait for all running page-fault handlers to finish. */
-		synchronize_srcu(&dev->mr_srcu);
-
-		/* dequeue pending prefetch requests for the mr */
-		if (atomic_read(&mr->num_pending_prefetch))
-			flush_workqueue(system_unbound_wq);
-		WARN_ON(atomic_read(&mr->num_pending_prefetch));
-
-		/* Destroy all page mappings */
-		if (!umem_odp->is_implicit_odp)
-			mlx5_ib_invalidate_range(umem_odp,
-						 ib_umem_start(umem_odp),
-						 ib_umem_end(umem_odp));
-		else
-			mlx5_ib_free_implicit_mr(mr);
-		/*
-		 * We kill the umem before the MR for ODP,
-		 * so that there will not be any invalidations in
-		 * flight, looking at the *mr struct.
-		 */
-		ib_umem_odp_release(umem_odp);
-		atomic_sub(npages, &dev->mdev->priv.reg_pages);
-
-		/* Avoid double-freeing the umem. */
-		umem = NULL;
-	}
-
-	clean_mr(dev, mr);
-
-	/*
-	 * We should unregister the DMA address from the HCA before
-	 * remove the DMA mapping.
-	 */
-	mlx5_mr_cache_free(dev, mr);
-	ib_umem_release(umem);
-	if (umem)
-		atomic_sub(npages, &dev->mdev->priv.reg_pages);
+	/* Stop all DMA */
+	if (is_odp_mr(mr))
+		mlx5_ib_fence_odp_mr(mr);
+	else
+		clean_mr(dev, mr);
 
-	if (!mr->allocated_from_cache)
+	if (mr->allocated_from_cache)
+		mlx5_mr_cache_free(dev, mr);
+	else
 		kfree(mr);
+
+	ib_umem_release(umem);
+	atomic_sub(npages, &dev->mdev->priv.reg_pages);
+
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1632,6 +1590,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 		dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
 	}
 
+	if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
+		mlx5_ib_free_implicit_mr(mmr);
+		return 0;
+	}
+
 	dereg_mr(to_mdev(ibmr->device), mmr);
 
 	return 0;
@@ -1795,8 +1758,15 @@ static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
 	if (err)
 		goto err_free_mtt_mr;
 
+	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+			      mr->sig, GFP_KERNEL));
+	if (err)
+		goto err_free_descs;
 	return 0;
 
+err_free_descs:
+	destroy_mkey(dev, mr);
+	mlx5_free_priv_descs(mr);
 err_free_mtt_mr:
 	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
 	mr->mtt_mr = NULL;
@@ -1949,9 +1919,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 		}
 	}
 
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+		err = xa_err(xa_store(&dev->odp_mkeys,
+				      mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
+				      GFP_KERNEL));
+		if (err)
+			goto free_mkey;
+	}
+
 	kfree(in);
 	return &mw->ibmw;
 
+free_mkey:
+	mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
 free:
 	kfree(mw);
 	kfree(in);
@@ -1965,13 +1945,12 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
 	int err;
 
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-		xa_erase(&dev->mdev->priv.mkey_table,
-			 mlx5_base_mkey(mmw->mmkey.key));
+		xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
 		/*
 		 * pagefault_single_data_segment() may be accessing mmw under
 		 * SRCU if the user bound an ODP MR to this MW.
 		 */
-		synchronize_srcu(&dev->mr_srcu);
+		synchronize_srcu(&dev->odp_srcu);
 	}
 
 	err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
[One file's diff was suppressed by the viewer as too large to render; it holds the bulk of the ODP rework, the drivers/infiniband/hw/mlx5/odp.c rewrite.]
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -837,8 +837,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 
 	mlx5_init_qp_table(dev);
 
-	mlx5_init_mkey_table(dev);
-
 	mlx5_init_reserved_gids(dev);
 
 	mlx5_init_clock(dev);
@@ -896,7 +894,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 err_tables_cleanup:
 	mlx5_geneve_destroy(dev->geneve);
 	mlx5_vxlan_destroy(dev->vxlan);
-	mlx5_cleanup_mkey_table(dev);
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_events_cleanup(dev);
@@ -924,7 +921,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_vxlan_destroy(dev->vxlan);
 	mlx5_cleanup_clock(dev);
 	mlx5_cleanup_reserved_gids(dev);
-	mlx5_cleanup_mkey_table(dev);
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_events_cleanup(dev);

--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,16 +36,6 @@
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
 
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
-{
-	xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ);
-}
-
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
-{
-	WARN_ON(!xa_empty(&dev->priv.mkey_table));
-}
-
 int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
 			     struct mlx5_core_mkey *mkey,
 			     struct mlx5_async_ctx *async_ctx, u32 *in,
@@ -54,7 +44,6 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
 			     struct mlx5_async_work *context)
 {
 	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
-	struct xarray *mkeys = &dev->priv.mkey_table;
 	u32 mkey_index;
 	void *mkc;
 	int err;
@@ -84,16 +73,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
 
 	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
 		      mkey_index, key, mkey->key);
 
-	err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
-				  GFP_KERNEL));
-	if (err) {
-		mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n",
-			       mlx5_base_mkey(mkey->key), err);
-		mlx5_core_destroy_mkey(dev, mkey);
-	}
-
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
 
@@ -111,12 +91,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
 {
 	u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
 	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
-	struct xarray *mkeys = &dev->priv.mkey_table;
-	unsigned long flags;
-
-	xa_lock_irqsave(mkeys, flags);
-	__xa_erase(mkeys, mlx5_base_mkey(mkey->key));
-	xa_unlock_irqrestore(mkeys, flags);
 
 	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
 	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));

--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -556,8 +556,6 @@ struct mlx5_priv {
 	struct dentry *cmdif_debugfs;
 	/* end: qp staff */
 
-	struct xarray mkey_table;
-
 	/* start: alloc staff */
 	/* protect buffer alocation according to numa node */
 	struct mutex alloc_mutex;
@@ -942,8 +940,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
 						      gfp_t flags, int npages);
 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
 				 struct mlx5_cmd_mailbox *head);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
 int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
 			     struct mlx5_core_mkey *mkey,
 			     struct mlx5_async_ctx *async_ctx, u32 *in,

--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -78,9 +78,7 @@
 	bool is_implicit_odp;
 
 	struct completion notifier_completion;
-	int dying;
 	unsigned int page_shift;
-	struct work_struct work;
 };
 
 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
@@ -156,22 +154,6 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 				  umem_call_back cb,
 				  bool blockable, void *cookie);
 
-/*
- * Find first region intersecting with address range.
- * Return NULL if not found
- */
-static inline struct ib_umem_odp *
-rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
-{
-	struct interval_tree_node *node;
-
-	node = interval_tree_iter_first(root, addr, addr + length - 1);
-	if (!node)
-		return NULL;
-	return container_of(node, struct ib_umem_odp, interval_tree);
-
-}
-
 static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
 					     unsigned long mmu_seq)
 {