RDMA/mlx5: Split sig_err MR data into its own xarray
The locking model for signature is completely different from ODP's, so do not share the same xarray that relies on SRCU locking to support ODP. Simply store the active mlx5_core_sig_ctx's in an xarray when signature MRs are created and rely on trivial xarray locking to serialize everything. The overhead of storing only a handful of SIG-related MRs is going to be much less than an xarray full of every mkey.

Link: https://lore.kernel.org/r/20191009160934.3143-3-jgg@ziepe.ca
Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent fb985e278a
commit 50211ec944
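The change boils down to a simple xarray lifecycle: register the signature context when the MR is created, look it up under the plain xarray spinlock from the CQ poll path, and erase it on teardown. The sketch below only illustrates that pattern; struct sig_ctx, the file-scope sig_mrs xarray, and the helper names are simplified stand-ins rather than the driver's real symbols, while the xarray calls (xa_store, xa_lock/xa_load, xa_erase) are the ones the diff actually uses.

/*
 * Hedged sketch of the xarray lifecycle this commit introduces.
 * "struct sig_ctx" and the helpers are illustrative stand-ins; in the
 * real driver the table is dev->sig_mrs, keyed by the MR's base mkey,
 * and the entries are struct mlx5_core_sig_ctx.
 */
#include <linux/xarray.h>

struct sig_ctx {			/* stand-in for mlx5_core_sig_ctx */
	bool sig_err_exists;
	u32 sigerr_count;
};

static DEFINE_XARRAY(sig_mrs);		/* per-device in the real driver */

/* MR creation: publish the signature context keyed by the base mkey. */
static int sig_mr_register(u32 base_mkey, struct sig_ctx *sig)
{
	return xa_err(xa_store(&sig_mrs, base_mkey, sig, GFP_KERNEL));
}

/* CQ poll path: the plain xarray spinlock is enough, no SRCU required. */
static void sig_mr_handle_error(u32 base_mkey)
{
	struct sig_ctx *sig;

	xa_lock(&sig_mrs);
	sig = xa_load(&sig_mrs, base_mkey);
	if (sig) {
		sig->sig_err_exists = true;
		sig->sigerr_count++;
	}
	xa_unlock(&sig_mrs);
}

/* MR teardown: drop the entry before freeing the context. */
static void sig_mr_unregister(u32 base_mkey)
{
	xa_erase(&sig_mrs, base_mkey);
}

Because the table only ever holds signature MRs, the spinlock taken in the CQ poll path guards a handful of entries at most, which is what makes the trivial locking acceptable there.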
@@ -423,9 +423,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	struct mlx5_cqe64 *cqe64;
 	struct mlx5_core_qp *mqp;
 	struct mlx5_ib_wq *wq;
-	struct mlx5_sig_err_cqe *sig_err_cqe;
-	struct mlx5_core_mkey *mmkey;
-	struct mlx5_ib_mr *mr;
 	uint8_t opcode;
 	uint32_t qpn;
 	u16 wqe_ctr;
@@ -519,27 +516,29 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 			}
 		}
 		break;
-	case MLX5_CQE_SIG_ERR:
-		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+	case MLX5_CQE_SIG_ERR: {
+		struct mlx5_sig_err_cqe *sig_err_cqe =
+			(struct mlx5_sig_err_cqe *)cqe64;
+		struct mlx5_core_sig_ctx *sig;
 
-		xa_lock(&dev->mdev->priv.mkey_table);
-		mmkey = xa_load(&dev->mdev->priv.mkey_table,
-				mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-		mr = to_mibmr(mmkey);
-		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
-		mr->sig->sig_err_exists = true;
-		mr->sig->sigerr_count++;
+		xa_lock(&dev->sig_mrs);
+		sig = xa_load(&dev->sig_mrs,
+			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+		get_sig_err_item(sig_err_cqe, &sig->err_item);
+		sig->sig_err_exists = true;
+		sig->sigerr_count++;
 
 		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
-			     cq->mcq.cqn, mr->sig->err_item.key,
-			     mr->sig->err_item.err_type,
-			     mr->sig->err_item.sig_err_offset,
-			     mr->sig->err_item.expected,
-			     mr->sig->err_item.actual);
+			     cq->mcq.cqn, sig->err_item.key,
+			     sig->err_item.err_type,
+			     sig->err_item.sig_err_offset,
+			     sig->err_item.expected,
+			     sig->err_item.actual);
 
-		xa_unlock(&dev->mdev->priv.mkey_table);
+		xa_unlock(&dev->sig_mrs);
 		goto repoll;
 	}
+	}
 
 	return 0;
 }
@@ -6150,6 +6150,7 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 		cleanup_srcu_struct(&dev->mr_srcu);
 	}
 
+	WARN_ON(!xa_empty(&dev->sig_mrs));
 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
 }
 
@@ -6201,6 +6202,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	mutex_init(&dev->cap_mask_mutex);
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
+	xa_init(&dev->sig_mrs);
 
 	spin_lock_init(&dev->dm.lock);
 	dev->dm.dev = mdev;
@@ -999,6 +999,8 @@ struct mlx5_ib_dev {
 	struct mlx5_srq_table	srq_table;
 	struct mlx5_async_ctx	async_ctx;
 	struct mlx5_devx_event_table devx_event_table;
+
+	struct xarray sig_mrs;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1560,6 +1560,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 				 mr->sig->psv_wire.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
 				     mr->sig->psv_wire.psv_idx);
+		xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
 		kfree(mr->sig);
 		mr->sig = NULL;
 	}
@@ -1797,8 +1798,15 @@ static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
 	if (err)
 		goto err_free_mtt_mr;
 
+	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+			      mr->sig, GFP_KERNEL));
+	if (err)
+		goto err_free_descs;
 	return 0;
 
+err_free_descs:
+	destroy_mkey(dev, mr);
+	mlx5_free_priv_descs(mr);
 err_free_mtt_mr:
 	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
 	mr->mtt_mr = NULL;