IB/mlx4: Add support for resizing CQs
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

commit bbf8eed1a0
parent 3fdcb97f0b
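This change adds CQ resizing to the mlx4 driver: userspace CQs are resized by registering a new user buffer before the firmware command and switching buffers afterwards, while kernel CQs allocate a new buffer up front and either copy outstanding CQEs under the CQ lock or let the poll path pick up the hardware's resize marker. Consumers reach the feature through the standard resize verb. A minimal consumer-side sketch, not from this commit, assuming a CQ already created with libibverbs; grow_cq() is a hypothetical helper, ibv_resize_cq() is the real libibverbs entry point that reaches mlx4_ib_resize_cq() in-kernel:

/* Hedged sketch: grow an existing CQ via the resize verb. */
#include <stdio.h>
#include <infiniband/verbs.h>

static int grow_cq(struct ibv_cq *cq, int new_cqe)
{
	int err = ibv_resize_cq(cq, new_cqe); /* returns 0 or errno */

	if (err)
		fprintf(stderr, "ibv_resize_cq: %d\n", err);
	else
		printf("CQ resized; now holds %d entries\n", cq->cqe);
	return err;
}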
drivers/infiniband/hw/mlx4/cq.c

@@ -93,6 +93,74 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
 }
 
+static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
+{
+	int err;
+
+	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+			     PAGE_SIZE * 2, &buf->buf);
+
+	if (err)
+		goto out;
+
+	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
+			    &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+		      &buf->buf);
+
+out:
+	return err;
+}
+
+static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
+{
+	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+}
+
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
+			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
+			       u64 buf_addr, int cqe)
+{
+	int err;
+
+	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+			    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(*umem))
+		return PTR_ERR(*umem);
+
+	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
+			    ilog2((*umem)->page_size), &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	ib_umem_release(*umem);
+
+	return err;
+}
+
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
 				struct ib_ucontext *context,
 				struct ib_udata *udata)
@@ -100,7 +168,6 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_cq *cq;
 	struct mlx4_uar *uar;
-	int buf_size;
 	int err;
 
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -112,8 +179,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 
 	entries      = roundup_pow_of_two(entries + 1);
 	cq->ibcq.cqe = entries - 1;
-	buf_size     = entries * sizeof (struct mlx4_cqe);
+	mutex_init(&cq->resize_mutex);
 	spin_lock_init(&cq->lock);
+	cq->resize_buf = NULL;
+	cq->resize_umem = NULL;
 
 	if (context) {
 		struct mlx4_ib_create_cq ucmd;
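The sizing rules above drive all of the ring arithmetic that follows: one CQE is held in reserve (hence entries + 1), and rounding to a power of two lets ibcq.cqe, which is entries - 1, double as an index mask for wrap-around. A standalone illustration of that arithmetic (plain userspace C, not driver code; the kernel's roundup_pow_of_two() is re-implemented here for the demo):

#include <stdio.h>

/* demo re-implementation of the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow_of_two_demo(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	int requested = 100;
	int entries = roundup_pow_of_two_demo(requested + 1); /* -> 128 */
	int cqe = entries - 1;                                /* -> 127 */

	/* cqe doubles as the wrap-around mask: index 130 lands on slot 2 */
	printf("entries=%d cqe=%d wrap(130)=%d\n", entries, cqe, 130 & cqe);
	return 0;
}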
@@ -123,21 +192,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 			goto err_cq;
 		}
 
-		cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
-				       IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(cq->umem)) {
-			err = PTR_ERR(cq->umem);
+		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+					  ucmd.buf_addr, entries);
+		if (err)
 			goto err_cq;
-		}
-
-		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
-				    ilog2(cq->umem->page_size), &cq->buf.mtt);
-		if (err)
-			goto err_buf;
-
-		err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
-		if (err)
-			goto err_mtt;
 
 		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
 					  &cq->db);
@@ -155,19 +213,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 		*cq->mcq.set_ci_db  = 0;
 		*cq->mcq.arm_db     = 0;
 
-		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
-			err = -ENOMEM;
+		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
+		if (err)
 			goto err_db;
-		}
-
-		err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
-				    &cq->buf.mtt);
-		if (err)
-			goto err_buf;
-
-		err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
-		if (err)
-			goto err_mtt;
 
 		uar = &dev->priv_uar;
 	}
@@ -195,12 +243,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 err_mtt:
 	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
 
 err_buf:
 	if (context)
 		ib_umem_release(cq->umem);
 	else
-		mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
-			      &cq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
 
 err_db:
 	if (!context)
@@ -212,6 +258,170 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	return ERR_PTR(err);
 }
 
+static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				 int entries)
+{
+	int err;
+
+	if (cq->resize_buf)
+		return -EBUSY;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				  int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_resize_cq ucmd;
+	int err;
+
+	if (cq->resize_umem)
+		return -EBUSY;
+
+	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+		return -EFAULT;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
+				  &cq->resize_umem, ucmd.buf_addr, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
+{
+	u32 i;
+
+	i = cq->mcq.cons_index;
+	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+		++i;
+
+	return i - cq->mcq.cons_index;
+}
+
+static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+{
+	struct mlx4_cqe *cqe;
+	int i;
+
+	i = cq->mcq.cons_index;
+	cqe = get_cqe(cq, i & cq->ibcq.cqe);
+	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+					(i + 1) & cq->resize_buf->cqe),
+		       get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+	}
+	++cq->mcq.cons_index;
+}
+
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
+	struct mlx4_ib_cq *cq = to_mcq(ibcq);
+	int outst_cqe;
+	int err;
+
+	mutex_lock(&cq->resize_mutex);
+
+	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	entries = roundup_pow_of_two(entries + 1);
+	if (entries == ibcq->cqe + 1) {
+		err = 0;
+		goto out;
+	}
+
+	if (ibcq->uobject) {
+		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
+		if (err)
+			goto out;
+	} else {
+		/* Can't be smaller than the number of outstanding CQEs */
+		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
+		if (entries < outst_cqe + 1) {
+			err = 0;
+			goto out;
+		}
+
+		err = mlx4_alloc_resize_buf(dev, cq, entries);
+		if (err)
+			goto out;
+	}
+
+	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
+	if (err)
+		goto err_buf;
+
+	if (ibcq->uobject) {
+		cq->buf      = cq->resize_buf->buf;
+		cq->ibcq.cqe = cq->resize_buf->cqe;
+		ib_umem_release(cq->umem);
+		cq->umem     = cq->resize_umem;
+
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		cq->resize_umem = NULL;
+	} else {
+		spin_lock_irq(&cq->lock);
+		if (cq->resize_buf) {
+			mlx4_ib_cq_resize_copy_cqes(cq);
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf      = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+		spin_unlock_irq(&cq->lock);
+	}
+
+	goto out;
+
+err_buf:
+	if (!ibcq->uobject)
+		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
+				    cq->resize_buf->cqe);
+
+	kfree(cq->resize_buf);
+	cq->resize_buf = NULL;
+
+	if (cq->resize_umem) {
+		ib_umem_release(cq->resize_umem);
+		cq->resize_umem = NULL;
+	}
+
+out:
+	mutex_unlock(&cq->resize_mutex);
+	return err;
+}
+
 int mlx4_ib_destroy_cq(struct ib_cq *cq)
 {
 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
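One detail of the kernel-CQ path above is worth spelling out: mlx4_ib_get_outstanding_cqes() walks forward from the consumer index until it finds a CQE that software does not yet own, and the distance travelled is the number of completions the new ring must still hold, which is why a shrink below outst_cqe + 1 is refused. A standalone model of that walk (plain userspace C; sw_owned() stands in for the driver's get_sw_cqe()):

#include <stdio.h>

#define CQ_MASK 7 /* toy ring of 8 entries; the real mask is cq->ibcq.cqe */

/* stands in for get_sw_cqe(): is this CQE owned by software? */
static int sw_owned(const int *owned, unsigned int i)
{
	return owned[i & CQ_MASK];
}

int main(void)
{
	int owned[CQ_MASK + 1] = { 0 };
	unsigned int cons_index = 6, i;

	/* three completions outstanding, wrapping past the end of the ring */
	owned[6] = owned[7] = owned[0] = 1;

	for (i = cons_index; sw_owned(owned, i); ++i)
		;
	printf("outstanding = %u\n", i - cons_index); /* prints 3 */
	return 0;
}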
@@ -224,8 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
 		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
 		ib_umem_release(mcq->umem);
 	} else {
-		mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
-			      &mcq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
 		mlx4_ib_db_free(dev, &mcq->db);
 	}
 
@@ -332,6 +541,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	u32 g_mlpath_rqpn;
 	u16 wqe_ctr;
 
+repoll:
 	cqe = next_cqe_sw(cq);
 	if (!cqe)
 		return -EAGAIN;
@@ -354,6 +564,22 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		return -EINVAL;
 	}
 
+	/* Resize CQ in progress */
+	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
+		if (cq->resize_buf) {
+			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf      = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+
+		goto repoll;
+	}
+
 	if (!*cur_qp ||
 	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
 		/*
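For a CQ that is being resized, the poll path above finishes the switch lazily: when the hardware has moved to the new ring it delivers one CQE whose opcode is MLX4_CQE_OPCODE_RESIZE; the driver swaps in the resize buffer and jumps back to repoll to keep consuming from the new ring. A toy model of that control flow (standalone C; the marker value and ring contents are invented for the demo):

#include <stdio.h>

#define MARKER_RESIZE -1 /* invented stand-in for MLX4_CQE_OPCODE_RESIZE */

int main(void)
{
	int old_ring[4] = { 10, 11, MARKER_RESIZE, 0 };
	int new_ring[8] = { 0, 0, 0, 12, 13, 0, 0, 0 };
	int *ring = old_ring;
	unsigned int mask = 3, ci = 0;

	for (;;) {
repoll:
		if (ring[ci & mask] == MARKER_RESIZE) {
			ring = new_ring; /* cq->buf = cq->resize_buf->buf */
			mask = 7;        /* cq->ibcq.cqe = new size - 1 */
			++ci;            /* the marker CQE consumes one slot */
			goto repoll;
		}
		if (!ring[ci & mask])
			break;           /* no CQE left: poll returns -EAGAIN */
		printf("completion %d\n", ring[ci & mask]);
		++ci;
	}
	return 0; /* prints completions 10 11 12 13 across the switch */
}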
drivers/infiniband/hw/mlx4/main.c

@@ -571,6 +571,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
+		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
@@ -610,6 +611,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
 	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
 	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
+	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
 	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
 	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
 	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
drivers/infiniband/hw/mlx4/mlx4_ib.h

@@ -78,13 +78,21 @@ struct mlx4_ib_cq_buf {
 	struct mlx4_mtt		mtt;
 };
 
+struct mlx4_ib_cq_resize {
+	struct mlx4_ib_cq_buf	buf;
+	int			cqe;
+};
+
 struct mlx4_ib_cq {
 	struct ib_cq		ibcq;
 	struct mlx4_cq		mcq;
 	struct mlx4_ib_cq_buf	buf;
+	struct mlx4_ib_cq_resize *resize_buf;
 	struct mlx4_ib_db	db;
 	spinlock_t		lock;
+	struct mutex		resize_mutex;
 	struct ib_umem	       *umem;
+	struct ib_umem	       *resize_umem;
 };
 
 struct mlx4_ib_mr {
@@ -255,6 +263,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
 
 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
 				struct ib_ucontext *context,
 				struct ib_udata *udata);
drivers/net/mlx4/cq.c

@@ -159,6 +159,34 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_modify);
 
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   int entries, struct mlx4_mtt *mtt)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_cq_context *cq_context;
+	u64 mtt_addr;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cq_context = mailbox->buf;
+	memset(cq_context, 0, sizeof *cq_context);
+
+	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
+	cq_context->log_page_size   = mtt->page_shift - 12;
+	mtt_addr = mlx4_mtt_addr(dev, mtt);
+	cq_context->mtt_base_addr_h = mtt_addr >> 32;
+	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
 {
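The CQ context fields programmed above are log-encoded: the top byte of logsize_usrpage carries log2 of the (power-of-two) entry count, and log_page_size stores the MTT page shift relative to the 4 KB base, hence the `- 12`. A standalone check of that packing (plain userspace C; byte-order conversion omitted):

#include <stdio.h>

/* demo re-implementation of the kernel's ilog2() for this check */
static unsigned int ilog2_demo(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	int entries = 128;   /* already rounded to a power of two */
	int page_shift = 12; /* 4 KB MTT pages */

	unsigned int logsize_usrpage = ilog2_demo(entries) << 24; /* 0x07000000 */
	int log_page_size = page_shift - 12;                      /* 0 = 4 KB base */

	printf("logsize_usrpage=0x%08x log_page_size=%d\n",
	       logsize_usrpage, log_page_size);
	return 0;
}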
include/linux/mlx4/cq.h

@@ -132,5 +132,7 @@ enum {
 
 int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
 		   u16 count, u16 period);
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   int entries, struct mlx4_mtt *mtt);
 
 #endif /* MLX4_CQ_H */