RDMA: Add a dedicated CQ resource tracker function
In order to avoid double multiplexing of the resource when it is a CQ,
add a dedicated callback function.

Link: https://lore.kernel.org/r/20200623113043.1228482-6-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 9e2a187a93
parent f443452900
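For context, here is a minimal sketch (not part of this commit) of what a driver-side implementation of the new dedicated callback can look like. After this change the core hands ->fill_res_cq_entry() the struct ib_cq directly, so a driver no longer has to demultiplex a generic rdma_restrack_entry by resource type; as the nldev.c hunk below shows, the core returns the op's result when the op is set. All foo_* names here are hypothetical; the helpers used (nla_nest_start_noflag(), nla_nest_end(), nla_nest_cancel(), rdma_nl_put_driver_u32(), RDMA_NLDEV_ATTR_DRIVER) are the ones the in-tree restrack fillers rely on.

#include <net/netlink.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_netlink.h>
#include <rdma/restrack.h>

/* Hypothetical driver: dump driver-specific CQ attributes via netlink. */
static int foo_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct nlattr *table_attr;

	/* Open the RDMA_NLDEV_ATTR_DRIVER nest that nldev expects. */
	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	/* Emit one attribute; real drivers dump their HW CQ context here. */
	if (rdma_nl_put_driver_u32(msg, "cqe", ibcq->cqe))
		goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

static const struct ib_device_ops foo_dev_ops = {
	.fill_res_cq_entry = foo_fill_res_cq_entry,
};

Returning 0 or -EMSGSIZE directly matches the core-side dispatch: no further generic fill is attempted once the dedicated op has run.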
drivers/infiniband/core/device.c
@@ -2617,6 +2617,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, drain_rq);
 	SET_DEVICE_OP(dev_ops, drain_sq);
 	SET_DEVICE_OP(dev_ops, enable_driver);
+	SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
 	SET_DEVICE_OP(dev_ops, fill_res_entry);
 	SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
 	SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
drivers/infiniband/core/nldev.c
@@ -598,9 +598,8 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
 	if (fill_res_name_pid(msg, res))
 		goto err;
 
-	if (fill_res_entry(dev, msg, res))
-		goto err;
-
+	if (dev->ops.fill_res_cq_entry)
+		return dev->ops.fill_res_cq_entry(msg, cq);
 	return 0;
 
 err:	return -EMSGSIZE;
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -1056,6 +1056,7 @@ struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
 typedef int c4iw_restrack_func(struct sk_buff *msg,
 			       struct rdma_restrack_entry *res);
 int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr);
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
 extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
 
 #endif
drivers/infiniband/hw/cxgb4/provider.c
@@ -485,6 +485,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	.destroy_cq = c4iw_destroy_cq,
 	.destroy_qp = c4iw_destroy_qp,
 	.destroy_srq = c4iw_destroy_srq,
+	.fill_res_cq_entry = c4iw_fill_res_cq_entry,
 	.fill_res_entry = fill_res_entry,
 	.fill_res_mr_entry = c4iw_fill_res_mr_entry,
 	.get_dev_fw_str = get_dev_fw_str,
drivers/infiniband/hw/cxgb4/restrack.c
@@ -372,10 +372,8 @@ static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
 	return -EMSGSIZE;
 }
 
-static int fill_res_cq_entry(struct sk_buff *msg,
-			     struct rdma_restrack_entry *res)
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq)
 {
-	struct ib_cq *ibcq = container_of(res, struct ib_cq, res);
 	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
 	struct nlattr *table_attr;
 	struct t4_cqe hwcqes[2];
@@ -494,5 +492,4 @@ int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
 c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
 	[RDMA_RESTRACK_QP] = fill_res_qp_entry,
 	[RDMA_RESTRACK_CM_ID] = fill_res_ep_entry,
-	[RDMA_RESTRACK_CQ] = fill_res_cq_entry,
 };
drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1266,6 +1266,6 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
 
-int hns_roce_fill_res_entry(struct sk_buff *msg,
-			    struct rdma_restrack_entry *res);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
+			       struct ib_cq *ib_cq);
 #endif /* _HNS_ROCE_DEVICE_H */
drivers/infiniband/hw/hns/hns_roce_main.c
@@ -428,7 +428,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.destroy_ah = hns_roce_destroy_ah,
 	.destroy_cq = hns_roce_destroy_cq,
 	.disassociate_ucontext = hns_roce_disassociate_ucontext,
-	.fill_res_entry = hns_roce_fill_res_entry,
+	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
 	.get_dma_mr = hns_roce_get_dma_mr,
 	.get_link_layer = hns_roce_get_link_layer,
 	.get_port_immutable = hns_roce_port_immutable,
drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -76,10 +76,9 @@ static int hns_roce_fill_cq(struct sk_buff *msg,
 	return -EMSGSIZE;
 }
 
-static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
-				      struct rdma_restrack_entry *res)
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
+			       struct ib_cq *ib_cq)
 {
-	struct ib_cq *ib_cq = container_of(res, struct ib_cq, res);
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
 	struct hns_roce_v2_cq_context *context;
@@ -119,12 +118,3 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
 	kfree(context);
 	return ret;
 }
-
-int hns_roce_fill_res_entry(struct sk_buff *msg,
-			    struct rdma_restrack_entry *res)
-{
-	if (res->type == RDMA_RESTRACK_CQ)
-		return hns_roce_fill_res_cq_entry(msg, res);
-
-	return 0;
-}
include/rdma/ib_verbs.h
@@ -2584,6 +2584,7 @@ struct ib_device_ops {
 	int (*fill_res_entry)(struct sk_buff *msg,
 			      struct rdma_restrack_entry *entry);
 	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
+	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
 
 	/* Device lifecycle callbacks */
 	/*