RDMA/mlx5: Enable vport loopback when user context or QP mandate
A user can create a QP that accepts loopback traffic, but that alone is not enough: loopback must also be enabled on the vport. Currently vport loopback is enabled only when more than one user is using the IB device. Update the logic to also consider whether a QP which supports loopback was created; if so, enable vport loopback even when there is only a single user.

Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 0042f9e458
parent 175edba856
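For context on how such a QP comes about: userspace requests self-loopback on a raw-packet QP through the mlx5 direct-verbs interface, which the kernel translates into the MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC/_MC flags handled below. A minimal sketch of the userspace side, assuming rdma-core's mlx5dv API and its MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC create flag (not part of this patch):

#include <stdio.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Sketch: create a raw-packet QP whose receive TIR allows unicast
 * self-loopback. With this patch the kernel counts such QPs in
 * dev->lb.qps and turns on vport loopback even for a single user. */
static struct ibv_qp *create_self_lb_qp(struct ibv_context *ctx,
                                        struct ibv_pd *pd,
                                        struct ibv_cq *cq)
{
        struct ibv_qp_init_attr_ex attr = {
                .qp_type = IBV_QPT_RAW_PACKET,
                .comp_mask = IBV_QP_INIT_ATTR_PD,
                .pd = pd,
                .send_cq = cq,
                .recv_cq = cq,
                .cap = { .max_send_wr = 1, .max_recv_wr = 64,
                         .max_send_sge = 1, .max_recv_sge = 1 },
        };
        struct mlx5dv_qp_init_attr dv_attr = {
                .comp_mask = MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
                .create_flags = MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC,
        };
        struct ibv_qp *qp = mlx5dv_create_qp(ctx, &attr, &dv_attr);

        if (!qp)
                perror("mlx5dv_create_qp");
        return qp;
}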
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1571,28 +1571,44 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
 		mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
 }
 
-static int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev)
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	int err = 0;
 
 	mutex_lock(&dev->lb.mutex);
-	dev->lb.user_td++;
-
-	if (dev->lb.user_td == 2)
-		err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+	if (td)
+		dev->lb.user_td++;
+	if (qp)
+		dev->lb.qps++;
+
+	if (dev->lb.user_td == 2 ||
+	    dev->lb.qps == 1) {
+		if (!dev->lb.enabled) {
+			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+			dev->lb.enabled = true;
+		}
+	}
 
 	mutex_unlock(&dev->lb.mutex);
 
 	return err;
 }
 
-static void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev)
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	mutex_lock(&dev->lb.mutex);
-	dev->lb.user_td--;
-
-	if (dev->lb.user_td < 2)
-		mlx5_nic_vport_update_local_lb(dev->mdev, false);
+	if (td)
+		dev->lb.user_td--;
+	if (qp)
+		dev->lb.qps--;
+
+	if (dev->lb.user_td == 1 &&
+	    dev->lb.qps == 0) {
+		if (dev->lb.enabled) {
+			mlx5_nic_vport_update_local_lb(dev->mdev, false);
+			dev->lb.enabled = false;
+		}
+	}
 
 	mutex_unlock(&dev->lb.mutex);
 }
 
@@ -1613,7 +1629,7 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return err;
 
-	return mlx5_ib_enable_lb(dev);
+	return mlx5_ib_enable_lb(dev, true, false);
 }
 
 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
@@ -1628,7 +1644,7 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return;
 
-	mlx5_ib_disable_lb(dev);
+	mlx5_ib_disable_lb(dev, true, false);
 }
 
 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
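Taken together, the two counters implement a simple rule: vport loopback has to be on whenever more than one transport-domain user (user context) is active, or at least one self-loopback QP exists, and the new enabled flag keeps the two trigger paths from issuing redundant mlx5_nic_vport_update_local_lb() calls. A standalone model of that bookkeeping (hypothetical names, not kernel code) that can be compiled to watch the transitions:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the dev->lb state above: loopback is needed while
 * user_td > 1 (more than one user context) or qps > 0 (at least one
 * QP created with the self-loopback flags). */
struct lb_state {
        unsigned int user_td; /* transport-domain (user context) count */
        int qps;              /* QPs created with self-loopback flags  */
        bool enabled;         /* current vport loopback state          */
};

static void lb_update(struct lb_state *lb)
{
        bool need = lb->user_td > 1 || lb->qps > 0;

        if (need != lb->enabled) {
                /* stands in for mlx5_nic_vport_update_local_lb() */
                printf("vport loopback -> %s\n", need ? "on" : "off");
                lb->enabled = need;
        }
}

int main(void)
{
        struct lb_state lb = { 0 };

        lb.user_td++; lb_update(&lb); /* first context: stays off      */
        lb.qps++;     lb_update(&lb); /* loopback QP: turns on         */
        lb.user_td++; lb_update(&lb); /* second context: already on    */
        lb.qps--;     lb_update(&lb); /* QP gone, two contexts: stays  */
        lb.user_td--; lb_update(&lb); /* one context left: turns off   */
        return 0;
}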
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -862,6 +862,8 @@ struct mlx5_ib_lb_state {
 	/* protect the user_td */
 	struct mutex		mutex;
 	u32			user_td;
+	int			qps;
+	bool			enabled;
 };
 
 struct mlx5_ib_dev {
@@ -1020,6 +1022,8 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
 int mlx5_ib_destroy_srq(struct ib_srq *srq);
 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata);
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1256,6 +1256,16 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
 		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
 }
 
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+				      struct mlx5_ib_rq *rq,
+				      u32 qp_flags_en)
+{
+	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
+	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
+}
+
 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 				    struct mlx5_ib_rq *rq, u32 tdn,
 				    u32 *qp_flags_en)
@@ -1293,17 +1303,17 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			destroy_raw_packet_qp_tir(dev, rq, 0);
+	}
 	kvfree(in);
 
 	return err;
 }
 
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
-				      struct mlx5_ib_rq *rq)
-{
-	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
-}
-
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				u32 *in, size_t inlen,
 				struct ib_pd *pd)
@@ -1372,7 +1382,7 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 
 	if (qp->rq.wqe_cnt) {
-		destroy_raw_packet_qp_tir(dev, rq);
+		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en);
 		destroy_raw_packet_qp_rq(dev, rq);
 	}
 
@@ -1396,6 +1406,9 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
 
 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
+	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
 	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
 }
 
@@ -1606,6 +1619,13 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 create_tir:
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+	}
+
 	if (err)
 		goto err;
 
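One detail worth calling out in the TIR error handling: when mlx5_ib_enable_lb() fails right after the TIR was created, create_raw_packet_qp_tir() cleans up with destroy_raw_packet_qp_tir(dev, rq, 0), passing 0 instead of the real flags so that teardown skips mlx5_ib_disable_lb() on the path where enable itself failed. A standalone toy of that acquire/release pairing (hypothetical names, not kernel code; lb_get() always succeeds here, the error branch just shows the shape of the cleanup):

#include <stdio.h>

#define SELF_LB_FLAGS 0x3u /* stands in for the UC|MC self-loopback QP flags */

static int lb_qps; /* stands in for dev->lb.qps */

static int lb_get(void)  { lb_qps++; return 0; } /* mlx5_ib_enable_lb(dev, false, true)  */
static void lb_put(void) { lb_qps--; }           /* mlx5_ib_disable_lb(dev, false, true) */

/* Teardown drops the loopback reference only if the flags say one was taken. */
static void destroy_obj(unsigned int flags)
{
        if (flags & SELF_LB_FLAGS)
                lb_put();
        printf("destroyed, lb_qps=%d\n", lb_qps);
}

static int create_obj(unsigned int flags)
{
        /* ... hardware object creation elided ... */
        if (flags & SELF_LB_FLAGS) {
                int err = lb_get();
                if (err) {
                        /* Pass 0, as the patch does, so cleanup does not
                         * release a reference after a failed enable. */
                        destroy_obj(0);
                        return err;
                }
        }
        return 0;
}

int main(void)
{
        if (!create_obj(SELF_LB_FLAGS))
                destroy_obj(SELF_LB_FLAGS);
        return 0;
}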