From b2590bdd0b1dfb91737e6cb07ebb47bd74957f7e Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Mon, 15 Jul 2019 12:45:46 -0400 Subject: [PATCH 01/99] IB/hfi1: Do not update hcrc for a KDETH packet during fault injection When a KDETH packet is subject to fault injection during transmission, HCRC is supposed to be omitted from the packet so that the hardware on the receiver side would drop the packet. When creating pbc, the PbcInsertHcrc field is set to be PBC_IHCRC_NONE if the KDETH packet is subject to fault injection, but overwritten with PBC_IHCRC_LKDETH when update_hcrc() is called later. This problem is fixed by not calling update_hcrc() when the packet is subject to fault injection. Fixes: 6b6cf9357f78 ("IB/hfi1: Set PbcInsertHcrc for TID RDMA packets") Cc: Link: https://lore.kernel.org/r/20190715164546.74174.99296.stgit@awfm-01.aw.intel.com Reviewed-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Mike Marciniszyn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/verbs.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index c4b243f50c76..f4ca436118ab 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -873,16 +873,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, else pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); - if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) - pbc = hfi1_fault_tx(qp, ps->opcode, pbc); pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); - /* Update HCRC based on packet opcode */ - pbc = update_hcrc(ps->opcode, pbc); + if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) + pbc = hfi1_fault_tx(qp, ps->opcode, pbc); + else + /* Update HCRC based on packet opcode */ + pbc = update_hcrc(ps->opcode, pbc); } tx->wqe = qp->s_wqe; ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); @@ -1029,12 +1030,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, else pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); + pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) pbc = hfi1_fault_tx(qp, ps->opcode, pbc); - pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); - - /* Update HCRC based on packet opcode */ - pbc = update_hcrc(ps->opcode, pbc); + else + /* Update HCRC based on packet opcode */ + pbc = update_hcrc(ps->opcode, pbc); } if (cb) iowait_pio_inc(&priv->s_iowait); From 795130b31986e7dcb9682a37f962f49df5a25e82 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 15 Jul 2019 12:45:52 -0400 Subject: [PATCH 02/99] IB/hfi1: Remove unused define The patch noted in Fixes missed deleting the define it obsoleted. Fixes: da9de5f8527f ("IB/hfi1: Close PSM sdma_progress sleep window") Link: https://lore.kernel.org/r/20190715164552.74174.99396.stgit@awfm-01.aw.intel.com Reviewed-by: Dennis Dalessandro Reviewed-by: Kaike Wan Signed-off-by: Mike Marciniszyn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/user_sdma.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index 4d8510b0fc38..9972e0e6545e 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h @@ -110,12 +110,6 @@ enum pkt_q_sdma_state { SDMA_PKT_Q_DEFERRED, }; -/* - * Maximum retry attempts to submit a TX request - * before putting the process to sleep. 
- */ -#define MAX_DEFER_RETRY_COUNT 1 - #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ #define SDMA_DBG(req, fmt, ...) \ From ecc53f8a3c097d13f4b6793855e0bd30a63a5c3a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 23 Jul 2019 19:58:48 +0200 Subject: [PATCH 03/99] RDMA/mlx4: Untag user pointers in mlx4_get_umem_mr This patch is a part of a series that extends kernel ABI to allow to pass tagged user pointers (with the top byte set to something else other than 0x00) as syscall arguments. mlx4_get_umem_mr() uses provided user pointers for vma lookups, which can only by done with untagged pointers. Untag user pointers in this function. Link: https://lore.kernel.org/r/7969018013a67ddbbf784ac7afeea5a57b1e2bcb.1563904656.git.andreyknvl@google.com Reviewed-by: Jason Gunthorpe Acked-by: Catalin Marinas Reviewed-by: Kees Cook Signed-off-by: Andrey Konovalov Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/mr.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 753479285ce9..6ae503cfc526 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -377,6 +377,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, * again */ if (!ib_access_writable(access_flags)) { + unsigned long untagged_start = untagged_addr(start); struct vm_area_struct *vma; down_read(¤t->mm->mmap_sem); @@ -385,9 +386,9 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, * cover the memory, but for now it requires a single vma to * entirely cover the MR to support RO mappings. */ - vma = find_vma(current->mm, start); - if (vma && vma->vm_end >= start + length && - vma->vm_start <= start) { + vma = find_vma(current->mm, untagged_start); + if (vma && vma->vm_end >= untagged_start + length && + vma->vm_start <= untagged_start) { if (vma->vm_flags & VM_WRITE) access_flags |= IB_ACCESS_LOCAL_WRITE; } else { From a511f82218fb14b3d6fbbce3c35759aa4920a734 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 31 May 2019 10:21:00 +0100 Subject: [PATCH 04/99] RDMA/hns: Fix comparison of unsigned long variable 'end' with less than zero Currently the comparison of end with less than zero is always false because end is an unsigned long. Also, replace checks of end with non-zero with end > 0 as it is possible that the #defined decrement may be changed in the future causing end to step over zero and go negative. The initialization of end with 0 is also redundant as this value is never read and is later set to HW_SYNC_TIMEOUT_MSECS, so fix this by initializing it with this value to begin with. 
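As a quick standalone illustration of the pitfall being fixed (a toy program, not driver code): an unsigned counter can never compare as less than zero, and decrementing it past zero wraps around instead of stopping, while a signed long lets the "end > 0" countdown terminate as intended. The name HW_SYNC_TIMEOUT_MSECS is only borrowed from the driver for flavour here.

/* Toy demonstration only; compiles as plain C, no kernel headers needed. */
#include <stdio.h>

int main(void)
{
	unsigned long uend = 0;
	long end = 5;		/* stands in for HW_SYNC_TIMEOUT_MSECS */

	if (uend < 0)		/* always false for an unsigned type */
		printf("never reached\n");

	uend--;			/* wraps to ULONG_MAX instead of going negative */
	printf("unsigned counter after decrement: %lu\n", uend);

	while (end > 0) {	/* signed countdown terminates as expected */
		printf("polling, %ld left\n", end);
		end--;
	}
	return 0;
}
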
Link: https://lore.kernel.org/r/20190531092101.28772-1-colin.king@canonical.com Addresses-Coverity: ("Unsigned compared against 0") Fixes: 669cefb654cb ("RDMA/hns: Remove jiffies operation in disable interrupt context") Signed-off-by: Colin Ian King Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hem.c | 4 ++-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index f4da5bd2884f..198163d3510f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -324,7 +324,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, { spinlock_t *lock = &hr_dev->bt_cmd_lock; struct device *dev = hr_dev->dev; - unsigned long end = 0; + long end; unsigned long flags; struct hns_roce_hem_iter iter; void __iomem *bt_cmd; @@ -375,7 +375,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; end = HW_SYNC_TIMEOUT_MSECS; - while (end) { + while (end > 0) { if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) break; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 81e6dedb1e02..bea71db30461 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -969,7 +969,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) struct hns_roce_free_mr *free_mr; struct hns_roce_v1_priv *priv; struct completion comp; - unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; + long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; priv = (struct hns_roce_v1_priv *)hr_dev->priv; free_mr = &priv->free_mr; @@ -989,7 +989,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); - while (end) { + while (end > 0) { if (try_wait_for_completion(&comp)) return 0; msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); @@ -1107,7 +1107,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_free_mr *free_mr; struct hns_roce_v1_priv *priv; struct completion comp; - unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; + long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; unsigned long start = jiffies; int npages; int ret = 0; @@ -1137,7 +1137,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, queue_work(free_mr->free_mr_wq, &(mr_work->work)); - while (end) { + while (end > 0) { if (try_wait_for_completion(&comp)) goto free_mr; msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); @@ -2428,7 +2428,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, { struct device *dev = &hr_dev->pdev->dev; struct hns_roce_v1_priv *priv; - unsigned long end = 0, flags = 0; + unsigned long flags = 0; + long end = HW_SYNC_TIMEOUT_MSECS; __le32 bt_cmd_val[2] = {0}; void __iomem *bt_cmd; u64 bt_ba = 0; @@ -2466,7 +2467,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - end = HW_SYNC_TIMEOUT_MSECS; while (1) { if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { if (!end) { From 56594ae1d250f839945cda89d0138eece46dd607 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 23 Jul 2019 09:57:24 +0300 Subject: [PATCH 05/99] RDMA/core: Annotate destroy of mutex to ensure that it is released as unlocked While compiled with CONFIG_DEBUG_MUTEXES, the kernel ensures that mutex is not held during destroy. 
Hence add mutex_destroy() for mutexes used in RDMA modules. Link: https://lore.kernel.org/r/20190723065733.4899-2-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cache.c | 1 + drivers/infiniband/core/cma_configfs.c | 8 +++++++- drivers/infiniband/core/counters.c | 8 ++++---- drivers/infiniband/core/device.c | 3 +++ drivers/infiniband/core/user_mad.c | 2 +- drivers/infiniband/core/uverbs_main.c | 4 ++++ drivers/infiniband/core/verbs.c | 1 + 7 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 18e476b3ced0..00fb3eacda19 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -810,6 +810,7 @@ static void release_gid_table(struct ib_device *device, if (leak) return; + mutex_destroy(&table->lock); kfree(table->data_vec); kfree(table); } diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 3ec2c415bb70..8b0b5ae22e4c 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -342,12 +342,18 @@ static struct configfs_subsystem cma_subsys = { int __init cma_configfs_init(void) { + int ret; + config_group_init(&cma_subsys.su_group); mutex_init(&cma_subsys.su_mutex); - return configfs_register_subsystem(&cma_subsys); + ret = configfs_register_subsystem(&cma_subsys); + if (ret) + mutex_destroy(&cma_subsys.su_mutex); + return ret; } void __exit cma_configfs_exit(void) { configfs_unregister_subsystem(&cma_subsys); + mutex_destroy(&cma_subsys.su_mutex); } diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 01faef7bc061..9ed7fce85747 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -592,7 +592,7 @@ int rdma_counter_get_mode(struct ib_device *dev, u8 port, void rdma_counter_init(struct ib_device *dev) { struct rdma_port_counter *port_counter; - u32 port; + u32 port, i; if (!dev->ops.alloc_hw_stats || !dev->port_data) return; @@ -610,13 +610,12 @@ void rdma_counter_init(struct ib_device *dev) return; fail: - rdma_for_each_port(dev, port) { + for (i = port; i >= rdma_start_port(dev); i--) { port_counter = &dev->port_data[port].port_counter; kfree(port_counter->hstats); port_counter->hstats = NULL; + mutex_destroy(&port_counter->lock); } - - return; } void rdma_counter_release(struct ib_device *dev) @@ -630,5 +629,6 @@ void rdma_counter_release(struct ib_device *dev) rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; kfree(port_counter->hstats); + mutex_destroy(&port_counter->lock); } } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 9773145dee09..309210d2d4a8 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -508,6 +508,9 @@ static void ib_device_release(struct device *device) rcu_head); } + mutex_destroy(&dev->unregistration_lock); + mutex_destroy(&dev->compat_devs_mutex); + xa_destroy(&dev->compat_devs); xa_destroy(&dev->client_data); kfree_rcu(dev, rcu_head); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 9f8a48016b41..e0512aef033c 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1038,7 +1038,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp) ib_unregister_mad_agent(file->agent[i]); 
mutex_unlock(&file->port->file_mutex); - + mutex_destroy(&file->mutex); kfree(file); return 0; } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 11c13c1381cf..02b57240176c 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -120,6 +120,8 @@ static void ib_uverbs_release_dev(struct device *device) uverbs_destroy_api(dev->uapi); cleanup_srcu_struct(&dev->disassociate_srcu); + mutex_destroy(&dev->lists_mutex); + mutex_destroy(&dev->xrcd_tree_mutex); kfree(dev); } @@ -212,6 +214,8 @@ void ib_uverbs_release_file(struct kref *ref) if (file->disassociate_page) __free_pages(file->disassociate_page, 0); + mutex_destroy(&file->umap_lock); + mutex_destroy(&file->ucontext_lock); kfree(file); } diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 92349bf37589..f974b6854224 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2259,6 +2259,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) if (ret) return ret; } + mutex_destroy(&xrcd->tgt_qp_mutex); return xrcd->device->ops.dealloc_xrcd(xrcd, udata); } From a5c9c299d1e1f3c6fb421d73367cc2f793af1a2d Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 23 Jul 2019 09:57:31 +0300 Subject: [PATCH 06/99] IB/mlx5: Avoid unnecessary typecast IB device pointer is already available while deallocating IB device, Hence do not typecast it. Link: https://lore.kernel.org/r/20190723065733.4899-9-leon@kernel.org Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c2a5780cb394..6cdd98ec9c1e 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6933,7 +6933,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->port = kcalloc(num_ports, sizeof(*dev->port), GFP_KERNEL); if (!dev->port) { - ib_dealloc_device((struct ib_device *)dev); + ib_dealloc_device(&dev->ib_dev); return NULL; } From 4f8f0d5e33ddb0d102cf6e61c8e8f497eb1c3866 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Mon, 8 Jul 2019 21:41:17 +0800 Subject: [PATCH 07/99] RDMA/hns: Package the flow of creating cq Moves the related code of creating cq into separate functions in order to improve comprehensibility. 
Link: https://lore.kernel.org/r/1562593285-8037-2-git-send-email-oulijun@huawei.com Signed-off-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_cq.c | 186 ++++++++++++++++-------- 1 file changed, 124 insertions(+), 62 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 4e50c22a2da4..507d3c478404 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -298,21 +298,132 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev, &buf->hr_buf); } +static int create_user_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, + struct ib_udata *udata, + struct hns_roce_ib_create_cq_resp *resp, + struct hns_roce_uar *uar, + int cq_entries) +{ + struct hns_roce_ib_create_cq ucmd; + struct device *dev = hr_dev->dev; + int ret; + struct hns_roce_ucontext *context = rdma_udata_to_drv_context( + udata, struct hns_roce_ucontext, ibucontext); + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + dev_err(dev, "Failed to copy_from_udata.\n"); + return -EFAULT; + } + + /* Get user space address, write it into mtt table */ + ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf, + &hr_cq->umem, ucmd.buf_addr, + cq_entries); + if (ret) { + dev_err(dev, "Failed to get_cq_umem.\n"); + return ret; + } + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + (udata->outlen >= sizeof(*resp))) { + ret = hns_roce_db_map_user(context, udata, ucmd.db_addr, + &hr_cq->db); + if (ret) { + dev_err(dev, "cq record doorbell map failed!\n"); + goto err_mtt; + } + hr_cq->db_en = 1; + resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB; + } + + /* Get user space parameters */ + uar = &context->uar; + + return 0; + +err_mtt: + hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); + ib_umem_release(hr_cq->umem); + + return ret; +} + +static int create_kernel_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, struct hns_roce_uar *uar, + int cq_entries) +{ + struct device *dev = hr_dev->dev; + int ret; + + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { + ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); + if (ret) + return ret; + + hr_cq->set_ci_db = hr_cq->db.db_record; + *hr_cq->set_ci_db = 0; + hr_cq->db_en = 1; + } + + /* Init mtt table and write buff address to mtt table */ + ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries); + if (ret) { + dev_err(dev, "Failed to alloc_cq_buf.\n"); + goto err_db; + } + + uar = &hr_dev->priv_uar; + hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset + + DB_REG_OFFSET * uar->index; + + return 0; + +err_db: + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) + hns_roce_free_db(hr_dev, &hr_cq->db); + + return ret; +} + +static void destroy_user_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, + struct ib_udata *udata, + struct hns_roce_ib_create_cq_resp *resp) +{ + struct hns_roce_ucontext *context = rdma_udata_to_drv_context( + udata, struct hns_roce_ucontext, ibucontext); + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + (udata->outlen >= sizeof(*resp))) + hns_roce_db_unmap_user(context, &hr_cq->db); + + hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); + ib_umem_release(hr_cq->umem); +} + +static void destroy_kernel_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq) +{ + hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); + hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe); + + if (hr_dev->caps.flags & 
HNS_ROCE_CAP_FLAG_RECORD_DB) + hns_roce_free_db(hr_dev, &hr_cq->db); +} + int hns_roce_ib_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct device *dev = hr_dev->dev; - struct hns_roce_ib_create_cq ucmd; struct hns_roce_ib_create_cq_resp resp = {}; struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); struct hns_roce_uar *uar = NULL; int vector = attr->comp_vector; int cq_entries = attr->cqe; int ret; - struct hns_roce_ucontext *context = rdma_udata_to_drv_context( - udata, struct hns_roce_ucontext, ibucontext); if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", @@ -328,57 +439,18 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq, spin_lock_init(&hr_cq->lock); if (udata) { - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { - dev_err(dev, "Failed to copy_from_udata.\n"); - ret = -EFAULT; - goto err_cq; - } - - /* Get user space address, write it into mtt table */ - ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf, - &hr_cq->umem, ucmd.buf_addr, - cq_entries); + ret = create_user_cq(hr_dev, hr_cq, udata, &resp, uar, + cq_entries); if (ret) { - dev_err(dev, "Failed to get_cq_umem.\n"); + dev_err(dev, "Create cq failed in user mode!\n"); goto err_cq; } - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp))) { - ret = hns_roce_db_map_user(context, udata, ucmd.db_addr, - &hr_cq->db); - if (ret) { - dev_err(dev, "cq record doorbell map failed!\n"); - goto err_mtt; - } - hr_cq->db_en = 1; - resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB; - } - - /* Get user space parameters */ - uar = &context->uar; } else { - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { - ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); - if (ret) - goto err_cq; - - hr_cq->set_ci_db = hr_cq->db.db_record; - *hr_cq->set_ci_db = 0; - hr_cq->db_en = 1; - } - - /* Init mmt table and write buff address to mtt table */ - ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, - cq_entries); + ret = create_kernel_cq(hr_dev, hr_cq, uar, cq_entries); if (ret) { - dev_err(dev, "Failed to alloc_cq_buf.\n"); - goto err_db; + dev_err(dev, "Create cq failed in kernel mode!\n"); + goto err_cq; } - - uar = &hr_dev->priv_uar; - hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset + - DB_REG_OFFSET * uar->index; } /* Allocate cq index, fill cq_context */ @@ -416,20 +488,10 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq, hns_roce_free_cq(hr_dev, hr_cq); err_dbmap: - if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp))) - hns_roce_db_unmap_user(context, &hr_cq->db); - -err_mtt: - hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); - ib_umem_release(hr_cq->umem); - if (!udata) - hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, - hr_cq->ib_cq.cqe); - -err_db: - if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) - hns_roce_free_db(hr_dev, &hr_cq->db); + if (udata) + destroy_user_cq(hr_dev, hr_cq, udata, &resp); + else + destroy_kernel_cq(hr_dev, hr_cq); err_cq: return ret; From 2a2f1887e08964cf155bd67fc094eeb5b02c2f5c Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Mon, 8 Jul 2019 21:41:18 +0800 Subject: [PATCH 08/99] RDMA/hns: Refactor the code of creating srq Move the related codes of creating user srq and kernel srq into two independent functions as well as remove some unused code and simplifications. 
Link: https://lore.kernel.org/r/1562593285-8037-3-git-send-email-oulijun@huawei.com Signed-off-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_srq.c | 310 +++++++++++++---------- 1 file changed, 183 insertions(+), 127 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 38bb548eaa6d..c011422112b2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -175,6 +175,91 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev, hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR); } +static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, + int srq_buf_size) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); + struct hns_roce_ib_create_srq ucmd; + u32 page_shift; + u32 npages; + int ret; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0); + if (IS_ERR(srq->umem)) + return PTR_ERR(srq->umem); + + if (hr_dev->caps.srqwqe_buf_pg_sz) { + npages = (ib_umem_page_count(srq->umem) + + (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / + (1 << hr_dev->caps.srqwqe_buf_pg_sz); + page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; + ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt); + } else + ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem), + PAGE_SHIFT, &srq->mtt); + if (ret) + goto err_user_buf; + + ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem); + if (ret) + goto err_user_srq_mtt; + + /* config index queue BA */ + srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr, + srq->idx_que.buf_size, 0, 0); + if (IS_ERR(srq->idx_que.umem)) { + dev_err(hr_dev->dev, "ib_umem_get error for index queue\n"); + ret = PTR_ERR(srq->idx_que.umem); + goto err_user_srq_mtt; + } + + if (hr_dev->caps.idx_buf_pg_sz) { + npages = (ib_umem_page_count(srq->idx_que.umem) + + (1 << hr_dev->caps.idx_buf_pg_sz) - 1) / + (1 << hr_dev->caps.idx_buf_pg_sz); + page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; + ret = hns_roce_mtt_init(hr_dev, npages, page_shift, + &srq->idx_que.mtt); + } else { + ret = hns_roce_mtt_init(hr_dev, + ib_umem_page_count(srq->idx_que.umem), + PAGE_SHIFT, + &srq->idx_que.mtt); + } + + if (ret) { + dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n"); + goto err_user_idx_mtt; + } + + ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt, + srq->idx_que.umem); + if (ret) { + dev_err(hr_dev->dev, + "hns_roce_ib_umem_write_mtt error for idx que\n"); + goto err_user_idx_buf; + } + + return 0; + +err_user_idx_buf: + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + +err_user_idx_mtt: + ib_umem_release(srq->idx_que.umem); + +err_user_srq_mtt: + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + +err_user_buf: + ib_umem_release(srq->umem); + + return ret; +} + static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, u32 page_shift) { @@ -196,6 +281,93 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, return 0; } +static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); + u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; + int ret; + + if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2, + &srq->buf, page_shift)) + return -ENOMEM; + + srq->head = 0; + srq->tail = srq->max - 1; + + ret = hns_roce_mtt_init(hr_dev, 
srq->buf.npages, srq->buf.page_shift, + &srq->mtt); + if (ret) + goto err_kernel_buf; + + ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf); + if (ret) + goto err_kernel_srq_mtt; + + page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; + ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift); + if (ret) { + dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret); + goto err_kernel_srq_mtt; + } + + /* Init mtt table for idx_que */ + ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages, + srq->idx_que.idx_buf.page_shift, + &srq->idx_que.mtt); + if (ret) + goto err_kernel_create_idx; + + /* Write buffer address into the mtt table */ + ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt, + &srq->idx_que.idx_buf); + if (ret) + goto err_kernel_idx_buf; + + srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); + if (!srq->wrid) { + ret = -ENOMEM; + goto err_kernel_idx_buf; + } + + return 0; + +err_kernel_idx_buf: + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + +err_kernel_create_idx: + hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, + &srq->idx_que.idx_buf); + kfree(srq->idx_que.bitmap); + +err_kernel_srq_mtt: + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + +err_kernel_buf: + hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf); + + return ret; +} + +static void destroy_user_srq(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq) +{ + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + ib_umem_release(srq->idx_que.umem); + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + ib_umem_release(srq->umem); +} + +static void destroy_kernel_srq(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq, int srq_buf_size) +{ + kvfree(srq->wrid); + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf); + kfree(srq->idx_que.bitmap); + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf); +} + int hns_roce_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata) @@ -205,9 +377,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, struct hns_roce_srq *srq = to_hr_srq(ib_srq); int srq_desc_size; int srq_buf_size; - u32 page_shift; int ret = 0; - u32 npages; u32 cqn; /* Check the actual SRQ wqe and SRQ sge num */ @@ -233,115 +403,16 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX; if (udata) { - struct hns_roce_ib_create_srq ucmd; - - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) - return -EFAULT; - - srq->umem = - ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0); - if (IS_ERR(srq->umem)) - return PTR_ERR(srq->umem); - - if (hr_dev->caps.srqwqe_buf_pg_sz) { - npages = (ib_umem_page_count(srq->umem) + - (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / - (1 << hr_dev->caps.srqwqe_buf_pg_sz); - page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; - ret = hns_roce_mtt_init(hr_dev, npages, - page_shift, - &srq->mtt); - } else - ret = hns_roce_mtt_init(hr_dev, - ib_umem_page_count(srq->umem), - PAGE_SHIFT, &srq->mtt); - if (ret) - goto err_buf; - - ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem); - if (ret) - goto err_srq_mtt; - - /* config index queue BA */ - srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr, - srq->idx_que.buf_size, 0, 0); - if (IS_ERR(srq->idx_que.umem)) { - dev_err(hr_dev->dev, - "ib_umem_get error for index queue\n"); - ret = PTR_ERR(srq->idx_que.umem); - goto err_srq_mtt; - } - - if (hr_dev->caps.idx_buf_pg_sz) { - npages = 
(ib_umem_page_count(srq->idx_que.umem) + - (1 << hr_dev->caps.idx_buf_pg_sz) - 1) / - (1 << hr_dev->caps.idx_buf_pg_sz); - page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; - ret = hns_roce_mtt_init(hr_dev, npages, - page_shift, &srq->idx_que.mtt); - } else { - ret = hns_roce_mtt_init( - hr_dev, ib_umem_page_count(srq->idx_que.umem), - PAGE_SHIFT, &srq->idx_que.mtt); - } - + ret = create_user_srq(srq, udata, srq_buf_size); if (ret) { - dev_err(hr_dev->dev, - "hns_roce_mtt_init error for idx que\n"); - goto err_idx_mtt; - } - - ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt, - srq->idx_que.umem); - if (ret) { - dev_err(hr_dev->dev, - "hns_roce_ib_umem_write_mtt error for idx que\n"); - goto err_idx_buf; + dev_err(hr_dev->dev, "Create user srq failed\n"); + goto err_srq; } } else { - page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; - if (hns_roce_buf_alloc(hr_dev, srq_buf_size, - (1 << page_shift) * 2, &srq->buf, - page_shift)) - return -ENOMEM; - - srq->head = 0; - srq->tail = srq->max - 1; - - ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, - srq->buf.page_shift, &srq->mtt); - if (ret) - goto err_buf; - - ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf); - if (ret) - goto err_srq_mtt; - - page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; - ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift); + ret = create_kernel_srq(srq, srq_buf_size); if (ret) { - dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", - ret); - goto err_srq_mtt; - } - - /* Init mtt table for idx_que */ - ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages, - srq->idx_que.idx_buf.page_shift, - &srq->idx_que.mtt); - if (ret) - goto err_create_idx; - - /* Write buffer address into the mtt table */ - ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt, - &srq->idx_que.idx_buf); - if (ret) - goto err_idx_buf; - - srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); - if (!srq->wrid) { - ret = -ENOMEM; - goto err_idx_buf; + dev_err(hr_dev->dev, "Create kernel srq failed\n"); + goto err_srq; } } @@ -373,27 +444,12 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, hns_roce_srq_free(hr_dev, srq); err_wrid: - kvfree(srq->wrid); - -err_idx_buf: - hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); - -err_idx_mtt: - ib_umem_release(srq->idx_que.umem); - -err_create_idx: - hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, - &srq->idx_que.idx_buf); - bitmap_free(srq->idx_que.bitmap); - -err_srq_mtt: - hns_roce_mtt_cleanup(hr_dev, &srq->mtt); - -err_buf: - ib_umem_release(srq->umem); - if (!udata) - hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf); + if (udata) + destroy_user_srq(hr_dev, srq); + else + destroy_kernel_srq(hr_dev, srq, srq_buf_size); +err_srq: return ret; } From 606bf89e98efb1174891c4d96afd39c1a974ce3d Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Mon, 8 Jul 2019 21:41:19 +0800 Subject: [PATCH 09/99] RDMA/hns: Refactor for hns_roce_v2_modify_qp function Move some lines which exist hns_roce_v2_modify_qp function into a new function. The code refactored mainly includes some absolute fields of qp context and some optional fields of qp context. 
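For context on what these helpers program (a standalone sketch, not the driver code): as the comment in hns_roce_v2_modify_qp() notes, the v2 engine is handed both a QP context and a context mask, and every field the software wants applied is written into the context while the matching mask bits are cleared. The simplified set_field() below stands in for roce_set_field(), and the field layout is invented for illustration.

/* Toy model of the qpc/qpc_mask convention; not the driver implementation. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void set_field(uint32_t *dword, uint32_t mask, int shift, uint32_t val)
{
	*dword = (*dword & ~mask) | ((val << shift) & mask);
}

#define FLD_MTU_M 0x00000070	/* made-up field occupying bits 4..6 */
#define FLD_MTU_S 4

int main(void)
{
	uint32_t context = 0;
	uint32_t qpc_mask;

	/* Start with all mask bits set: "leave every field untouched". */
	memset(&qpc_mask, 0xff, sizeof(qpc_mask));

	/* Modifying a field means programming it in the context and clearing
	 * the same bits in the mask so the hardware applies the new value. */
	set_field(&context, FLD_MTU_M, FLD_MTU_S, 5);
	set_field(&qpc_mask, FLD_MTU_M, FLD_MTU_S, 0);

	printf("context  = 0x%08x\n", context);
	printf("qpc_mask = 0x%08x\n", qpc_mask);
	return 0;
}

The helpers split out by this patch keep that pairing local: hns_roce_v2_set_path(), hns_roce_v2_set_abs_fields() and hns_roce_v2_set_opt_fields() each write a field into the context and zero the corresponding bits in qpc_mask in the same place.
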
Link: https://lore.kernel.org/r/1562593285-8037-4-git-send-email-oulijun@huawei.com Signed-off-by: Lijun Ou Signed-off-by: Xi Wang Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 356 ++++++++++++--------- 1 file changed, 206 insertions(+), 150 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index b76e3beeafb8..edc5dd257d16 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3974,30 +3974,119 @@ static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state, } -static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) +static int hns_roce_v2_set_path(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) { + const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_v2_qp_context *context; - struct hns_roce_v2_qp_context *qpc_mask; - struct device *dev = hr_dev->dev; - int ret = -EINVAL; + const struct ib_gid_attr *gid_attr = NULL; + int is_roce_protocol; + bool is_udp = false; + u16 vlan = 0xffff; + u8 ib_port; + u8 hr_port; + int ret; - context = kcalloc(2, sizeof(*context), GFP_ATOMIC); - if (!context) - return -ENOMEM; + ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1; + hr_port = ib_port - 1; + is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && + rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; + + if (is_roce_protocol) { + gid_attr = attr->ah_attr.grh.sgid_attr; + ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL); + if (ret) + return ret; + + if (gid_attr) + is_udp = (gid_attr->gid_type == + IB_GID_TYPE_ROCE_UDP_ENCAP); + } + + if (vlan < VLAN_CFI_MASK) { + roce_set_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); + roce_set_bit(context->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); + } + + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, + V2_QPC_BYTE_24_VLAN_ID_S, vlan); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, + V2_QPC_BYTE_24_VLAN_ID_S, 0); + + if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { + dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n", + grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); + return -EINVAL; + } + + if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { + dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); + return -EINVAL; + } + + roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M, + V2_QPC_BYTE_52_UDPSPN_S, + is_udp ? 
0x12b7 : 0); + + roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M, + V2_QPC_BYTE_52_UDPSPN_S, 0); + + roce_set_field(context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, + grh->sgid_index); + + roce_set_field(qpc_mask->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0); + + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, + V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, + V2_QPC_BYTE_24_HOP_LIMIT_S, 0); + + if (hr_dev->pci_dev->revision == 0x21 && is_udp) + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, + V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2); + else + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, + V2_QPC_BYTE_24_TC_S, grh->traffic_class); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, + V2_QPC_BYTE_24_TC_S, 0); + roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, + V2_QPC_BYTE_28_FL_S, grh->flow_label); + roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, + V2_QPC_BYTE_28_FL_S, 0); + memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); + memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); + roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, + V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr)); + roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, + V2_QPC_BYTE_28_SL_S, 0); + hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); + + return 0; +} + +static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, + enum ib_qp_state cur_state, + enum ib_qp_state new_state, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + int ret = 0; - qpc_mask = context + 1; - /* - * In v2 engine, software pass context and context mask to hardware - * when modifying qp. If software need modify some fields in context, - * we should set all bits of the relevant fields in context mask to - * 0 at the same time, else set them to 0x1. 
- */ - memset(qpc_mask, 0xff, sizeof(*qpc_mask)); if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { memset(qpc_mask, 0, sizeof(*qpc_mask)); modify_qp_reset_to_init(ibqp, attr, attr_mask, context, @@ -4019,134 +4108,30 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, /* Nothing */ ; } else { - dev_err(dev, "Illegal state for QP!\n"); + dev_err(hr_dev->dev, "Illegal state for QP!\n"); ret = -EINVAL; goto out; } - /* When QP state is err, SQ and RQ WQE should be flushed */ - if (new_state == IB_QPS_ERR) { - roce_set_field(context->byte_160_sq_ci_pi, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, - hr_qp->sq.head); - roce_set_field(qpc_mask->byte_160_sq_ci_pi, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); +out: + return ret; +} - if (!ibqp->srq) { - roce_set_field(context->byte_84_rq_ci_pi, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, - hr_qp->rq.head); - roce_set_field(qpc_mask->byte_84_rq_ci_pi, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); - } - } +static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + int ret = 0; if (attr_mask & IB_QP_AV) { - const struct ib_global_route *grh = - rdma_ah_read_grh(&attr->ah_attr); - const struct ib_gid_attr *gid_attr = NULL; - int is_roce_protocol; - u16 vlan = 0xffff; - u8 ib_port; - u8 hr_port; - - ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : - hr_qp->port + 1; - hr_port = ib_port - 1; - is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && - rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; - - if (is_roce_protocol) { - gid_attr = attr->ah_attr.grh.sgid_attr; - ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL); - if (ret) - goto out; - } - - if (vlan < VLAN_CFI_MASK) { - roce_set_bit(context->byte_76_srqn_op_en, - V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); - roce_set_bit(qpc_mask->byte_76_srqn_op_en, - V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); - roce_set_bit(context->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); - roce_set_bit(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); - } - - roce_set_field(context->byte_24_mtu_tc, - V2_QPC_BYTE_24_VLAN_ID_M, - V2_QPC_BYTE_24_VLAN_ID_S, vlan); - roce_set_field(qpc_mask->byte_24_mtu_tc, - V2_QPC_BYTE_24_VLAN_ID_M, - V2_QPC_BYTE_24_VLAN_ID_S, 0); - - if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { - dev_err(hr_dev->dev, - "sgid_index(%u) too large. max is %d\n", - grh->sgid_index, - hr_dev->caps.gid_table_len[hr_port]); - ret = -EINVAL; - goto out; - } - - if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { - dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); - ret = -EINVAL; - goto out; - } - - roce_set_field(context->byte_52_udpspn_dmac, - V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S, - (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ? 
- 0 : 0x12b7); - - roce_set_field(qpc_mask->byte_52_udpspn_dmac, - V2_QPC_BYTE_52_UDPSPN_M, - V2_QPC_BYTE_52_UDPSPN_S, 0); - - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SGID_IDX_M, - V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index); - - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SGID_IDX_M, - V2_QPC_BYTE_20_SGID_IDX_S, 0); - - roce_set_field(context->byte_24_mtu_tc, - V2_QPC_BYTE_24_HOP_LIMIT_M, - V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit); - roce_set_field(qpc_mask->byte_24_mtu_tc, - V2_QPC_BYTE_24_HOP_LIMIT_M, - V2_QPC_BYTE_24_HOP_LIMIT_S, 0); - - if (hr_dev->pci_dev->revision == 0x21 && - gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) - roce_set_field(context->byte_24_mtu_tc, - V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, - grh->traffic_class >> 2); - else - roce_set_field(context->byte_24_mtu_tc, - V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, - grh->traffic_class); - roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, - V2_QPC_BYTE_24_TC_S, 0); - roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, - V2_QPC_BYTE_28_FL_S, grh->flow_label); - roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, - V2_QPC_BYTE_28_FL_S, 0); - memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); - memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); - roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, - V2_QPC_BYTE_28_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, - V2_QPC_BYTE_28_SL_S, 0); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); + ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context, + qpc_mask); + if (ret) + return ret; } if (attr_mask & IB_QP_TIMEOUT) { @@ -4158,7 +4143,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, 0); } else { - dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n"); + dev_warn(hr_dev->dev, + "Local ACK timeout shall be 0 to 30.\n"); } } @@ -4196,6 +4182,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, V2_QPC_BYTE_244_RNR_CNT_S, 0); } + /* RC&UC&UD required attr */ if (attr_mask & IB_QP_SQ_PSN) { roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M, @@ -4295,6 +4282,83 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, hr_qp->qkey = attr->qkey; } + return ret; +} + +static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + + if (attr_mask & IB_QP_ACCESS_FLAGS) + hr_qp->atomic_rd_en = attr->qp_access_flags; + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + hr_qp->resp_depth = attr->max_dest_rd_atomic; + if (attr_mask & IB_QP_PORT) { + hr_qp->port = attr->port_num - 1; + hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; + } +} + +static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, enum ib_qp_state cur_state, + enum ib_qp_state new_state) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct hns_roce_v2_qp_context *context; + struct hns_roce_v2_qp_context *qpc_mask; + struct device *dev = hr_dev->dev; + int ret = -EINVAL; + + context = kcalloc(2, sizeof(*context), GFP_ATOMIC); + if (!context) + return -ENOMEM; + + qpc_mask = context + 1; + /* + * In v2 engine, software pass context and context mask to hardware + * when modifying qp. 
If software need modify some fields in context, + * we should set all bits of the relevant fields in context mask to + * 0 at the same time, else set them to 0x1. + */ + memset(qpc_mask, 0xff, sizeof(*qpc_mask)); + ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state, + new_state, context, qpc_mask); + if (ret) + goto out; + + /* When QP state is err, SQ and RQ WQE should be flushed */ + if (new_state == IB_QPS_ERR) { + roce_set_field(context->byte_160_sq_ci_pi, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, + hr_qp->sq.head); + roce_set_field(qpc_mask->byte_160_sq_ci_pi, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); + + if (!ibqp->srq) { + roce_set_field(context->byte_84_rq_ci_pi, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, + hr_qp->rq.head); + roce_set_field(qpc_mask->byte_84_rq_ci_pi, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); + } + } + + /* Configure the optional fields */ + ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context, + qpc_mask); + if (ret) + goto out; + roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S, ibqp->srq ? 1 : 0); roce_set_bit(qpc_mask->byte_108_rx_reqepsn, @@ -4316,15 +4380,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, hr_qp->state = new_state; - if (attr_mask & IB_QP_ACCESS_FLAGS) - hr_qp->atomic_rd_en = attr->qp_access_flags; - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - hr_qp->resp_depth = attr->max_dest_rd_atomic; - if (attr_mask & IB_QP_PORT) { - hr_qp->port = attr->port_num - 1; - hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - } + hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask); if (new_state == IB_QPS_RESET && !ibqp->uobject) { hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, From 947441eadb909b031fc0793d049e1af0d5855a21 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Mon, 8 Jul 2019 21:41:20 +0800 Subject: [PATCH 10/99] RDMA/hns: Use a separated function for setting extend sge paramters Moves the related lines of setting extended sge size into a separate function as well as remove the unused variables. 
Link: https://lore.kernel.org/r/1562593285-8037-5-git-send-email-oulijun@huawei.com Signed-off-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 2 - drivers/infiniband/hw/hns/hns_roce_qp.c | 89 ++++++++++++--------- 2 files changed, 52 insertions(+), 39 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a548b28aab63..b39497a13b61 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -654,8 +654,6 @@ struct hns_roce_qp { u32 doorbell_qpn; __le32 sq_signal_bits; u32 sq_next_wqe; - int sq_max_wqes_per_wr; - int sq_spare_wqes; struct hns_roce_wq sq; struct ib_umem *umem; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e0424029b058..9c272c20bbf9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -501,43 +501,10 @@ static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev, return bt_pg_shift - PAGE_SHIFT; } -static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp) +static int set_extend_sge_param(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) { struct device *dev = hr_dev->dev; - u32 page_size; - u32 max_cnt; - int size; - - if (cap->max_send_wr > hr_dev->caps.max_wqes || - cap->max_send_sge > hr_dev->caps.max_sq_sg || - cap->max_inline_data > hr_dev->caps.max_sq_inline) { - dev_err(dev, "SQ WR or sge or inline data error!\n"); - return -EINVAL; - } - - hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); - hr_qp->sq_max_wqes_per_wr = 1; - hr_qp->sq_spare_wqes = 0; - - if (hr_dev->caps.min_wqes) - max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); - else - max_cnt = cap->max_send_wr; - - hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt); - if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) { - dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n"); - return -EINVAL; - } - - /* Get data_seg numbers */ - max_cnt = max(1U, cap->max_send_sge); - if (hr_dev->caps.max_sq_sg <= 2) - hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); - else - hr_qp->sq.max_gs = max_cnt; if (hr_qp->sq.max_gs > 2) { hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * @@ -560,6 +527,52 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, } } + return 0; +} + +static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, + struct hns_roce_qp *hr_qp) +{ + struct device *dev = hr_dev->dev; + u32 page_size; + u32 max_cnt; + int size; + int ret; + + if (cap->max_send_wr > hr_dev->caps.max_wqes || + cap->max_send_sge > hr_dev->caps.max_sq_sg || + cap->max_inline_data > hr_dev->caps.max_sq_inline) { + dev_err(dev, "SQ WR or sge or inline data error!\n"); + return -EINVAL; + } + + hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); + + if (hr_dev->caps.min_wqes) + max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); + else + max_cnt = cap->max_send_wr; + + hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt); + if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) { + dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n"); + return -EINVAL; + } + + /* Get data_seg numbers */ + max_cnt = max(1U, cap->max_send_sge); + if (hr_dev->caps.max_sq_sg <= 2) + hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); + else + hr_qp->sq.max_gs = max_cnt; + + ret = set_extend_sge_param(hr_dev, hr_qp); + if (ret) 
{ + dev_err(dev, "set extend sge parameters fail\n"); + return ret; + } + /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); hr_qp->sq.offset = 0; @@ -942,11 +955,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hns_roce_free_db(hr_dev, &hr_qp->rdb); err_rq_sge_list: - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hns_roce_qp_has_rq(init_attr)) kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); err_wqe_list: - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hns_roce_qp_has_rq(init_attr)) kfree(hr_qp->rq_inl_buf.wqe_list); err_out: From 99441ab552f1e1713a562838215c080387a94267 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 8 Jul 2019 21:41:21 +0800 Subject: [PATCH 11/99] RDMA/hns: optimize the duplicated code for qpc setting flow Currently, more than 20 lines of duplicate code exist in function 'modify_qp_init_to_init' and function 'modify_qp_reset_to_init', which affects the readability of the code. Consolidate them. Link: https://lore.kernel.org/r/1562593285-8037-6-git-send-email-oulijun@huawei.com Signed-off-by: Xi Wang Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 96 +++++++++------------- 1 file changed, 38 insertions(+), 58 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index edc5dd257d16..c2ddfc123bee 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3118,6 +3118,43 @@ static void set_access_flags(struct hns_roce_qp *hr_qp, roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); } +static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) +{ + if (hr_qp->ibqp.qp_type == IB_QPT_GSI) + roce_set_field(context->byte_4_sqpn_tst, + V2_QPC_BYTE_4_SGE_SHIFT_M, + V2_QPC_BYTE_4_SGE_SHIFT_S, + ilog2((unsigned int)hr_qp->sge.sge_cnt)); + else + roce_set_field(context->byte_4_sqpn_tst, + V2_QPC_BYTE_4_SGE_SHIFT_M, + V2_QPC_BYTE_4_SGE_SHIFT_S, + hr_qp->sq.max_gs > + HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ? + ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); + + roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, + V2_QPC_BYTE_4_SGE_SHIFT_S, 0); + + roce_set_field(context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, + ilog2((unsigned int)hr_qp->sq.wqe_cnt)); + roce_set_field(qpc_mask->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); + + roce_set_field(context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, + (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || + hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || + hr_qp->ibqp.srq) ? 
0 : + ilog2((unsigned int)hr_qp->rq.wqe_cnt)); + + roce_set_field(qpc_mask->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); +} + static void modify_qp_reset_to_init(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, @@ -3138,21 +3175,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, 0); - if (ibqp->qp_type == IB_QPT_GSI) - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, - ilog2((unsigned int)hr_qp->sge.sge_cnt)); - else - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, - hr_qp->sq.max_gs > 2 ? - ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); - - roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, 0); - roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, @@ -3168,19 +3190,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, V2_QPC_BYTE_20_RQWS_S, 0); - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); - - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, - (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || - hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 : - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); + set_qpc_wqe_cnt(hr_qp, context, qpc_mask); /* No VLAN need to set 0xFFF */ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, @@ -3456,22 +3466,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, 0); - if (ibqp->qp_type == IB_QPT_GSI) - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, - ilog2((unsigned int)hr_qp->sge.sge_cnt)); - else - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, - hr_qp->sq.max_gs > - HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ? - ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); - - roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, 0); - if (attr_mask & IB_QP_ACCESS_FLAGS) { roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); @@ -3506,20 +3500,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, 0); } - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); - - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, - (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || - hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 
0 : - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); - roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, From 89b4b70b974c98139f57eeb06facab40473268d3 Mon Sep 17 00:00:00 2001 From: chenglang Date: Mon, 8 Jul 2019 21:41:22 +0800 Subject: [PATCH 12/99] RDMA/hns: Optimize hns_roce_mhop_alloc function. Move some lines for allocating multi-hop addressing into independent functions in order to improve readability. Link: https://lore.kernel.org/r/1562593285-8037-7-git-send-email-oulijun@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_mr.c | 285 ++++++++++++++---------- 1 file changed, 165 insertions(+), 120 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 549e1a38dfe0..c4b758cf7dad 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -347,49 +347,179 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, mr->pbl_bt_l0 = NULL; mr->pbl_l0_dma_addr = 0; } +static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages, + struct hns_roce_mr *mr, u32 pbl_bt_sz) +{ + struct device *dev = hr_dev->dev; + + if (npages > pbl_bt_sz / 8) { + dev_err(dev, "npages %d is larger than buf_pg_sz!", + npages); + return -EINVAL; + } + mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, + &(mr->pbl_dma_addr), + GFP_KERNEL); + if (!mr->pbl_buf) + return -ENOMEM; + + mr->pbl_size = npages; + mr->pbl_ba = mr->pbl_dma_addr; + mr->pbl_hop_num = 1; + mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; + mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; + return 0; + +} + + +static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages, + struct hns_roce_mr *mr, u32 pbl_bt_sz) +{ + struct device *dev = hr_dev->dev; + int npages_allocated; + u64 pbl_last_bt_num; + u64 pbl_bt_cnt = 0; + u64 size; + int i; + + pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); + + /* alloc L1 BT */ + for (i = 0; i < pbl_bt_sz / 8; i++) { + if (pbl_bt_cnt + 1 < pbl_last_bt_num) { + size = pbl_bt_sz; + } else { + npages_allocated = i * (pbl_bt_sz / 8); + size = (npages - npages_allocated) * 8; + } + mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, + &(mr->pbl_l1_dma_addr[i]), + GFP_KERNEL); + if (!mr->pbl_bt_l1[i]) { + hns_roce_loop_free(hr_dev, mr, 1, i, 0); + return -ENOMEM; + } + + *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; + + pbl_bt_cnt++; + if (pbl_bt_cnt >= pbl_last_bt_num) + break; + } + + mr->l0_chunk_last_num = i + 1; + + return 0; +} + +static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages, + struct hns_roce_mr *mr, u32 pbl_bt_sz) +{ + struct device *dev = hr_dev->dev; + int mr_alloc_done = 0; + int npages_allocated; + u64 pbl_last_bt_num; + u64 pbl_bt_cnt = 0; + u64 bt_idx; + u64 size; + int i; + int j = 0; + + pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); + + mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num, + sizeof(*mr->pbl_l2_dma_addr), + GFP_KERNEL); + if (!mr->pbl_l2_dma_addr) + return -ENOMEM; + + mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, + sizeof(*mr->pbl_bt_l2), + GFP_KERNEL); + if (!mr->pbl_bt_l2) + goto err_kcalloc_bt_l2; + + /* alloc L1, L2 BT */ + for (i = 0; i < pbl_bt_sz / 8; i++) { + mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, + 
&(mr->pbl_l1_dma_addr[i]), + GFP_KERNEL); + if (!mr->pbl_bt_l1[i]) { + hns_roce_loop_free(hr_dev, mr, 1, i, 0); + goto err_dma_alloc_l0; + } + + *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; + + for (j = 0; j < pbl_bt_sz / 8; j++) { + bt_idx = i * pbl_bt_sz / 8 + j; + + if (pbl_bt_cnt + 1 < pbl_last_bt_num) { + size = pbl_bt_sz; + } else { + npages_allocated = bt_idx * + (pbl_bt_sz / 8); + size = (npages - npages_allocated) * 8; + } + mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent( + dev, size, + &(mr->pbl_l2_dma_addr[bt_idx]), + GFP_KERNEL); + if (!mr->pbl_bt_l2[bt_idx]) { + hns_roce_loop_free(hr_dev, mr, 2, i, j); + goto err_dma_alloc_l0; + } + + *(mr->pbl_bt_l1[i] + j) = + mr->pbl_l2_dma_addr[bt_idx]; + + pbl_bt_cnt++; + if (pbl_bt_cnt >= pbl_last_bt_num) { + mr_alloc_done = 1; + break; + } + } + + if (mr_alloc_done) + break; + } + + mr->l0_chunk_last_num = i + 1; + mr->l1_chunk_last_num = j + 1; + + + return 0; + +err_dma_alloc_l0: + kfree(mr->pbl_bt_l2); + mr->pbl_bt_l2 = NULL; + +err_kcalloc_bt_l2: + kfree(mr->pbl_l2_dma_addr); + mr->pbl_l2_dma_addr = NULL; + + return -ENOMEM; +} + /* PBL multi hop addressing */ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, struct hns_roce_mr *mr) { struct device *dev = hr_dev->dev; - int mr_alloc_done = 0; - int npages_allocated; - int i = 0, j = 0; u32 pbl_bt_sz; u32 mhop_num; - u64 pbl_last_bt_num; - u64 pbl_bt_cnt = 0; - u64 bt_idx; - u64 size; mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num); pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); - pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); if (mhop_num == HNS_ROCE_HOP_NUM_0) return 0; /* hop_num = 1 */ - if (mhop_num == 1) { - if (npages > pbl_bt_sz / 8) { - dev_err(dev, "npages %d is larger than buf_pg_sz!", - npages); - return -EINVAL; - } - mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, - &(mr->pbl_dma_addr), - GFP_KERNEL); - if (!mr->pbl_buf) - return -ENOMEM; - - mr->pbl_size = npages; - mr->pbl_ba = mr->pbl_dma_addr; - mr->pbl_hop_num = mhop_num; - mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; - mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; - return 0; - } + if (mhop_num == 1) + return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz); mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_l1_dma_addr), @@ -402,100 +532,23 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, if (!mr->pbl_bt_l1) goto err_kcalloc_bt_l1; - if (mhop_num == 3) { - mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num, - sizeof(*mr->pbl_l2_dma_addr), - GFP_KERNEL); - if (!mr->pbl_l2_dma_addr) - goto err_kcalloc_l2_dma; - - mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, - sizeof(*mr->pbl_bt_l2), - GFP_KERNEL); - if (!mr->pbl_bt_l2) - goto err_kcalloc_bt_l2; - } - /* alloc L0 BT */ mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz, &(mr->pbl_l0_dma_addr), GFP_KERNEL); if (!mr->pbl_bt_l0) - goto err_dma_alloc_l0; + goto err_kcalloc_l2_dma; if (mhop_num == 2) { - /* alloc L1 BT */ - for (i = 0; i < pbl_bt_sz / 8; i++) { - if (pbl_bt_cnt + 1 < pbl_last_bt_num) { - size = pbl_bt_sz; - } else { - npages_allocated = i * (pbl_bt_sz / 8); - size = (npages - npages_allocated) * 8; - } - mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, - &(mr->pbl_l1_dma_addr[i]), - GFP_KERNEL); - if (!mr->pbl_bt_l1[i]) { - hns_roce_loop_free(hr_dev, mr, 1, i, 0); - goto err_dma_alloc_l0; - } - - *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; - - pbl_bt_cnt++; - if (pbl_bt_cnt >= pbl_last_bt_num) - break; - } - } else if (mhop_num == 3) { - /* alloc L1, L2 
BT */ - for (i = 0; i < pbl_bt_sz / 8; i++) { - mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, - &(mr->pbl_l1_dma_addr[i]), - GFP_KERNEL); - if (!mr->pbl_bt_l1[i]) { - hns_roce_loop_free(hr_dev, mr, 1, i, 0); - goto err_dma_alloc_l0; - } - - *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; - - for (j = 0; j < pbl_bt_sz / 8; j++) { - bt_idx = i * pbl_bt_sz / 8 + j; - - if (pbl_bt_cnt + 1 < pbl_last_bt_num) { - size = pbl_bt_sz; - } else { - npages_allocated = bt_idx * - (pbl_bt_sz / 8); - size = (npages - npages_allocated) * 8; - } - mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent( - dev, size, - &(mr->pbl_l2_dma_addr[bt_idx]), - GFP_KERNEL); - if (!mr->pbl_bt_l2[bt_idx]) { - hns_roce_loop_free(hr_dev, mr, 2, i, j); - goto err_dma_alloc_l0; - } - - *(mr->pbl_bt_l1[i] + j) = - mr->pbl_l2_dma_addr[bt_idx]; - - pbl_bt_cnt++; - if (pbl_bt_cnt >= pbl_last_bt_num) { - mr_alloc_done = 1; - break; - } - } - - if (mr_alloc_done) - break; - } + if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) + goto err_kcalloc_l2_dma; + } + + if (mhop_num == 3) { + if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) + goto err_kcalloc_l2_dma; } - mr->l0_chunk_last_num = i + 1; - if (mhop_num == 3) - mr->l1_chunk_last_num = j + 1; mr->pbl_size = npages; mr->pbl_ba = mr->pbl_l0_dma_addr; @@ -505,14 +558,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, return 0; -err_dma_alloc_l0: - kfree(mr->pbl_bt_l2); - mr->pbl_bt_l2 = NULL; - -err_kcalloc_bt_l2: - kfree(mr->pbl_l2_dma_addr); - mr->pbl_l2_dma_addr = NULL; - err_kcalloc_l2_dma: kfree(mr->pbl_bt_l1); mr->pbl_bt_l1 = NULL; From 3ee0e170d72c05172b0aa4e46d58795fdc19081e Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Mon, 8 Jul 2019 21:41:23 +0800 Subject: [PATCH 13/99] RDMA/hns: Package for hns_roce_rereg_user_mr function Move some code of the hns_roce_rereg_user_mr() function into an independent function in order to improve readability.
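Condensed from the diff below (a sketch for orientation, not a verbatim quote), the rereg path ends up as a single branch that either calls the new helper or rewrites the MTPT directly:

	if (flags & IB_MR_REREG_TRANS) {
		/* translation changed: the helper re-pins the umem,
		 * rebuilds the PBL and rewrites the MTPT */
		ret = rereg_mr_trans(ibmr, flags, start, length, virt_addr,
				     mr_access_flags, mailbox, pdn, udata);
		if (ret)
			goto free_cmd_mbox;
	} else {
		/* only PD/access flags changed: rewrite the MTPT in place */
		ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
						   mr_access_flags, virt_addr,
						   length, mailbox->buf);
		if (ret)
			goto free_cmd_mbox;
	}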
Link: https://lore.kernel.org/r/1562593285-8037-8-git-send-email-oulijun@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_mr.c | 153 ++++++++++++++---------- 1 file changed, 89 insertions(+), 64 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index c4b758cf7dad..0cfa94605f77 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -1206,6 +1206,83 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return ERR_PTR(ret); } +static int rereg_mr_trans(struct ib_mr *ibmr, int flags, + u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct hns_roce_cmd_mailbox *mailbox, + u32 pdn, struct ib_udata *udata) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); + struct hns_roce_mr *mr = to_hr_mr(ibmr); + struct device *dev = hr_dev->dev; + int npages; + int ret; + + if (mr->size != ~0ULL) { + npages = ib_umem_page_count(mr->umem); + + if (hr_dev->caps.pbl_hop_num) + hns_roce_mhop_free(hr_dev, mr); + else + dma_free_coherent(dev, npages * 8, + mr->pbl_buf, mr->pbl_dma_addr); + } + ib_umem_release(mr->umem); + + mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0); + if (IS_ERR(mr->umem)) { + ret = PTR_ERR(mr->umem); + mr->umem = NULL; + return -ENOMEM; + } + npages = ib_umem_page_count(mr->umem); + + if (hr_dev->caps.pbl_hop_num) { + ret = hns_roce_mhop_alloc(hr_dev, npages, mr); + if (ret) + goto release_umem; + } else { + mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, + &(mr->pbl_dma_addr), + GFP_KERNEL); + if (!mr->pbl_buf) { + ret = -ENOMEM; + goto release_umem; + } + } + + ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, + mr_access_flags, virt_addr, + length, mailbox->buf); + if (ret) + goto release_umem; + + + ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); + if (ret) { + if (mr->size != ~0ULL) { + npages = ib_umem_page_count(mr->umem); + + if (hr_dev->caps.pbl_hop_num) + hns_roce_mhop_free(hr_dev, mr); + else + dma_free_coherent(dev, npages * 8, + mr->pbl_buf, + mr->pbl_dma_addr); + } + + goto release_umem; + } + + return 0; + +release_umem: + ib_umem_release(mr->umem); + return ret; + +} + + int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata) @@ -1216,7 +1293,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, struct device *dev = hr_dev->dev; unsigned long mtpt_idx; u32 pdn = 0; - int npages; int ret; if (!mr->enabled) @@ -1243,73 +1319,25 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, pdn = to_hr_pd(pd)->pdn; if (flags & IB_MR_REREG_TRANS) { - if (mr->size != ~0ULL) { - npages = ib_umem_page_count(mr->umem); - - if (hr_dev->caps.pbl_hop_num) - hns_roce_mhop_free(hr_dev, mr); - else - dma_free_coherent(dev, npages * 8, mr->pbl_buf, - mr->pbl_dma_addr); - } - ib_umem_release(mr->umem); - - mr->umem = - ib_umem_get(udata, start, length, mr_access_flags, 0); - if (IS_ERR(mr->umem)) { - ret = PTR_ERR(mr->umem); - mr->umem = NULL; + ret = rereg_mr_trans(ibmr, flags, + start, length, + virt_addr, mr_access_flags, + mailbox, pdn, udata); + if (ret) goto free_cmd_mbox; - } - npages = ib_umem_page_count(mr->umem); - - if (hr_dev->caps.pbl_hop_num) { - ret = hns_roce_mhop_alloc(hr_dev, npages, mr); - if (ret) - goto release_umem; - } else { - mr->pbl_buf = 
dma_alloc_coherent(dev, npages * 8, - &(mr->pbl_dma_addr), - GFP_KERNEL); - if (!mr->pbl_buf) { - ret = -ENOMEM; - goto release_umem; - } - } - } - - ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, - mr_access_flags, virt_addr, - length, mailbox->buf); - if (ret) { - if (flags & IB_MR_REREG_TRANS) - goto release_umem; - else + } else { + ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, + mr_access_flags, virt_addr, + length, mailbox->buf); + if (ret) goto free_cmd_mbox; } - if (flags & IB_MR_REREG_TRANS) { - ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); - if (ret) { - if (mr->size != ~0ULL) { - npages = ib_umem_page_count(mr->umem); - - if (hr_dev->caps.pbl_hop_num) - hns_roce_mhop_free(hr_dev, mr); - else - dma_free_coherent(dev, npages * 8, - mr->pbl_buf, - mr->pbl_dma_addr); - } - - goto release_umem; - } - } - ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx); if (ret) { dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); - goto release_umem; + ib_umem_release(mr->umem); + goto free_cmd_mbox; } mr->enabled = 1; @@ -1320,9 +1348,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, return 0; -release_umem: - ib_umem_release(mr->umem); - free_cmd_mbox: hns_roce_free_cmd_mailbox(hr_dev, mailbox); From d7019c0f47ae497bae27be5397287ca72c704bf4 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Mon, 8 Jul 2019 21:41:24 +0800 Subject: [PATCH 14/99] RDMA/hns: Refactor hem table mhop check and calculation The calculation of mhop for hem is duplicated in hns_roce_init_hem_table and hns_roce_calc_hem_mhop, extracting it from them to a separate function. Moreover, this patch refactors hns_roce_check_whether_mhop to reduce complexity. Link: https://lore.kernel.org/r/1562593285-8037-9-git-send-email-oulijun@huawei.com Signed-off-by: Yixian Liu Signed-off-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hem.c | 178 +++++++++-------------- 1 file changed, 69 insertions(+), 109 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 198163d3510f..d3e72a0be0b3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -41,20 +41,47 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) { - if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) || - (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) || - (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) || - (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) || - (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) || - (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) || - (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) || - (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) || - (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) || - (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) || - (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX)) - return true; + int hop_num = 0; - return false; + switch (type) { + case HEM_TYPE_QPC: + hop_num = hr_dev->caps.qpc_hop_num; + break; + case HEM_TYPE_MTPT: + hop_num = hr_dev->caps.mpt_hop_num; + break; + case HEM_TYPE_CQC: + hop_num = hr_dev->caps.cqc_hop_num; + break; + case HEM_TYPE_SRQC: + hop_num = hr_dev->caps.srqc_hop_num; + break; + case HEM_TYPE_SCCC: + hop_num = hr_dev->caps.sccc_hop_num; + break; + case HEM_TYPE_QPC_TIMER: + hop_num = hr_dev->caps.qpc_timer_hop_num; + break; + case HEM_TYPE_CQC_TIMER: + hop_num = hr_dev->caps.cqc_timer_hop_num; + break; + 
case HEM_TYPE_CQE: + hop_num = hr_dev->caps.cqe_hop_num; + break; + case HEM_TYPE_MTT: + hop_num = hr_dev->caps.mtt_hop_num; + break; + case HEM_TYPE_SRQWQE: + hop_num = hr_dev->caps.srqwqe_hop_num; + break; + case HEM_TYPE_IDX: + hop_num = hr_dev->caps.idx_hop_num; + break; + default: + return false; + } + + return hop_num ? true : false; } static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx, @@ -92,17 +119,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num) return 0; } -int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, unsigned long *obj, - struct hns_roce_hem_mhop *mhop) +static int get_hem_table_config(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_mhop *mhop, + u32 type) { struct device *dev = hr_dev->dev; - u32 chunk_ba_num; - u32 table_idx; - u32 bt_num; - u32 chunk_size; - switch (table->type) { + switch (type) { case HEM_TYPE_QPC: mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz + PAGE_SHIFT); @@ -193,10 +216,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, break; default: dev_err(dev, "Table %d not support multi-hop addressing!\n", - table->type); + type); return -EINVAL; } + return 0; +} + +int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, unsigned long *obj, + struct hns_roce_hem_mhop *mhop) +{ + struct device *dev = hr_dev->dev; + u32 chunk_ba_num; + u32 table_idx; + u32 bt_num; + u32 chunk_size; + + if (get_hem_table_config(hr_dev, mhop, table->type)) + return -EINVAL; + if (!obj) return 0; @@ -887,7 +926,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, unsigned long obj_size, unsigned long nobj, int use_lowmem) { - struct device *dev = hr_dev->dev; unsigned long obj_per_chunk; unsigned long num_hem; @@ -900,99 +938,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, if (!table->hem) return -ENOMEM; } else { + struct hns_roce_hem_mhop mhop = {}; unsigned long buf_chunk_size; unsigned long bt_chunk_size; unsigned long bt_chunk_num; unsigned long num_bt_l0 = 0; u32 hop_num; - switch (type) { - case HEM_TYPE_QPC: - buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.qpc_bt_num; - hop_num = hr_dev->caps.qpc_hop_num; - break; - case HEM_TYPE_MTPT: - buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.mpt_bt_num; - hop_num = hr_dev->caps.mpt_hop_num; - break; - case HEM_TYPE_CQC: - buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.cqc_bt_num; - hop_num = hr_dev->caps.cqc_hop_num; - break; - case HEM_TYPE_SCCC: - buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.sccc_bt_num; - hop_num = hr_dev->caps.sccc_hop_num; - break; - case HEM_TYPE_QPC_TIMER: - buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.qpc_timer_bt_num; - hop_num = hr_dev->caps.qpc_timer_hop_num; - break; - case HEM_TYPE_CQC_TIMER: - buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = 
hr_dev->caps.cqc_timer_bt_num; - hop_num = hr_dev->caps.cqc_timer_hop_num; - break; - case HEM_TYPE_SRQC: - buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.srqc_bt_num; - hop_num = hr_dev->caps.srqc_hop_num; - break; - case HEM_TYPE_MTT: - buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz - + PAGE_SHIFT); - bt_chunk_size = buf_chunk_size; - hop_num = hr_dev->caps.mtt_hop_num; - break; - case HEM_TYPE_CQE: - buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz - + PAGE_SHIFT); - bt_chunk_size = buf_chunk_size; - hop_num = hr_dev->caps.cqe_hop_num; - break; - case HEM_TYPE_SRQWQE: - buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz - + PAGE_SHIFT); - bt_chunk_size = buf_chunk_size; - hop_num = hr_dev->caps.srqwqe_hop_num; - break; - case HEM_TYPE_IDX: - buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz - + PAGE_SHIFT); - bt_chunk_size = buf_chunk_size; - hop_num = hr_dev->caps.idx_hop_num; - break; - default: - dev_err(dev, - "Table %d not support to init hem table here!\n", - type); + if (get_hem_table_config(hr_dev, &mhop, type)) return -EINVAL; - } + + buf_chunk_size = mhop.buf_chunk_size; + bt_chunk_size = mhop.bt_chunk_size; + num_bt_l0 = mhop.ba_l0_num; + hop_num = mhop.hop_num; + obj_per_chunk = buf_chunk_size / obj_size; num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; bt_chunk_num = bt_chunk_size / BA_BYTE_LEN; From 33db6f94847c33ebdeb2e73ef5cd8fd04fb58e86 Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Mon, 8 Jul 2019 21:41:25 +0800 Subject: [PATCH 15/99] RDMA/hns: Refactor eq table init for hip08 To make the code more readable, move the part of naming irq and request irq out of eq table init into a separate function. Link: https://lore.kernel.org/r/1562593285-8037-10-git-send-email-oulijun@huawei.com Signed-off-by: Yixian Liu Signed-off-by: Lijun Ou Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 176 +++++++++++++-------- 1 file changed, 107 insertions(+), 69 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c2ddfc123bee..83c58be388af 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5735,6 +5735,94 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, return ret; } +static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, + int comp_num, int aeq_num, int other_num) +{ + struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; + int i, j; + int ret; + + for (i = 0; i < irq_num; i++) { + hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, + GFP_KERNEL); + if (!hr_dev->irq_names[i]) { + ret = -ENOMEM; + goto err_kzalloc_failed; + } + } + + /* irq contains: abnormal + AEQ + CEQ*/ + for (j = 0; j < irq_num; j++) + if (j < other_num) + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j); + else if (j < (other_num + aeq_num)) + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", + j - other_num); + else + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", + j - other_num - aeq_num); + + for (j = 0; j < irq_num; j++) { + if (j < other_num) + ret = request_irq(hr_dev->irq[j], + hns_roce_v2_msix_interrupt_abn, + 0, hr_dev->irq_names[j], hr_dev); + + else if (j < (other_num + comp_num)) + ret = request_irq(eq_table->eq[j - other_num].irq, + hns_roce_v2_msix_interrupt_eq, + 0, hr_dev->irq_names[j + aeq_num], 
+ &eq_table->eq[j - other_num]); + else + ret = request_irq(eq_table->eq[j - other_num].irq, + hns_roce_v2_msix_interrupt_eq, + 0, hr_dev->irq_names[j - comp_num], + &eq_table->eq[j - other_num]); + if (ret) { + dev_err(hr_dev->dev, "Request irq error!\n"); + goto err_request_failed; + } + } + + return 0; + +err_request_failed: + for (j -= 1; j >= 0; j--) + if (j < other_num) + free_irq(hr_dev->irq[j], hr_dev); + else + free_irq(eq_table->eq[j - other_num].irq, + &eq_table->eq[j - other_num]); + +err_kzalloc_failed: + for (i -= 1; i >= 0; i--) + kfree(hr_dev->irq_names[i]); + + return ret; +} + +static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev) +{ + int irq_num; + int eq_num; + int i; + + eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; + irq_num = eq_num + hr_dev->caps.num_other_vectors; + + for (i = 0; i < hr_dev->caps.num_other_vectors; i++) + free_irq(hr_dev->irq[i], hr_dev); + + for (i = 0; i < eq_num; i++) + free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]); + + for (i = 0; i < irq_num; i++) + kfree(hr_dev->irq_names[i]); +} + static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) { struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; @@ -5746,7 +5834,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) int other_num; int comp_num; int aeq_num; - int i, j, k; + int i; int ret; other_num = hr_dev->caps.num_other_vectors; @@ -5760,27 +5848,18 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) if (!eq_table->eq) return -ENOMEM; - for (i = 0; i < irq_num; i++) { - hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, - GFP_KERNEL); - if (!hr_dev->irq_names[i]) { - ret = -ENOMEM; - goto err_failed_kzalloc; - } - } - /* create eq */ - for (j = 0; j < eq_num; j++) { - eq = &eq_table->eq[j]; + for (i = 0; i < eq_num; i++) { + eq = &eq_table->eq[i]; eq->hr_dev = hr_dev; - eq->eqn = j; - if (j < comp_num) { + eq->eqn = i; + if (i < comp_num) { /* CEQ */ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC; eq->type_flag = HNS_ROCE_CEQ; eq->entries = hr_dev->caps.ceqe_depth; eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE; - eq->irq = hr_dev->irq[j + other_num + aeq_num]; + eq->irq = hr_dev->irq[i + other_num + aeq_num]; eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; } else { @@ -5789,7 +5868,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) eq->type_flag = HNS_ROCE_AEQ; eq->entries = hr_dev->caps.aeqe_depth; eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE; - eq->irq = hr_dev->irq[j - comp_num + other_num]; + eq->irq = hr_dev->irq[i - comp_num + other_num]; eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; } @@ -5804,40 +5883,11 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) /* enable irq */ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE); - /* irq contains: abnormal + AEQ + CEQ*/ - for (k = 0; k < irq_num; k++) - if (k < other_num) - snprintf((char *)hr_dev->irq_names[k], - HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k); - else if (k < (other_num + aeq_num)) - snprintf((char *)hr_dev->irq_names[k], - HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", - k - other_num); - else - snprintf((char *)hr_dev->irq_names[k], - HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", - k - other_num - aeq_num); - - for (k = 0; k < irq_num; k++) { - if (k < other_num) - ret = request_irq(hr_dev->irq[k], - hns_roce_v2_msix_interrupt_abn, - 0, hr_dev->irq_names[k], hr_dev); - - else if (k < (other_num + comp_num)) - ret = request_irq(eq_table->eq[k 
- other_num].irq, - hns_roce_v2_msix_interrupt_eq, - 0, hr_dev->irq_names[k + aeq_num], - &eq_table->eq[k - other_num]); - else - ret = request_irq(eq_table->eq[k - other_num].irq, - hns_roce_v2_msix_interrupt_eq, - 0, hr_dev->irq_names[k - comp_num], - &eq_table->eq[k - other_num]); - if (ret) { - dev_err(dev, "Request irq error!\n"); - goto err_request_irq_fail; - } + ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, + aeq_num, other_num); + if (ret) { + dev_err(dev, "Request irq failed.\n"); + goto err_request_irq_fail; } hr_dev->irq_workq = @@ -5845,26 +5895,20 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) if (!hr_dev->irq_workq) { dev_err(dev, "Create irq workqueue failed!\n"); ret = -ENOMEM; - goto err_request_irq_fail; + goto err_create_wq_fail; } return 0; +err_create_wq_fail: + __hns_roce_free_irq(hr_dev); + err_request_irq_fail: - for (k -= 1; k >= 0; k--) - if (k < other_num) - free_irq(hr_dev->irq[k], hr_dev); - else - free_irq(eq_table->eq[k - other_num].irq, - &eq_table->eq[k - other_num]); + hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); err_create_eq_fail: - for (j -= 1; j >= 0; j--) - hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]); - -err_failed_kzalloc: for (i -= 1; i >= 0; i--) - kfree(hr_dev->irq_names[i]); + hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); kfree(eq_table->eq); return ret; @@ -5883,20 +5927,14 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) /* Disable irq */ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); - for (i = 0; i < hr_dev->caps.num_other_vectors; i++) - free_irq(hr_dev->irq[i], hr_dev); + __hns_roce_free_irq(hr_dev); for (i = 0; i < eq_num; i++) { hns_roce_v2_destroy_eqc(hr_dev, i); - free_irq(eq_table->eq[i].irq, &eq_table->eq[i]); - hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); } - for (i = 0; i < irq_num; i++) - kfree(hr_dev->irq_names[i]); - kfree(eq_table->eq); flush_workqueue(hr_dev->irq_workq); From 4cc315c53f959c5d71b96111a7d317f40771558e Mon Sep 17 00:00:00 2001 From: Hariprasad Kelam Date: Tue, 16 Jul 2019 22:59:25 +0530 Subject: [PATCH 16/99] RDMA/qib: Unneeded variable ret Fix the below warning reported by coccicheck: drivers/infiniband/hw/qib/qib_file_ops.c:1792:5-8: Unneeded variable: "ret". Return "0" on line 1876 Link: https://lore.kernel.org/r/20190716172924.GA12241@hari-Inspiron-1545 Signed-off-by: Hariprasad Kelam Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qib/qib_file_ops.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 27b6e664e59d..b0144229cf3b 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1789,7 +1789,6 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd) static int qib_close(struct inode *in, struct file *fp) { - int ret = 0; struct qib_filedata *fd; struct qib_ctxtdata *rcd; struct qib_devdata *dd; @@ -1873,7 +1872,7 @@ static int qib_close(struct inode *in, struct file *fp) bail: kfree(fd); - return ret; + return 0; } static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo) From cf167e5eb92c143ac2b0300ea65ed9f0ce572c3b Mon Sep 17 00:00:00 2001 From: Hariprasad Kelam Date: Tue, 16 Jul 2019 23:07:12 +0530 Subject: [PATCH 17/99] RDMA/qedr: Remove Unneeded variable rc Fix the below warning reported by coccicheck: drivers/infiniband/hw/qedr/verbs.c:2454:5-7: Unneeded variable: "rc". 
Return "0" on line 2499 Link: https://lore.kernel.org/r/20190716173712.GA12949@hari-Inspiron-1545 Signed-off-by: Hariprasad Kelam Acked-by: Michal Kalderon Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 27d90a84ea01..0c6a4bc848f5 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2451,7 +2451,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) struct qedr_dev *dev = qp->dev; struct ib_qp_attr attr; int attr_mask = 0; - int rc = 0; DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n", qp, qp->qp_type); @@ -2496,7 +2495,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) xa_erase_irq(&dev->qps, qp->qp_id); kfree(qp); } - return rc; + return 0; } int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags, From 39289bfc221423b27570e7c9157b690828e786cb Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 22 Jul 2019 17:01:30 +0000 Subject: [PATCH 18/99] RDMA: Make most headers compile stand alone So that rdma can work with CONFIG_KERNEL_HEADER_TEST and CONFIG_HEADERS_CHECK. Link: https://lore.kernel.org/r/20190722170126.GA16453@ziepe.ca Signed-off-by: Jason Gunthorpe --- include/Kbuild | 6 ------ include/rdma/ib.h | 2 ++ include/rdma/iw_portmap.h | 3 +++ include/rdma/opa_port_info.h | 2 ++ include/rdma/rdmavt_cq.h | 1 + include/rdma/signature.h | 2 ++ 6 files changed, 10 insertions(+), 6 deletions(-) diff --git a/include/Kbuild b/include/Kbuild index c38f0d46b267..fc2aa4e20658 100644 --- a/include/Kbuild +++ b/include/Kbuild @@ -945,12 +945,6 @@ header-test- += net/xdp.h header-test- += net/xdp_priv.h header-test- += pcmcia/cistpl.h header-test- += pcmcia/ds.h -header-test- += rdma/ib.h -header-test- += rdma/iw_portmap.h -header-test- += rdma/opa_port_info.h -header-test- += rdma/rdmavt_cq.h -header-test- += rdma/restrack.h -header-test- += rdma/signature.h header-test- += rdma/tid_rdma_defs.h header-test- += scsi/fc/fc_encaps.h header-test- += scsi/fc/fc_fc2.h diff --git a/include/rdma/ib.h b/include/rdma/ib.h index 4f385ec54f80..fe2fc9e91588 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -36,6 +36,8 @@ #include #include #include +#include +#include struct ib_addr { union { diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h index b9fee7feeeb5..c89535047c42 100644 --- a/include/rdma/iw_portmap.h +++ b/include/rdma/iw_portmap.h @@ -33,6 +33,9 @@ #ifndef _IW_PORTMAP_H #define _IW_PORTMAP_H +#include +#include + #define IWPM_ULIBNAME_SIZE 32 #define IWPM_DEVNAME_SIZE 32 #define IWPM_IFNAME_SIZE 16 diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 7147a9263011..bdbfe25d3854 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -33,6 +33,8 @@ #if !defined(OPA_PORT_INFO_H) #define OPA_PORT_INFO_H +#include + #define OPA_PORT_LINK_MODE_NOP 0 /* No change */ #define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */ diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h index 04c519ef6d71..574eb7278f46 100644 --- a/include/rdma/rdmavt_cq.h +++ b/include/rdma/rdmavt_cq.h @@ -53,6 +53,7 @@ #include #include +#include /* * Define an ib_cq_notify value that is not valid so we know when CQ diff --git a/include/rdma/signature.h b/include/rdma/signature.h index f24cc2a1d3c5..d16b0fcc8344 100644 --- 
a/include/rdma/signature.h +++ b/include/rdma/signature.h @@ -6,6 +6,8 @@ #ifndef _RDMA_SIGNATURE_H_ #define _RDMA_SIGNATURE_H_ +#include + enum ib_signature_prot_cap { IB_PROT_T10DIF_TYPE_1 = 1, IB_PROT_T10DIF_TYPE_2 = 1 << 1, From 4f96061b92da395ecd5325a87368670e1b1a7cc9 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 23 Jul 2019 19:49:28 +0800 Subject: [PATCH 19/99] IB/usnic: Use dev_get_drvdata Instead of using to_pci_dev + pci_get_drvdata, use dev_get_drvdata to make the code simpler. Link: https://lore.kernel.org/r/20190723114928.18424-1-hslester96@gmail.com Signed-off-by: Chuhong Yuan Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index eeb07b245ef9..a354c7c86547 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -194,7 +194,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, return ERR_CAST(dev_list); for (i = 0; dev_list[i]; i++) { dev = dev_list[i]; - vf = pci_get_drvdata(to_pci_dev(dev)); + vf = dev_get_drvdata(dev); spin_lock(&vf->lock); vnic = vf->vnic; if (!usnic_vnic_check_room(vnic, res_spec)) { From 089b645d19b2de7c9b541828d445b8c9a5dba792 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 4 Jul 2019 16:09:35 +0300 Subject: [PATCH 20/99] RDMA/mlx4: Separate creation of RWQ and QP The mlx4 WQ is implemented with HW QP without special HW object. Current implementation which tried to reuse the code did it with common QP creation flows. Such decision caused to the absence of mlx4_ib_wq struct, which is needed to ensure proper allocation of ib_wq inside of IB/core. Separate create_qp_common() to pure QP flow and to create_rq() for RWQ. 
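Condensed from the diff below (a sketch, not a verbatim quote), the two creation paths end up calling separate helpers:

	/* QP flow, _mlx4_ib_create_qp(): no source-type argument any more */
	err = create_qp_common(pd, init_attr, udata, sqpn, &qp);

	/* RWQ flow, mlx4_ib_create_wq(): dedicated helper */
	err = create_rq(pd, &ib_qp_init_attr, udata, qp);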
Link: https://lore.kernel.org/r/20190704130936.8705-2-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/qp.c | 236 +++++++++++++++++++++----------- 1 file changed, 154 insertions(+), 82 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 82aff2f2fdc2..e409adac4e2e 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -855,12 +855,143 @@ static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context, mutex_unlock(&context->wqn_ranges_mutex); } -static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, - enum mlx4_ib_source_type src, - struct ib_qp_init_attr *init_attr, +static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, struct mlx4_ib_qp *qp) +{ + struct mlx4_ib_dev *dev = to_mdev(pd->device); + int qpn; + int err; + struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( + udata, struct mlx4_ib_ucontext, ibucontext); + struct mlx4_ib_cq *mcq; + unsigned long flags; + int range_size; + struct mlx4_ib_create_wq wq; + size_t copy_len; + int shift; + int n; + + qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; + + mutex_init(&qp->mutex); + spin_lock_init(&qp->sq.lock); + spin_lock_init(&qp->rq.lock); + INIT_LIST_HEAD(&qp->gid_list); + INIT_LIST_HEAD(&qp->steering_rules); + + qp->state = IB_QPS_RESET; + + copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen); + + if (ib_copy_from_udata(&wq, udata, copy_len)) { + err = -EFAULT; + goto err; + } + + if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] || + wq.reserved[2]) { + pr_debug("user command isn't supported\n"); + err = -EOPNOTSUPP; + goto err; + } + + if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) { + pr_debug("WQN range size must be equal or smaller than %d\n", + dev->dev->caps.max_rss_tbl_sz); + err = -EOPNOTSUPP; + goto err; + } + range_size = 1 << wq.log_range_size; + + if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) + qp->flags |= MLX4_IB_QP_SCATTER_FCS; + + err = set_rq_size(dev, &init_attr->cap, true, 1, qp, qp->inl_recv_sz); + if (err) + goto err; + + qp->sq_no_prefetch = 1; + qp->sq.wqe_cnt = 1; + qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; + qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + + (qp->sq.wqe_cnt << qp->sq.wqe_shift); + + qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0); + if (IS_ERR(qp->umem)) { + err = PTR_ERR(qp->umem); + goto err; + } + + n = ib_umem_page_count(qp->umem); + shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); + err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); + + if (err) + goto err_buf; + + err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); + if (err) + goto err_mtt; + + err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db); + if (err) + goto err_mtt; + qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; + + err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); + if (err) + goto err_wrid; + + err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); + if (err) + goto err_qpn; + + /* + * Hardware wants QPN written in big-endian order (after + * shifting) for send doorbell. Precompute this value to save + * a little bit when posting sends. 
+ */ + qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); + + qp->mqp.event = mlx4_ib_wq_event; + + spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); + mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq)); + /* Maintain device to QPs access, needed for further handling + * via reset flow + */ + list_add_tail(&qp->qps_list, &dev->qp_list); + /* Maintain CQ to QPs access, needed for further handling + * via reset flow + */ + mcq = to_mcq(init_attr->send_cq); + list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); + mcq = to_mcq(init_attr->recv_cq); + list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); + mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq)); + spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); + return 0; + +err_qpn: + mlx4_ib_release_wqn(context, qp, 0); +err_wrid: + mlx4_ib_db_unmap_user(context, &qp->db); + +err_mtt: + mlx4_mtt_cleanup(dev->dev, &qp->mtt); +err_buf: + ib_umem_release(qp->umem); +err: + return err; +} + +static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp) { + struct mlx4_ib_dev *dev = to_mdev(pd->device); int qpn; int err; struct mlx4_ib_sqp *sqp = NULL; @@ -870,7 +1001,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; struct mlx4_ib_cq *mcq; unsigned long flags; - int range_size = 0; /* When tunneling special qps, we use a plain UD qp */ if (sqpn) { @@ -921,15 +1051,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (!sqp) return -ENOMEM; qp = &sqp->qp; - qp->pri.vid = 0xFFFF; - qp->alt.vid = 0xFFFF; } else { qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL); if (!qp) return -ENOMEM; - qp->pri.vid = 0xFFFF; - qp->alt.vid = 0xFFFF; } + qp->pri.vid = 0xFFFF; + qp->alt.vid = 0xFFFF; } else qp = *caller_qp; @@ -941,48 +1069,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, INIT_LIST_HEAD(&qp->gid_list); INIT_LIST_HEAD(&qp->steering_rules); - qp->state = IB_QPS_RESET; + qp->state = IB_QPS_RESET; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); - if (udata) { - union { - struct mlx4_ib_create_qp qp; - struct mlx4_ib_create_wq wq; - } ucmd; + struct mlx4_ib_create_qp ucmd; size_t copy_len; int shift; int n; - copy_len = (src == MLX4_IB_QP_SRC) ? 
- sizeof(struct mlx4_ib_create_qp) : - min(sizeof(struct mlx4_ib_create_wq), udata->inlen); + copy_len = sizeof(struct mlx4_ib_create_qp); if (ib_copy_from_udata(&ucmd, udata, copy_len)) { err = -EFAULT; goto err; } - if (src == MLX4_IB_RWQ_SRC) { - if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] || - ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) { - pr_debug("user command isn't supported\n"); - err = -EOPNOTSUPP; - goto err; - } - - if (ucmd.wq.log_range_size > - ilog2(dev->dev->caps.max_rss_tbl_sz)) { - pr_debug("WQN range size must be equal or smaller than %d\n", - dev->dev->caps.max_rss_tbl_sz); - err = -EOPNOTSUPP; - goto err; - } - range_size = 1 << ucmd.wq.log_range_size; - } else { - qp->inl_recv_sz = ucmd.qp.inl_recv_sz; - } + qp->inl_recv_sz = ucmd.inl_recv_sz; if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) { if (!(dev->dev->caps.flags & @@ -1000,30 +1104,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (err) goto err; - if (src == MLX4_IB_QP_SRC) { - qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch; + qp->sq_no_prefetch = ucmd.sq_no_prefetch; - err = set_user_sq_size(dev, qp, - (struct mlx4_ib_create_qp *) - &ucmd); - if (err) - goto err; - } else { - qp->sq_no_prefetch = 1; - qp->sq.wqe_cnt = 1; - qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; - /* Allocated buffer expects to have at least that SQ - * size. - */ - qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + - (qp->sq.wqe_cnt << qp->sq.wqe_shift); - } + err = set_user_sq_size(dev, qp, &ucmd); + if (err) + goto err; qp->umem = - ib_umem_get(udata, - (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr : - ucmd.wq.buf_addr, - qp->buf_size, 0, 0); + ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0); if (IS_ERR(qp->umem)) { err = PTR_ERR(qp->umem); goto err; @@ -1041,11 +1129,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err_mtt; if (qp_has_rq(init_attr)) { - err = mlx4_ib_db_map_user(udata, - (src == MLX4_IB_QP_SRC) ? - ucmd.qp.db_addr : - ucmd.wq.db_addr, - &qp->db); + err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); if (err) goto err_mtt; } @@ -1115,10 +1199,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err_wrid; } } - } else if (src == MLX4_IB_RWQ_SRC) { - err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); - if (err) - goto err_wrid; } else { /* Raw packet QPNs may not have bits 6,7 set in their qp_num; * otherwise, the WQE BlueFlame setup flow wrongly causes @@ -1157,8 +1237,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, */ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); - qp->mqp.event = (src == MLX4_IB_QP_SRC) ? 
mlx4_ib_qp_event : - mlx4_ib_wq_event; + qp->mqp.event = mlx4_ib_qp_event; if (!*caller_qp) *caller_qp = qp; @@ -1186,8 +1265,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (!sqpn) { if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_free(dev, qpn, 1); - else if (src == MLX4_IB_RWQ_SRC) - mlx4_ib_release_wqn(context, qp, 0); else mlx4_qp_release_range(dev->dev, qpn, 1); } @@ -1518,8 +1595,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, /* fall through */ case IB_QPT_UD: { - err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC, - init_attr, udata, 0, &qp); + err = create_qp_common(pd, init_attr, udata, 0, &qp); if (err) { kfree(qp); return ERR_PTR(err); @@ -1549,8 +1625,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, sqpn = get_sqp_num(to_mdev(pd->device), init_attr); } - err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC, - init_attr, udata, sqpn, &qp); + err = create_qp_common(pd, init_attr, udata, sqpn, &qp); if (err) return ERR_PTR(err); @@ -4047,8 +4122,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, struct ib_udata *udata) { - struct mlx4_ib_dev *dev; - struct ib_qp_init_attr ib_qp_init_attr; + struct mlx4_dev *dev = to_mdev(pd->device)->dev; + struct ib_qp_init_attr ib_qp_init_attr = {}; struct mlx4_ib_qp *qp; struct mlx4_ib_create_wq ucmd; int err, required_cmd_sz; @@ -4073,14 +4148,13 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, if (udata->outlen) return ERR_PTR(-EOPNOTSUPP); - dev = to_mdev(pd->device); - if (init_attr->wq_type != IB_WQT_RQ) { pr_debug("unsupported wq type %d\n", init_attr->wq_type); return ERR_PTR(-EOPNOTSUPP); } - if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) { + if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS || + !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { pr_debug("unsupported create_flags %u\n", init_attr->create_flags); return ERR_PTR(-EOPNOTSUPP); @@ -4093,7 +4167,6 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; - memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr)); ib_qp_init_attr.qp_context = init_attr->wq_context; ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET; ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr; @@ -4104,8 +4177,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS; - err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr, - udata, 0, &qp); + err = create_rq(pd, &ib_qp_init_attr, udata, qp); if (err) { kfree(qp); return ERR_PTR(err); From 913df8c35322dcd15d993cabc8cc92814dee3d50 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 4 Jul 2019 16:09:36 +0300 Subject: [PATCH 21/99] RDMA/mlx4: Annotate boolean arguments as bool and not int Information provided by qp_has_rq() and used later is boolean, so update callers to the proper type.
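Condensed from the diff below (a sketch, not a verbatim quote), the signature change looks like:

	static bool qp_has_rq(struct ib_qp_init_attr *attr)
	{
		/* XRC QPs never own a receive queue */
		if (attr->qp_type == IB_QPT_XRC_INI ||
		    attr->qp_type == IB_QPT_XRC_TGT)
			return false;
		return !attr->srq;
	}

with set_rq_size() taking "bool has_rq" and create_rq() passing true rather than 1.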
Link: https://lore.kernel.org/r/20190704130936.8705-3-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/qp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index e409adac4e2e..bd4aa04416c6 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -325,7 +325,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) } static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, - bool is_user, int has_rq, struct mlx4_ib_qp *qp, + bool is_user, bool has_rq, struct mlx4_ib_qp *qp, u32 inl_recv_sz) { /* Sanity check RQ size before proceeding */ @@ -506,10 +506,10 @@ static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) kfree(qp->sqp_proxy_rcv); } -static int qp_has_rq(struct ib_qp_init_attr *attr) +static bool qp_has_rq(struct ib_qp_init_attr *attr) { if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) - return 0; + return false; return !attr->srq; } @@ -906,7 +906,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) qp->flags |= MLX4_IB_QP_SCATTER_FCS; - err = set_rq_size(dev, &init_attr->cap, true, 1, qp, qp->inl_recv_sz); + err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); if (err) goto err; From 1d2fedd8561dc469a7503855ee602f4bb3eccfa7 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 23 Jul 2019 10:02:05 +0300 Subject: [PATCH 22/99] RDMA/core: Support netlink commands in non init_net net namespaces Now that IB core supports RDMA device binding with specific net namespace, enable IB core to accept netlink commands in non init_net namespaces. This is done by having per net namespace netlink socket. At present only netlink device handling client RDMA_NL_NLDEV supports device handling in multiple net namespaces. Hence do not accept netlink messages for other clients in non init_net net namespaces. Link: https://lore.kernel.org/r/20190723070205.6247-1-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/addr.c | 2 +- drivers/infiniband/core/core_priv.h | 24 ++++++++- drivers/infiniband/core/device.c | 38 +++++--------- drivers/infiniband/core/iwpm_msg.c | 8 +-- drivers/infiniband/core/iwpm_util.c | 6 +-- drivers/infiniband/core/netlink.c | 77 +++++++++++++++++++---------- drivers/infiniband/core/nldev.c | 20 ++++---- drivers/infiniband/core/sa_query.c | 2 +- include/rdma/rdma_netlink.h | 10 ++-- 9 files changed, 112 insertions(+), 75 deletions(-) diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 9b76a8fcdd24..1dd467bed8fc 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -183,7 +183,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr, /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); - rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL); + rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL); /* Make the request retry, so when we get the response from userspace * we will have something. 
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 888d89ce81df..589ed805e0ad 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -36,6 +36,8 @@ #include #include #include +#include +#include #include #include @@ -54,8 +56,26 @@ struct pkey_index_qp_list { struct list_head qp_list; }; +/** + * struct rdma_dev_net - rdma net namespace metadata for a net + * @nl_sock: Pointer to netlink socket + * @net: Pointer to owner net namespace + * @id: xarray id to identify the net namespace. + */ +struct rdma_dev_net { + struct sock *nl_sock; + possible_net_t net; + u32 id; +}; + extern const struct attribute_group ib_dev_attr_group; extern bool ib_devices_shared_netns; +extern unsigned int rdma_dev_net_id; + +static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net) +{ + return net_generic(net, rdma_dev_net_id); +} int ib_device_register_sysfs(struct ib_device *device); void ib_device_unregister_sysfs(struct ib_device *device); @@ -179,7 +199,6 @@ void ib_mad_cleanup(void); int ib_sa_init(void); void ib_sa_cleanup(void); -int rdma_nl_init(void); void rdma_nl_exit(void); int ib_nl_handle_resolve_resp(struct sk_buff *skb, @@ -362,4 +381,7 @@ void ib_port_unregister_module_stat(struct kobject *kobj); int ib_device_set_netns_put(struct sk_buff *skb, struct ib_device *dev, u32 ns_fd); + +int rdma_nl_net_init(struct rdma_dev_net *rnet); +void rdma_nl_net_exit(struct rdma_dev_net *rnet); #endif /* _CORE_PRIV_H */ diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 309210d2d4a8..c3576c7d2e8f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -105,17 +104,7 @@ static DECLARE_RWSEM(clients_rwsem); */ #define CLIENT_DATA_REGISTERED XA_MARK_1 -/** - * struct rdma_dev_net - rdma net namespace metadata for a net - * @net: Pointer to owner net namespace - * @id: xarray id to identify the net namespace. - */ -struct rdma_dev_net { - possible_net_t net; - u32 id; -}; - -static unsigned int rdma_dev_net_id; +unsigned int rdma_dev_net_id; /* * A list of net namespaces is maintained in an xarray. This is necessary @@ -1050,7 +1039,7 @@ int rdma_compatdev_set(u8 enable) static void rdma_dev_exit_net(struct net *net) { - struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id); + struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); struct ib_device *dev; unsigned long index; int ret; @@ -1084,25 +1073,32 @@ static void rdma_dev_exit_net(struct net *net) } up_read(&devices_rwsem); + rdma_nl_net_exit(rnet); xa_erase(&rdma_nets, rnet->id); } static __net_init int rdma_dev_init_net(struct net *net) { - struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id); + struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); unsigned long index; struct ib_device *dev; int ret; + write_pnet(&rnet->net, net); + + ret = rdma_nl_net_init(rnet); + if (ret) + return ret; + /* No need to create any compat devices in default init_net. 
*/ if (net_eq(net, &init_net)) return 0; - write_pnet(&rnet->net, net); - ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL); - if (ret) + if (ret) { + rdma_nl_net_exit(rnet); return ret; + } down_read(&devices_rwsem); xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { @@ -2629,12 +2625,6 @@ static int __init ib_core_init(void) goto err_comp_unbound; } - ret = rdma_nl_init(); - if (ret) { - pr_warn("Couldn't init IB netlink interface: err %d\n", ret); - goto err_sysfs; - } - ret = addr_init(); if (ret) { pr_warn("Could't init IB address resolution\n"); @@ -2680,8 +2670,6 @@ static int __init ib_core_init(void) err_addr: addr_cleanup(); err_ibnl: - rdma_nl_exit(); -err_sysfs: class_unregister(&ib_class); err_comp_unbound: destroy_workqueue(ib_comp_unbound_wq); diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 2452b0ddcf0d..f1a873d4e842 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c @@ -112,7 +112,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); - ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL); + ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ iwpm_user_pid = IWPM_PID_UNAVAILABLE; @@ -202,7 +202,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) nlmsg_end(skb, nlh); nlmsg_request->req_buffer = pm_msg; - ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); + ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ iwpm_user_pid = IWPM_PID_UNDEFINED; @@ -297,7 +297,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) nlmsg_end(skb, nlh); nlmsg_request->req_buffer = pm_msg; - ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); + ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ err_str = "Unable to send a nlmsg"; @@ -364,7 +364,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client) nlmsg_end(skb, nlh); - ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); + ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ iwpm_user_pid = IWPM_PID_UNDEFINED; diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 41929bb83739..c7ad3499228c 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -645,7 +645,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) nlmsg_end(skb, nlh); - ret = rdma_nl_unicast(skb, iwpm_pid); + ret = rdma_nl_unicast(&init_net, skb, iwpm_pid); if (ret) { skb = NULL; err_str = "Unable to send a nlmsg"; @@ -674,7 +674,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid) return -ENOMEM; } nlh->nlmsg_type = NLMSG_DONE; - ret = rdma_nl_unicast(skb, iwpm_pid); + ret = rdma_nl_unicast(&init_net, skb, iwpm_pid); if (ret) pr_warn("%s Unable to send a nlmsg\n", __func__); return ret; @@ -824,7 +824,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version) goto hello_num_error; nlmsg_end(skb, nlh); - ret = rdma_nl_unicast(skb, iwpm_pid); + ret = rdma_nl_unicast(&init_net, skb, iwpm_pid); if 
(ret) { skb = NULL; err_str = "Unable to send a nlmsg"; diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index eecfc0b377c9..67a76aca2dd6 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -36,20 +36,22 @@ #include #include #include +#include #include #include #include #include "core_priv.h" static DEFINE_MUTEX(rdma_nl_mutex); -static struct sock *nls; static struct { const struct rdma_nl_cbs *cb_table; } rdma_nl_types[RDMA_NL_NUM_CLIENTS]; bool rdma_nl_chk_listeners(unsigned int group) { - return netlink_has_listeners(nls, group); + struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net); + + return netlink_has_listeners(rnet->nl_sock, group); } EXPORT_SYMBOL(rdma_nl_chk_listeners); @@ -73,13 +75,21 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op) return (op < max_num_ops[type]) ? true : false; } -static bool is_nl_valid(unsigned int type, unsigned int op) +static bool +is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op) { const struct rdma_nl_cbs *cb_table; if (!is_nl_msg_valid(type, op)) return false; + /* + * Currently only NLDEV client is supporting netlink commands in + * non init_net net namespace. + */ + if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV) + return false; + if (!rdma_nl_types[type].cb_table) { mutex_unlock(&rdma_nl_mutex); request_module("rdma-netlink-subsys-%d", type); @@ -161,7 +171,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, unsigned int op = RDMA_NL_GET_OP(type); const struct rdma_nl_cbs *cb_table; - if (!is_nl_valid(index, op)) + if (!is_nl_valid(skb, index, op)) return -EINVAL; cb_table = rdma_nl_types[index].cb_table; @@ -185,7 +195,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, .dump = cb_table[op].dump, }; if (c.dump) - return netlink_dump_start(nls, skb, nlh, &c); + return netlink_dump_start(skb->sk, skb, nlh, &c); return -EINVAL; } @@ -258,52 +268,65 @@ static void rdma_nl_rcv(struct sk_buff *skb) mutex_unlock(&rdma_nl_mutex); } -int rdma_nl_unicast(struct sk_buff *skb, u32 pid) +int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid) { + struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); int err; - err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT); + err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT); return (err < 0) ? err : 0; } EXPORT_SYMBOL(rdma_nl_unicast); -int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid) +int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid) { + struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); int err; - err = netlink_unicast(nls, skb, pid, 0); + err = netlink_unicast(rnet->nl_sock, skb, pid, 0); return (err < 0) ? 
err : 0; } EXPORT_SYMBOL(rdma_nl_unicast_wait); -int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags) +int rdma_nl_multicast(struct net *net, struct sk_buff *skb, + unsigned int group, gfp_t flags) { - return nlmsg_multicast(nls, skb, 0, group, flags); + struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); + + return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags); } EXPORT_SYMBOL(rdma_nl_multicast); -int __init rdma_nl_init(void) -{ - struct netlink_kernel_cfg cfg = { - .input = rdma_nl_rcv, - }; - - nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg); - if (!nls) - return -ENOMEM; - - nls->sk_sndtimeo = 10 * HZ; - return 0; -} - void rdma_nl_exit(void) { int idx; for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) - rdma_nl_unregister(idx); + WARN(rdma_nl_types[idx].cb_table, + "Nelink client %d wasn't released prior to unloading %s\n", + idx, KBUILD_MODNAME); +} - netlink_kernel_release(nls); +int rdma_nl_net_init(struct rdma_dev_net *rnet) +{ + struct net *net = read_pnet(&rnet->net); + struct netlink_kernel_cfg cfg = { + .input = rdma_nl_rcv, + }; + struct sock *nls; + + nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg); + if (!nls) + return -ENOMEM; + + nls->sk_sndtimeo = 10 * HZ; + rnet->nl_sock = nls; + return 0; +} + +void rdma_nl_net_exit(struct rdma_dev_net *rnet) +{ + netlink_kernel_release(rnet->nl_sock); } MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 783e465e7c41..e287b71a1cfd 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -832,7 +832,7 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); @@ -972,7 +972,7 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); @@ -1074,7 +1074,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); @@ -1251,7 +1251,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); @@ -1596,7 +1596,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, put_device(data.cdev); if (ibdev) ib_device_put(ibdev); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); out_data: put_device(data.cdev); @@ -1636,7 +1636,7 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } nlmsg_end(msg, nlh); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); } static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -1734,7 +1734,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 
nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_fill: rdma_counter_unbind_qpn(device, port, qpn, cntn); @@ -1802,7 +1802,7 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_fill: rdma_counter_bind_qpn(device, port, qpn, cntn); @@ -1893,7 +1893,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb, mutex_unlock(&stats->lock); nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_table: nla_nest_cancel(msg, table_attr); @@ -1961,7 +1961,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); ib_device_put(device); - return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_msg: nlmsg_free(msg); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 7d8071c7e564..17fc2936c077 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -860,7 +860,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); - return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask); + return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask); } static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 6631624e4d7c..ab22759de7ea 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -76,28 +76,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, /** * Send the supplied skb to a specific userspace PID. + * @net: Net namespace in which to send the skb * @skb: The netlink skb * @pid: Userspace netlink process ID * Returns 0 on success or a negative error code. */ -int rdma_nl_unicast(struct sk_buff *skb, u32 pid); +int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid); /** * Send, with wait/1 retry, the supplied skb to a specific userspace PID. + * @net: Net namespace in which to send the skb * @skb: The netlink skb * @pid: Userspace netlink process ID * Returns 0 on success or a negative error code. */ -int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid); +int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid); /** * Send the supplied skb to a netlink group. + * @net: Net namespace in which to send the skb * @skb: The netlink skb * @group: Netlink group ID * @flags: allocation flags * Returns 0 on success or a negative error code. */ -int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags); +int rdma_nl_multicast(struct net *net, struct sk_buff *skb, + unsigned int group, gfp_t flags); /** * Check if there are any listeners to the netlink group From 8b38c538d460f46680d0fb8524efa15a8652b2d1 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Tue, 23 Jul 2019 10:04:12 +0300 Subject: [PATCH 23/99] IB/mlx5: Add CREATE_PSV/DESTROY_PSV for devx interface Limit the number of PSV's created through devx to 1, to create a symmetry between create/destroy cmds. 
In the kernel, one can create up to 4 PSV's using CREATE_PSV cmd but the destruction is one by one. Add a protection for this a-symmetric definition for devx. Link: https://lore.kernel.org/r/20190723070412.6385-1-leon@kernel.org Signed-off-by: Max Gurtovoy Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/devx.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index ec4370f99381..a527cf7f01ac 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -776,6 +776,14 @@ static bool devx_is_obj_create_cmd(const void *in, u16 *opcode) return true; return false; } + case MLX5_CMD_OP_CREATE_PSV: + { + u8 num_psv = MLX5_GET(create_psv_in, in, num_psv); + + if (num_psv == 1) + return true; + return false; + } default: return false; } @@ -1215,6 +1223,12 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, case MLX5_CMD_OP_ALLOC_XRCD: MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD); break; + case MLX5_CMD_OP_CREATE_PSV: + MLX5_SET(general_obj_in_cmd_hdr, din, opcode, + MLX5_CMD_OP_DESTROY_PSV); + MLX5_SET(destroy_psv_in, din, psvn, + MLX5_GET(create_psv_out, out, psv0_index)); + break; default: /* The entry must match to one of the devx_is_obj_create_cmd */ WARN_ON(true); From 0058eb589881056b49a4ba15dfa3f1b8db53991c Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 9 Jul 2019 17:17:33 +0300 Subject: [PATCH 24/99] qed*: Change dpi_addr to be denoted with __iomem Several casts were required around dpi_addr parameter in qed_rdma_if.h This is an address on the doorbell bar and should therefore be marked with __iomem. Link: https://lore.kernel.org/r/20190709141735.19193-5-michal.kalderon@marvell.com Reported-by: Jason Gunthorpe Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/main.c | 2 +- drivers/infiniband/hw/qedr/qedr.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 5 ++--- include/linux/qed/qed_rdma_if.h | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 533157a2a3be..ca5f6f65c929 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -820,7 +820,7 @@ static int qedr_init_hw(struct qedr_dev *dev) if (rc) goto out; - dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr; + dev->db_addr = out_params.dpi_addr; dev->db_phys_addr = out_params.dpi_phys_addr; dev->db_size = out_params.dpi_size; dev->dpi = out_params.dpi; diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index a92ca22e5de1..0cfd849b13d6 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -229,7 +229,7 @@ struct qedr_ucontext { struct ib_ucontext ibucontext; struct qedr_dev *dev; struct qedr_pd *pd; - u64 dpi_addr; + void __iomem *dpi_addr; u64 dpi_phys_addr; u32 dpi_size; u16 dpi; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index f900fde448db..95eba277f6e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -799,9 +799,8 @@ static int qed_rdma_add_user(void *rdma_cxt, /* Calculate the corresponding DPI address */ dpi_start_offset = p_hwfn->dpi_start_offset; - out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + - dpi_start_offset 
+ - ((out_params->dpi) * p_hwfn->dpi_size)); + out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset + + out_params->dpi * p_hwfn->dpi_size; out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset + diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 898f595ea3d6..74efca15fde7 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -225,7 +225,7 @@ struct qed_rdma_start_in_params { struct qed_rdma_add_user_out_params { u16 dpi; - u64 dpi_addr; + void __iomem *dpi_addr; u64 dpi_phys_addr; u32 dpi_size; u16 wid_count; From 5dcecbc96755793e9417a9135b290d16639ec6aa Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 23 Jul 2019 10:31:16 +0300 Subject: [PATCH 25/99] IB/mlx5: Refactor code for counters allocation To support per device counters in switchdev mode (instead of per port counter), refactor query routines to work on mlx5_ib_counter structure instead of mlx5_ib_port structure. Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190723073117.7175-2-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 60 +++++++++++++++---------------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 6cdd98ec9c1e..4cfb55ac0ed7 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5466,21 +5466,21 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, u8 port_num) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_ib_port *port = &dev->port[port_num - 1]; + const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts; /* We support only per port stats */ if (port_num == 0) return NULL; - return rdma_alloc_hw_stats_struct(port->cnts.names, - port->cnts.num_q_counters + - port->cnts.num_cong_counters + - port->cnts.num_ext_ppcnt_counters, + return rdma_alloc_hw_stats_struct(cnts->names, + cnts->num_q_counters + + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); } static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, - struct mlx5_ib_port *port, + const struct mlx5_ib_counters *cnts, struct rdma_hw_stats *stats, u16 set_id) { @@ -5497,8 +5497,8 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, if (ret) goto free; - for (i = 0; i < port->cnts.num_q_counters; i++) { - val = *(__be32 *)(out + port->cnts.offsets[i]); + for (i = 0; i < cnts->num_q_counters; i++) { + val = *(__be32 *)(out + cnts->offsets[i]); stats->value[i] = (u64)be32_to_cpu(val); } @@ -5508,10 +5508,10 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, } static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, - struct mlx5_ib_port *port, - struct rdma_hw_stats *stats) + const struct mlx5_ib_counters *cnts, + struct rdma_hw_stats *stats) { - int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters; + int offset = cnts->num_q_counters + cnts->num_cong_counters; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); int ret, i; void *out; @@ -5524,12 +5524,10 @@ static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, if (ret) goto free; - for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) { + for (i = 0; i < cnts->num_ext_ppcnt_counters; i++) stats->value[i + offset] = be64_to_cpup((__be64 *)(out + - port->cnts.offsets[i + offset])); - } - + cnts->offsets[i + offset])); free: kvfree(out); return 
ret; @@ -5540,7 +5538,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, u8 port_num, int index) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_ib_port *port = &dev->port[port_num - 1]; + struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts; struct mlx5_core_dev *mdev; int ret, num_counters; u8 mdev_port_num; @@ -5548,18 +5546,17 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, if (!stats) return -EINVAL; - num_counters = port->cnts.num_q_counters + - port->cnts.num_cong_counters + - port->cnts.num_ext_ppcnt_counters; + num_counters = cnts->num_q_counters + + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters; /* q_counters are per IB device, query the master mdev */ - ret = mlx5_ib_query_q_counters(dev->mdev, port, stats, - port->cnts.set_id); + ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id); if (ret) return ret; if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { - ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats); + ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats); if (ret) return ret; } @@ -5576,10 +5573,10 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, } ret = mlx5_lag_query_cong_counters(dev->mdev, stats->value + - port->cnts.num_q_counters, - port->cnts.num_cong_counters, - port->cnts.offsets + - port->cnts.num_q_counters); + cnts->num_q_counters, + cnts->num_cong_counters, + cnts->offsets + + cnts->num_q_counters); mlx5_ib_put_native_port_mdev(dev, port_num); if (ret) @@ -5594,20 +5591,21 @@ static struct rdma_hw_stats * mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) { struct mlx5_ib_dev *dev = to_mdev(counter->device); - struct mlx5_ib_port *port = &dev->port[counter->port - 1]; + const struct mlx5_ib_counters *cnts = + &dev->port[counter->port - 1].cnts; /* Q counters are in the beginning of all counters */ - return rdma_alloc_hw_stats_struct(port->cnts.names, - port->cnts.num_q_counters, + return rdma_alloc_hw_stats_struct(cnts->names, + cnts->num_q_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); } static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) { struct mlx5_ib_dev *dev = to_mdev(counter->device); - struct mlx5_ib_port *port = &dev->port[counter->port - 1]; + struct mlx5_ib_counters *cnts = &dev->port[counter->port - 1].cnts; - return mlx5_ib_query_q_counters(dev->mdev, port, + return mlx5_ib_query_q_counters(dev->mdev, cnts, counter->stats, counter->id); } From 3e1f000ff74627c1adb99ee513f29ec2522ee309 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 23 Jul 2019 10:31:17 +0300 Subject: [PATCH 26/99] IB/mlx5: Support per device q counters in switchdev mode When parent mlx5_core_dev is in switchdev mode, q_counters are not applicable to multiple non uplink vports. Hence, have make them limited to device level. While at it, correct __mlx5_ib_qp_set_counter() and __mlx5_ib_modify_qp() to use u16 set_id as defined by the device. 
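As an illustration of the selection rule this patch introduces (the demo_* structures and pick_counters() below are invented for the example and are not part of the patch; get_counters() in the hunks that follow is the real implementation):

/* Toy model: in switchdev mode every vport shares the device-level
 * counter set kept in slot 0, otherwise each port keeps its own set,
 * indexed by a zero-based port number. */
struct demo_counters { unsigned short set_id; };

struct demo_dev {
	int switchdev;			/* eswitch in OFFLOADS mode? */
	struct demo_counters port[4];	/* per-port counter sets */
};

static const struct demo_counters *
pick_counters(const struct demo_dev *d, unsigned int port_idx)
{
	return d->switchdev ? &d->port[0] : &d->port[port_idx];
}
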
Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190723073117.7175-3-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 55 +++++++++++++++++++++++----- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + drivers/infiniband/hw/mlx5/qp.c | 25 +++++++------ 3 files changed, 60 insertions(+), 21 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4cfb55ac0ed7..4a3d700cd783 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5322,11 +5322,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = { INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), }; +static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev) +{ + return MLX5_ESWITCH_MANAGER(mdev) && + mlx5_ib_eswitch_mode(mdev->priv.eswitch) == + MLX5_ESWITCH_OFFLOADS; +} + static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) { + int num_cnt_ports; int i; - for (i = 0; i < dev->num_ports; i++) { + num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; + + for (i = 0; i < num_cnt_ports; i++) { if (dev->port[i].cnts.set_id_valid) mlx5_core_dealloc_q_counter(dev->mdev, dev->port[i].cnts.set_id); @@ -5428,13 +5438,15 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) { + int num_cnt_ports; int err = 0; int i; bool is_shared; is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; + num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; - for (i = 0; i < dev->num_ports; i++) { + for (i = 0; i < num_cnt_ports; i++) { err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); if (err) goto err_alloc; @@ -5454,7 +5466,6 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) } dev->port[i].cnts.set_id_valid = true; } - return 0; err_alloc: @@ -5462,16 +5473,41 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) return err; } +static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, + u8 port_num) +{ + return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts : + &dev->port[port_num].cnts; +} + +/** + * mlx5_ib_get_counters_id - Returns counters id to use for device+port + * @dev: Pointer to mlx5 IB device + * @port_num: Zero based port number + * + * mlx5_ib_get_counters_id() Returns counters set id to use for given + * device port combination in switchdev and non switchdev mode of the + * parent device. 
+ */ +u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num) +{ + const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); + + return cnts->set_id; +} + static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, u8 port_num) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts; + const struct mlx5_ib_counters *cnts; + bool is_switchdev = is_mdev_switchdev_mode(dev->mdev); - /* We support only per port stats */ - if (port_num == 0) + if ((is_switchdev && port_num) || (!is_switchdev && !port_num)) return NULL; + cnts = get_counters(dev, port_num - 1); + return rdma_alloc_hw_stats_struct(cnts->names, cnts->num_q_counters + cnts->num_cong_counters + @@ -5538,7 +5574,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, u8 port_num, int index) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts; + const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); struct mlx5_core_dev *mdev; int ret, num_counters; u8 mdev_port_num; @@ -5592,7 +5628,7 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) { struct mlx5_ib_dev *dev = to_mdev(counter->device); const struct mlx5_ib_counters *cnts = - &dev->port[counter->port - 1].cnts; + get_counters(dev, counter->port - 1); /* Q counters are in the beginning of all counters */ return rdma_alloc_hw_stats_struct(cnts->names, @@ -5603,7 +5639,8 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) { struct mlx5_ib_dev *dev = to_mdev(counter->device); - struct mlx5_ib_counters *cnts = &dev->port[counter->port - 1].cnts; + const struct mlx5_ib_counters *cnts = + get_counters(dev, counter->port - 1); return mlx5_ib_query_q_counters(dev->mdev, cnts, counter->stats, counter->id); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index c482f19958b3..7a62454ad979 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1474,4 +1474,5 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev, bool dyn_bfreg); int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); +u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num); #endif /* MLX5_IB_H */ diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 2a97619ed603..f344b101928e 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3387,19 +3387,16 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, struct mlx5_ib_dev *dev = to_mdev(qp->device); struct mlx5_ib_qp *mqp = to_mqp(qp); struct mlx5_qp_context context = {}; - struct mlx5_ib_port *mibport = NULL; struct mlx5_ib_qp_base *base; u32 set_id; if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) return 0; - if (counter) { + if (counter) set_id = counter->id; - } else { - mibport = &dev->port[mqp->port - 1]; - set_id = mibport->cnts.set_id; - } + else + set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1); base = &mqp->trans_qp.base; context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff); @@ -3460,7 +3457,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_qp_context *context; struct mlx5_ib_pd *pd; - struct mlx5_ib_port *mibport = NULL; enum mlx5_qp_state mlx5_cur, mlx5_new; enum mlx5_qp_optpar optpar; u32 set_id = 0; @@ -3625,11 +3621,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp 
*ibqp, if (qp->flags & MLX5_IB_QP_UNDERLAY) port_num = 0; - mibport = &dev->port[port_num]; if (ibqp->counter) set_id = ibqp->counter->id; else - set_id = mibport->cnts.set_id; + set_id = mlx5_ib_get_counters_id(dev, port_num); context->qp_counter_set_usr_page |= cpu_to_be32(set_id << 24); } @@ -3818,6 +3813,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { + u16 set_id; + required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; if (!is_valid_mask(attr_mask, required, 0)) return -EINVAL; @@ -3844,7 +3841,9 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, } MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index); MLX5_SET(dctc, dctc, port, attr->port_num); - MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id); + + set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1); + MLX5_SET(dctc, dctc, counter_set_id, set_id); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { struct mlx5_ib_modify_qp_resp resp = {}; @@ -6328,11 +6327,13 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, } if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) { + u16 set_id; + + set_id = mlx5_ib_get_counters_id(dev, 0); if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); - MLX5_SET(rqc, rqc, counter_set_id, - dev->port->cnts.set_id); + MLX5_SET(rqc, rqc, counter_set_id, set_id); } else dev_info_once( &dev->ib_dev.dev, From bda9045a200c173b2883a878b68b8b9218ae65c6 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 26 Jul 2019 13:26:52 -0500 Subject: [PATCH 27/99] IB/bnxt_re: Do not notifify GID change event GID table entry operations such as add/remove/modify are triggered by the IB core for RoCE ports. Hence, remove GID change notification from hw driver. Reviewed-by: Selvin Xavier Tested-by: Selvin Xavier Signed-off-by: Parav Pandit Link: https://lore.kernel.org/r/20190726182652.50037-1-parav@mellanox.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/bnxt_re/main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 029babe713f3..30a54f8aa42c 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1473,7 +1473,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) &rdev->active_width); set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); - bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); return 0; free_sctx: From 16e9111e9ee3edfcf6df120080378afc620cb4d3 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 25 Jul 2019 16:03:53 +0300 Subject: [PATCH 28/99] RDMA/efa: Expose device statistics Expose hardware statistics through the sysfs api: /sys/class/infiniband/efa_0/hw_counters/*. /sys/class/infiniband/efa_0/ports/1/hw_counters/*. 
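For readers unfamiliar with the hw_counters plumbing, the driver follows roughly the sketch below (a hedged outline only; the demo_* names are placeholders, while rdma_alloc_hw_stats_struct(), RDMA_HW_STATS_DEFAULT_LIFESPAN and the alloc_hw_stats/get_hw_stats ib_device_ops hooks are the core API actually used in the diff):

#include <rdma/ib_verbs.h>

static const char * const demo_stats_names[] = {
	"tx_bytes", "tx_pkts", "rx_bytes", "rx_pkts", "rx_drops",
};

/* .alloc_hw_stats: tell the core how many counters exist and how they
 * are named; the core builds the hw_counters sysfs directory from it. */
static struct rdma_hw_stats *demo_alloc_hw_stats(struct ib_device *ibdev,
						 u8 port_num)
{
	return rdma_alloc_hw_stats_struct(demo_stats_names,
					  ARRAY_SIZE(demo_stats_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

/* .get_hw_stats: refresh stats->value[] from the device and return the
 * number of valid entries (or a negative errno). */
static int demo_get_hw_stats(struct ib_device *ibdev,
			     struct rdma_hw_stats *stats, u8 port_num,
			     int index)
{
	stats->value[0] = 0;	/* e.g. tx_bytes queried from firmware */
	return ARRAY_SIZE(demo_stats_names);
}

Once both hooks are wired into the driver's ib_device_ops, each counter shows up as its own sysfs file, so reading e.g. ports/1/hw_counters/tx_bytes returns the accumulated value.
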
Reviewed-by: Firas JahJah Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Link: https://lore.kernel.org/r/20190725130353.11544-1-galpress@amazon.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/efa/efa.h | 3 + drivers/infiniband/hw/efa/efa_com_cmd.c | 35 ++++++++++++ drivers/infiniband/hw/efa/efa_com_cmd.h | 23 ++++++++ drivers/infiniband/hw/efa/efa_main.c | 2 + drivers/infiniband/hw/efa/efa_verbs.c | 75 +++++++++++++++++++++++++ 5 files changed, 138 insertions(+) diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index 119f8efec564..2283e432693e 100644 --- a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h @@ -156,5 +156,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_udata *udata); enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, u8 port_num); +struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num); +int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u8 port_num, int index); #endif /* _EFA_H_ */ diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index 62345d8abf3c..16b115df63e8 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -702,3 +702,38 @@ int efa_com_dealloc_uar(struct efa_com_dev *edev, return 0; } + +int efa_com_get_stats(struct efa_com_dev *edev, + struct efa_com_get_stats_params *params, + union efa_com_get_stats_result *result) +{ + struct efa_com_admin_queue *aq = &edev->aq; + struct efa_admin_aq_get_stats_cmd cmd = {}; + struct efa_admin_acq_get_stats_resp resp; + int err; + + cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS; + cmd.type = params->type; + cmd.scope = params->scope; + cmd.scope_modifier = params->scope_modifier; + + err = efa_com_cmd_exec(aq, + (struct efa_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct efa_admin_acq_entry *)&resp, + sizeof(resp)); + if (err) { + ibdev_err(edev->efa_dev, + "Failed to get stats type-%u scope-%u.%u [%d]\n", + cmd.type, cmd.scope, cmd.scope_modifier, err); + return err; + } + + result->basic_stats.tx_bytes = resp.basic_stats.tx_bytes; + result->basic_stats.tx_pkts = resp.basic_stats.tx_pkts; + result->basic_stats.rx_bytes = resp.basic_stats.rx_bytes; + result->basic_stats.rx_pkts = resp.basic_stats.rx_pkts; + result->basic_stats.rx_drops = resp.basic_stats.rx_drops; + + return 0; +} diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h index a1174380462c..7f6c13052f49 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.h +++ b/drivers/infiniband/hw/efa/efa_com_cmd.h @@ -225,6 +225,26 @@ struct efa_com_dealloc_uar_params { u16 uarn; }; +struct efa_com_get_stats_params { + /* see enum efa_admin_get_stats_type */ + u8 type; + /* see enum efa_admin_get_stats_scope */ + u8 scope; + u16 scope_modifier; +}; + +struct efa_com_basic_stats { + u64 tx_bytes; + u64 tx_pkts; + u64 rx_bytes; + u64 rx_pkts; + u64 rx_drops; +}; + +union efa_com_get_stats_result { + struct efa_com_basic_stats basic_stats; +}; + void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low); int efa_com_create_qp(struct efa_com_dev *edev, struct efa_com_create_qp_params *params, @@ -266,5 +286,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev, struct efa_com_alloc_uar_result *result); int efa_com_dealloc_uar(struct efa_com_dev *edev, struct efa_com_dealloc_uar_params *params); +int efa_com_get_stats(struct efa_com_dev *edev, + 
struct efa_com_get_stats_params *params, + union efa_com_get_stats_result *result); #endif /* _EFA_COM_CMD_H_ */ diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index dd1c6d49466f..83858f7e83d0 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -201,6 +201,7 @@ static const struct ib_device_ops efa_dev_ops = { .driver_id = RDMA_DRIVER_EFA, .uverbs_abi_ver = EFA_UVERBS_ABI_VERSION, + .alloc_hw_stats = efa_alloc_hw_stats, .alloc_pd = efa_alloc_pd, .alloc_ucontext = efa_alloc_ucontext, .create_ah = efa_create_ah, @@ -212,6 +213,7 @@ static const struct ib_device_ops efa_dev_ops = { .destroy_ah = efa_destroy_ah, .destroy_cq = efa_destroy_cq, .destroy_qp = efa_destroy_qp, + .get_hw_stats = efa_get_hw_stats, .get_link_layer = efa_port_link_layer, .get_port_immutable = efa_get_port_immutable, .mmap = efa_mmap, diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index df77bc312a25..32d3b3deabce 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -41,6 +41,33 @@ static inline u64 get_mmap_key(const struct efa_mmap_entry *efa) ((u64)efa->mmap_page << PAGE_SHIFT); } +#define EFA_DEFINE_STATS(op) \ + op(EFA_TX_BYTES, "tx_bytes") \ + op(EFA_TX_PKTS, "tx_pkts") \ + op(EFA_RX_BYTES, "rx_bytes") \ + op(EFA_RX_PKTS, "rx_pkts") \ + op(EFA_RX_DROPS, "rx_drops") \ + op(EFA_SUBMITTED_CMDS, "submitted_cmds") \ + op(EFA_COMPLETED_CMDS, "completed_cmds") \ + op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \ + op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \ + op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \ + op(EFA_CREATE_QP_ERR, "create_qp_err") \ + op(EFA_REG_MR_ERR, "reg_mr_err") \ + op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \ + op(EFA_CREATE_AH_ERR, "create_ah_err") + +#define EFA_STATS_ENUM(ename, name) ename, +#define EFA_STATS_STR(ename, name) [ename] = name, + +enum efa_hw_stats { + EFA_DEFINE_STATS(EFA_STATS_ENUM) +}; + +static const char *const efa_stats_names[] = { + EFA_DEFINE_STATS(EFA_STATS_STR) +}; + #define EFA_CHUNK_PAYLOAD_SHIFT 12 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT) #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8 @@ -1727,6 +1754,54 @@ void efa_destroy_ah(struct ib_ah *ibah, u32 flags) efa_ah_destroy(dev, ah); } +struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num) +{ + return rdma_alloc_hw_stats_struct(efa_stats_names, + ARRAY_SIZE(efa_stats_names), + RDMA_HW_STATS_DEFAULT_LIFESPAN); +} + +int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u8 port_num, int index) +{ + struct efa_com_get_stats_params params = {}; + union efa_com_get_stats_result result; + struct efa_dev *dev = to_edev(ibdev); + struct efa_com_basic_stats *bs; + struct efa_com_stats_admin *as; + struct efa_stats *s; + int err; + + params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC; + params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL; + + err = efa_com_get_stats(&dev->edev, ¶ms, &result); + if (err) + return err; + + bs = &result.basic_stats; + stats->value[EFA_TX_BYTES] = bs->tx_bytes; + stats->value[EFA_TX_PKTS] = bs->tx_pkts; + stats->value[EFA_RX_BYTES] = bs->rx_bytes; + stats->value[EFA_RX_PKTS] = bs->rx_pkts; + stats->value[EFA_RX_DROPS] = bs->rx_drops; + + as = &dev->edev.aq.stats; + stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd); + stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd); + stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion); + + s = 
&dev->stats; + stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd); + stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err); + stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err); + stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err); + stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err); + stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err); + + return ARRAY_SIZE(efa_stats_names); +} + enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, u8 port_num) { From cb560f5fd951cea1e5f4afd2820b1c0deb75dcb1 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 30 Jul 2019 11:15:20 -0700 Subject: [PATCH 29/99] infiniband: Remove dev_err() usage after platform_get_irq() We don't need dev_err() messages when platform_get_irq() fails now that platform_get_irq() prints an error message itself when something goes wrong. Let's remove these prints with a simple semantic patch. // @@ expression ret; struct platform_device *E; @@ ret = ( platform_get_irq(E, ...) | platform_get_irq_byname(E, ...) ); if ( \( ret < 0 \| ret <= 0 \) ) { ( -if (ret != -EPROBE_DEFER) -{ ... -dev_err(...); -... } | ... -dev_err(...); ) ... } // While we're here, remove braces on if statements that only have one statement (manually). Cc: Doug Ledford Cc: Jason Gunthorpe Cc: linux-rdma@vger.kernel.org Cc: Greg Kroah-Hartman Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20190730181557.90391-21-swboyd@chromium.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index bea71db30461..0ff5f9617639 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -4637,10 +4637,8 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) /* fetch the interrupt numbers */ for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); - if (hr_dev->irq[i] <= 0) { - dev_err(dev, "platform get of irq[=%d] failed!\n", i); + if (hr_dev->irq[i] <= 0) return -EINVAL; - } } return 0; From 1dc558923c5ceab0b4eb78dc0050ea4c4aa1accc Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 31 Jul 2019 09:01:44 +0100 Subject: [PATCH 30/99] RDMA/core: fix spelling mistake "Nelink" -> "Netlink" There is a spelling mistake in a warning message, fix it. 
Signed-off-by: Colin Ian King Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190731080144.18327-1-colin.king@canonical.com Signed-off-by: Doug Ledford --- drivers/infiniband/core/netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 67a76aca2dd6..81dbd5f41bed 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -303,7 +303,7 @@ void rdma_nl_exit(void) for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) WARN(rdma_nl_types[idx].cb_table, - "Nelink client %d wasn't released prior to unloading %s\n", + "Netlink client %d wasn't released prior to unloading %s\n", idx, KBUILD_MODNAME); } From d129e3f42266a12b821868fc8c2407a4d80f0b18 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 31 Jul 2019 14:56:27 +0300 Subject: [PATCH 31/99] RDMA/mlx5: Remove DEBUG ODP code Delete DEBUG ODP dead code which is leftover from development stage and doesn't need to be part of the upstream kernel. Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190731115627.5433-1-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/odp.c | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 81da82050d05..b0c5de39d186 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1015,9 +1015,6 @@ static int mlx5_ib_mr_initiator_pfault_handler( u32 transport_caps; struct mlx5_base_av *av; unsigned ds, opcode; -#if defined(DEBUG) - u32 ctrl_wqe_index, ctrl_qpn; -#endif u32 qpn = qp->trans_qp.base.mqp.qpn; ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; @@ -1033,27 +1030,6 @@ static int mlx5_ib_mr_initiator_pfault_handler( return -EFAULT; } -#if defined(DEBUG) - ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) & - MLX5_WQE_CTRL_WQE_INDEX_MASK) >> - MLX5_WQE_CTRL_WQE_INDEX_SHIFT; - if (wqe_index != ctrl_wqe_index) { - mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n", - wqe_index, qpn, - ctrl_wqe_index); - return -EFAULT; - } - - ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >> - MLX5_WQE_CTRL_QPN_SHIFT; - if (qpn != ctrl_qpn) { - mlx5_ib_err(dev, "Got WQE with incorrect QP number. 
wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n", - wqe_index, qpn, - ctrl_qpn); - return -EFAULT; - } -#endif /* DEBUG */ - *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; *wqe += sizeof(*ctrl); From b2567ebb78bdde9a49010957a0ea5a2514f3fed2 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 31 Jul 2019 15:37:48 +0800 Subject: [PATCH 32/99] RDMA/hns: remove set but not used variable 'irq_num' Fixes gcc '-Wunused-but-set-variable' warning: drivers/infiniband/hw/hns/hns_roce_hw_v2.c: In function hns_roce_v2_cleanup_eq_table: drivers/infiniband/hw/hns/hns_roce_hw_v2.c:5920:6: warning: variable irq_num set but not used [-Wunused-but-set-variable] It is not used since commit 33db6f94847c ("RDMA/hns: Refactor eq table init for hip08") Reported-by: Hulk Robot Signed-off-by: YueHaibing Link: https://lore.kernel.org/r/20190731073748.17664-1-yuehaibing@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 83c58be388af..59f88bf09952 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5917,12 +5917,10 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) { struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; - int irq_num; int eq_num; int i; eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; - irq_num = eq_num + hr_dev->caps.num_other_vectors; /* Disable irq */ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); From 31d0e6c149b8c9a9bddc6d68f8600918bb771cb9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 1 Nov 2018 00:24:08 -0700 Subject: [PATCH 33/99] mlx5: Fix formats with line continuation whitespace The line continuations unintentionally add whitespace so instead use coalesced formats to remove the whitespace. 
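The underlying C pitfall is easy to reproduce outside the driver; in the stand-alone example below (illustrative only, not taken from mlx5), the first printf() embeds the next source line's indentation into the message because the backslash continuation happens inside the string literal:

#include <stdio.h>

int main(void)
{
	/* Continuation inside the literal: the output contains the
	 * leading whitespace of the continued line. */
	printf("Failed configuring rate limit: \
		rate %u\n", 100u);

	/* Coalesced into a single literal, as the patch does (adjacent
	 * string concatenation would work as well), no stray whitespace
	 * is added. */
	printf("Failed configuring rate limit: rate %u\n", 100u);
	return 0;
}
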
Signed-off-by: Joe Perches Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/f14db3287b23ed8af9bdbf8001e2e2fe7ae9e43a.camel@perches.com Signed-off-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx5/core/rl.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index bc86dffdc43c..01c380425f9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -188,8 +188,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, /* new rate limit */ err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl); if (err) { - mlx5_core_err(dev, "Failed configuring rate limit(err %d): \ - rate %u, max_burst_sz %u, typical_pkt_sz %u\n", + mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n", err, rl->rate, rl->max_burst_sz, rl->typical_pkt_sz); goto out; @@ -218,8 +217,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl) mutex_lock(&table->rl_lock); entry = find_rl_entry(table, rl); if (!entry || !entry->refcount) { - mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u \ - are not configured\n", + mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n", rl->rate, rl->max_burst_sz, rl->typical_pkt_sz); goto out; } From 20cf4e026730104892fa1268de0371a631cee294 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 29 Jul 2019 13:22:09 -0400 Subject: [PATCH 34/99] rdma: Enable ib_alloc_cq to spread work over a device's comp_vectors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Send and Receive completion is handled on a single CPU selected at the time each Completion Queue is allocated. Typically this is when an initiator instantiates an RDMA transport, or when a target accepts an RDMA connection. Some ULPs cannot open a connection per CPU to spread completion workload across available CPUs and MSI vectors. For such ULPs, provide an API that allows the RDMA core to select a completion vector based on the device's complement of available comp_vecs. ULPs that invoke ib_alloc_cq() with only comp_vector 0 are converted to use the new API so that their completion workloads interfere less with each other. 
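From a ULP author's point of view the conversion is mechanical; a hedged sketch (demo_create_cq() is a made-up helper, the two allocation verbs are the real API touched by this patch):

#include <rdma/ib_verbs.h>

/* Hypothetical ULP helper, for illustration only. */
static struct ib_cq *demo_create_cq(struct ib_device *device, void *priv,
				    int nr_cqe)
{
	/*
	 * Before: ib_alloc_cq(device, priv, nr_cqe, 0, IB_POLL_WORKQUEUE)
	 * pinned every CQ to completion vector 0.  With the new helper the
	 * core picks a vector round-robin across the device's comp_vectors.
	 */
	return ib_alloc_cq_any(device, priv, nr_cqe, IB_POLL_WORKQUEUE);
}
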
Suggested-by: Håkon Bugge Signed-off-by: Chuck Lever Reviewed-by: Leon Romanovsky Cc: Cc: Link: https://lore.kernel.org/r/20190729171923.13428.52555.stgit@manet.1015granger.net Signed-off-by: Doug Ledford --- drivers/infiniband/core/cq.c | 28 ++++++++++++++++++++++++ drivers/infiniband/ulp/srpt/ib_srpt.c | 4 ++-- fs/cifs/smbdirect.c | 10 +++++---- include/rdma/ib_verbs.h | 19 ++++++++++++++++ net/9p/trans_rdma.c | 6 ++--- net/sunrpc/xprtrdma/svc_rdma_transport.c | 8 +++---- net/sunrpc/xprtrdma/verbs.c | 13 +++++------ 7 files changed, 68 insertions(+), 20 deletions(-) diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 7c599878ccf7..bbfded6d5d3d 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -252,6 +252,34 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, } EXPORT_SYMBOL(__ib_alloc_cq_user); +/** + * __ib_alloc_cq_any - allocate a completion queue + * @dev: device to allocate the CQ for + * @private: driver private data, accessible from cq->cq_context + * @nr_cqe: number of CQEs to allocate + * @poll_ctx: context to poll the CQ from + * @caller: module owner name + * + * Attempt to spread ULP Completion Queues over each device's interrupt + * vectors. A simple best-effort mechanism is used. + */ +struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, + int nr_cqe, enum ib_poll_context poll_ctx, + const char *caller) +{ + static atomic_t counter; + int comp_vector = 0; + + if (dev->num_comp_vectors > 1) + comp_vector = + atomic_inc_return(&counter) % + min_t(int, dev->num_comp_vectors, num_online_cpus()); + + return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, + caller, NULL); +} +EXPORT_SYMBOL(__ib_alloc_cq_any); + /** * ib_free_cq_user - free a completion queue * @cq: completion queue to free.
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 1a039f16d315..e25c70a56be6 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1767,8 +1767,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) goto out; retry: - ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size, - 0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE); + ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size, + IB_POLL_WORKQUEUE); if (IS_ERR(ch->cq)) { ret = PTR_ERR(ch->cq); pr_err("failed to create CQ cqe= %d ret= %d\n", diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index cd07e5301d42..3c91fa97c9a8 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -1654,15 +1654,17 @@ static struct smbd_connection *_smbd_get_connection( info->send_cq = NULL; info->recv_cq = NULL; - info->send_cq = ib_alloc_cq(info->id->device, info, - info->send_credit_target, 0, IB_POLL_SOFTIRQ); + info->send_cq = + ib_alloc_cq_any(info->id->device, info, + info->send_credit_target, IB_POLL_SOFTIRQ); if (IS_ERR(info->send_cq)) { info->send_cq = NULL; goto alloc_cq_failed; } - info->recv_cq = ib_alloc_cq(info->id->device, info, - info->receive_credit_max, 0, IB_POLL_SOFTIRQ); + info->recv_cq = + ib_alloc_cq_any(info->id->device, info, + info->receive_credit_max, IB_POLL_SOFTIRQ); if (IS_ERR(info->recv_cq)) { info->recv_cq = NULL; goto alloc_cq_failed; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c5f8a9f17063..2a1523ccd7ab 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3711,6 +3711,25 @@ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, NULL); } +struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, + int nr_cqe, enum ib_poll_context poll_ctx, + const char *caller); + +/** + * ib_alloc_cq_any: Allocate kernel CQ + * @dev: The IB device + * @private: Private data attached to the CQE + * @nr_cqe: Number of CQEs in the CQ + * @poll_ctx: Context used for polling the CQ + */ +static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, + void *private, int nr_cqe, + enum ib_poll_context poll_ctx) +{ + return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx, + KBUILD_MODNAME); +} + /** * ib_free_cq_user - Free kernel/user CQ * @cq: The CQ to free diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index bac8dad5dd69..b21c3c209815 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -685,9 +685,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) goto error; /* Create the Completion Queue */ - rdma->cq = ib_alloc_cq(rdma->cm_id->device, client, - opts.sq_depth + opts.rq_depth + 1, - 0, IB_POLL_SOFTIRQ); + rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client, + opts.sq_depth + opts.rq_depth + 1, + IB_POLL_SOFTIRQ); if (IS_ERR(rdma->cq)) goto error; diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 3fe665152d95..4d3db6ee7f09 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -454,14 +454,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) dprintk("svcrdma: error creating PD for connect request\n"); goto errout; } - newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth, - 0, IB_POLL_WORKQUEUE); + newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth, + IB_POLL_WORKQUEUE); if (IS_ERR(newxprt->sc_sq_cq)) { dprintk("svcrdma: error creating SQ CQ for 
connect request\n"); goto errout; } - newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth, - 0, IB_POLL_WORKQUEUE); + newxprt->sc_rq_cq = + ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE); if (IS_ERR(newxprt->sc_rq_cq)) { dprintk("svcrdma: error creating RQ CQ for connect request\n"); goto errout; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 805b1f35e1ca..b10aa16557f0 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -521,18 +521,17 @@ int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) init_waitqueue_head(&ep->rep_connect_wait); ep->rep_receive_count = 0; - sendcq = ib_alloc_cq(ia->ri_id->device, NULL, - ep->rep_attr.cap.max_send_wr + 1, - ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0, - IB_POLL_WORKQUEUE); + sendcq = ib_alloc_cq_any(ia->ri_id->device, NULL, + ep->rep_attr.cap.max_send_wr + 1, + IB_POLL_WORKQUEUE); if (IS_ERR(sendcq)) { rc = PTR_ERR(sendcq); goto out1; } - recvcq = ib_alloc_cq(ia->ri_id->device, NULL, - ep->rep_attr.cap.max_recv_wr + 1, - 0, IB_POLL_WORKQUEUE); + recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL, + ep->rep_attr.cap.max_recv_wr + 1, + IB_POLL_WORKQUEUE); if (IS_ERR(recvcq)) { rc = PTR_ERR(recvcq); goto out2; From 7a63b31efbb22391a3f508a2a3d97d3de7c8dca0 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 1 Aug 2019 14:48:27 +0300 Subject: [PATCH 35/99] RDMA/hns: Remove not used UAR assignment UAR in CQ is not used and generates the following compilation warning, clean the code by removing uar assignment. drivers/infiniband/hw/hns/hns_roce_cq.c: In function _create_user_cq_: drivers/infiniband/hw/hns/hns_roce_cq.c:305:27: warning: parameter _uar_ set but not used [-Wunused-but-set-parameter] 305 | struct hns_roce_uar *uar, | ~~~~~~~~~~~~~~~~~~~~~^~~ Fixes: 4f8f0d5e33dd ("RDMA/hns: Package the flow of creating cq") Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190801114827.24263-1-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_cq.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 507d3c478404..22541d19cd09 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -83,7 +83,6 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev, static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, struct hns_roce_mtt *hr_mtt, - struct hns_roce_uar *hr_uar, struct hns_roce_cq *hr_cq, int vector) { struct hns_roce_cmd_mailbox *mailbox; @@ -154,7 +153,6 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, hr_cq->cons_index = 0; hr_cq->arm_sn = 1; - hr_cq->uar = hr_uar; atomic_set(&hr_cq->refcount, 1); init_completion(&hr_cq->free); @@ -302,7 +300,6 @@ static int create_user_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct ib_udata *udata, struct hns_roce_ib_create_cq_resp *resp, - struct hns_roce_uar *uar, int cq_entries) { struct hns_roce_ib_create_cq ucmd; @@ -337,9 +334,6 @@ static int create_user_cq(struct hns_roce_dev *hr_dev, resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB; } - /* Get user space parameters */ - uar = &context->uar; - return 0; err_mtt: @@ -350,10 +344,10 @@ static int create_user_cq(struct hns_roce_dev *hr_dev, } static int create_kernel_cq(struct hns_roce_dev *hr_dev, - struct hns_roce_cq *hr_cq, struct hns_roce_uar *uar, - int cq_entries) + struct hns_roce_cq *hr_cq, int cq_entries) { struct device 
*dev = hr_dev->dev; + struct hns_roce_uar *uar; int ret; if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { @@ -420,7 +414,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq, struct device *dev = hr_dev->dev; struct hns_roce_ib_create_cq_resp resp = {}; struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); - struct hns_roce_uar *uar = NULL; int vector = attr->comp_vector; int cq_entries = attr->cqe; int ret; @@ -439,14 +432,13 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq, spin_lock_init(&hr_cq->lock); if (udata) { - ret = create_user_cq(hr_dev, hr_cq, udata, &resp, uar, - cq_entries); + ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries); if (ret) { dev_err(dev, "Create cq failed in user mode!\n"); goto err_cq; } } else { - ret = create_kernel_cq(hr_dev, hr_cq, uar, cq_entries); + ret = create_kernel_cq(hr_dev, hr_cq, cq_entries); if (ret) { dev_err(dev, "Create cq failed in kernel mode!\n"); goto err_cq; @@ -454,7 +446,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq, } /* Allocate cq index, fill cq_context */ - ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar, + ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, hr_cq, vector); if (ret) { dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); From 05bb411ada9508b48044ef5d84dd8bc46cece607 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 1 Aug 2019 20:14:46 +0300 Subject: [PATCH 36/99] RDMA/core: Introduce ratelimited ibdev printk functions Add ratelimited helpers to the ibdev_* printk functions. Implementation inspired by counterpart dev_*_ratelimited functions. Signed-off-by: Gal Pressman Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190801171447.54440-2-galpress@amazon.com Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 42 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2a1523ccd7ab..0ecda7d15df2 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -107,6 +107,48 @@ static inline void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {} #endif +#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + ibdev_level(ibdev, fmt, ##__VA_ARGS__); \ +} while (0) + +#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_alert_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_crit_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_err_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_warn_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_notice_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_info_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__) + +#if defined(CONFIG_DYNAMIC_DEBUG) +/* descriptor check is first to prevent flooding with "callbacks suppressed" */ +#define ibdev_dbg_ratelimited(ibdev, fmt, ...) 
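A hedged usage sketch of the helpers added in the previous patch (demo_handle_aq_error() is a made-up function; ibdev_err_ratelimited() is the real macro introduced in ib_verbs.h above). Converting a hot error path this way keeps a misbehaving device from flooding the kernel log while still recording that the failure happened:

#include <rdma/ib_verbs.h>

static void demo_handle_aq_error(struct ib_device *ibdev, int status)
{
	/* Was: ibdev_err(ibdev, ...).  Now at most a limited burst is
	 * printed per rate-limit interval; further hits are counted and
	 * reported as suppressed callbacks. */
	ibdev_err_ratelimited(ibdev, "admin command failed with status %d\n",
			      status);
}
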
\ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \ + __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \ + ##__VA_ARGS__); \ +} while (0) +#else +__printf(2, 3) __cold +static inline +void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {} +#endif + union ib_gid { u8 raw[16]; struct { From cfa1f5f27c79997926f89df5dd1b4f8c920b9623 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 1 Aug 2019 20:14:47 +0300 Subject: [PATCH 37/99] RDMA/efa: Rate limit admin queue error prints Admin queue error prints should never happen unless something wrong happened to the device. However, in the unfortunate case that it does, we should take extra care not to flood the log with error messages. Reviewed-by: Firas JahJah Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Link: https://lore.kernel.org/r/20190801171447.54440-3-galpress@amazon.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/efa/efa_com.c | 70 ++++++------ drivers/infiniband/hw/efa/efa_com_cmd.c | 136 ++++++++++++++---------- 2 files changed, 120 insertions(+), 86 deletions(-) diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c index 2cb42484b0f8..3c412bc5b94f 100644 --- a/drivers/infiniband/hw/efa/efa_com.c +++ b/drivers/infiniband/hw/efa/efa_com.c @@ -109,17 +109,19 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) } while (time_is_after_jiffies(exp_time)); if (read_resp->req_id != mmio_read->seq_num) { - ibdev_err(edev->efa_dev, - "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", - mmio_read->seq_num, offset, read_resp->req_id, - read_resp->reg_off); + ibdev_err_ratelimited( + edev->efa_dev, + "Reading register timed out. 
expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", + mmio_read->seq_num, offset, read_resp->req_id, + read_resp->reg_off); err = EFA_MMIO_READ_INVALID; goto out; } if (read_resp->reg_off != offset) { - ibdev_err(edev->efa_dev, - "Reading register failed: wrong offset provided\n"); + ibdev_err_ratelimited( + edev->efa_dev, + "Reading register failed: wrong offset provided\n"); err = EFA_MMIO_READ_INVALID; goto out; } @@ -293,9 +295,10 @@ static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq, u16 ctx_id = cmd_id & (aq->depth - 1); if (aq->comp_ctx[ctx_id].occupied && capture) { - ibdev_err(aq->efa_dev, - "Completion context for command_id %#x is occupied\n", - cmd_id); + ibdev_err_ratelimited( + aq->efa_dev, + "Completion context for command_id %#x is occupied\n", + cmd_id); return NULL; } @@ -401,7 +404,7 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue spin_lock(&aq->sq.lock); if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) { - ibdev_err(aq->efa_dev, "Admin queue is closed\n"); + ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n"); spin_unlock(&aq->sq.lock); return ERR_PTR(-ENODEV); } @@ -519,8 +522,9 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c break; if (time_is_before_jiffies(timeout)) { - ibdev_err(aq->efa_dev, - "Wait for completion (polling) timeout\n"); + ibdev_err_ratelimited( + aq->efa_dev, + "Wait for completion (polling) timeout\n"); /* EFA didn't have any completion */ atomic64_inc(&aq->stats.no_completion); @@ -561,17 +565,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com atomic64_inc(&aq->stats.no_completion); if (comp_ctx->status == EFA_CMD_COMPLETED) - ibdev_err(aq->efa_dev, - "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", - efa_com_cmd_str(comp_ctx->cmd_opcode), - comp_ctx->cmd_opcode, comp_ctx->status, - comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); + ibdev_err_ratelimited( + aq->efa_dev, + "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", + efa_com_cmd_str(comp_ctx->cmd_opcode), + comp_ctx->cmd_opcode, comp_ctx->status, + comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); else - ibdev_err(aq->efa_dev, - "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", - efa_com_cmd_str(comp_ctx->cmd_opcode), - comp_ctx->cmd_opcode, comp_ctx->status, - comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); + ibdev_err_ratelimited( + aq->efa_dev, + "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", + efa_com_cmd_str(comp_ctx->cmd_opcode), + comp_ctx->cmd_opcode, comp_ctx->status, + comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); err = -ETIME; @@ -633,10 +639,11 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, cmd->aq_common_descriptor.opcode); comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size); if (IS_ERR(comp_ctx)) { - ibdev_err(aq->efa_dev, - "Failed to submit command %s (opcode %u) err %ld\n", - efa_com_cmd_str(cmd->aq_common_descriptor.opcode), - cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); + ibdev_err_ratelimited( + aq->efa_dev, + 
"Failed to submit command %s (opcode %u) err %ld\n", + efa_com_cmd_str(cmd->aq_common_descriptor.opcode), + cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); up(&aq->avail_cmds); return PTR_ERR(comp_ctx); @@ -644,11 +651,12 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, err = efa_com_wait_and_process_admin_cq(comp_ctx, aq); if (err) - ibdev_err(aq->efa_dev, - "Failed to process command %s (opcode %u) comp_status %d err %d\n", - efa_com_cmd_str(cmd->aq_common_descriptor.opcode), - cmd->aq_common_descriptor.opcode, - comp_ctx->comp_status, err); + ibdev_err_ratelimited( + aq->efa_dev, + "Failed to process command %s (opcode %u) comp_status %d err %d\n", + efa_com_cmd_str(cmd->aq_common_descriptor.opcode), + cmd->aq_common_descriptor.opcode, comp_ctx->comp_status, + err); up(&aq->avail_cmds); diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index 16b115df63e8..501dce89f275 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -44,7 +44,8 @@ int efa_com_create_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - ibdev_err(edev->efa_dev, "Failed to create qp [%d]\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to create qp [%d]\n", err); return err; } @@ -82,9 +83,10 @@ int efa_com_modify_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - ibdev_err(edev->efa_dev, - "Failed to modify qp-%u modify_mask[%#x] [%d]\n", - cmd.qp_handle, cmd.modify_mask, err); + ibdev_err_ratelimited( + edev->efa_dev, + "Failed to modify qp-%u modify_mask[%#x] [%d]\n", + cmd.qp_handle, cmd.modify_mask, err); return err; } @@ -109,8 +111,9 @@ int efa_com_query_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - ibdev_err(edev->efa_dev, "Failed to query qp-%u [%d]\n", - cmd.qp_handle, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to query qp-%u [%d]\n", + cmd.qp_handle, err); return err; } @@ -139,8 +142,9 @@ int efa_com_destroy_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n", - qp_cmd.qp_handle, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to destroy qp-%u [%d]\n", + qp_cmd.qp_handle, err); return err; } @@ -173,7 +177,8 @@ int efa_com_create_cq(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - ibdev_err(edev->efa_dev, "Failed to create cq[%d]\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to create cq[%d]\n", err); return err; } @@ -201,8 +206,9 @@ int efa_com_destroy_cq(struct efa_com_dev *edev, sizeof(destroy_resp)); if (err) { - ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n", - params->cq_idx, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to destroy CQ-%u [%d]\n", + params->cq_idx, err); return err; } @@ -250,7 +256,8 @@ int efa_com_register_mr(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - ibdev_err(edev->efa_dev, "Failed to register mr [%d]\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to register mr [%d]\n", err); return err; } @@ -277,9 +284,9 @@ int efa_com_dereg_mr(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - ibdev_err(edev->efa_dev, - "Failed to de-register mr(lkey-%u) [%d]\n", 
- mr_cmd.l_key, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to de-register mr(lkey-%u) [%d]\n", + mr_cmd.l_key, err); return err; } @@ -306,8 +313,9 @@ int efa_com_create_ah(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - ibdev_err(edev->efa_dev, "Failed to create ah for %pI6 [%d]\n", - ah_cmd.dest_addr, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to create ah for %pI6 [%d]\n", + ah_cmd.dest_addr, err); return err; } @@ -334,8 +342,9 @@ int efa_com_destroy_ah(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n", - ah_cmd.ah, ah_cmd.pd, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to destroy ah-%d pd-%d [%d]\n", + ah_cmd.ah, ah_cmd.pd, err); return err; } @@ -367,8 +376,9 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev, int err; if (!efa_com_check_supported_feature_id(edev, feature_id)) { - ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", - feature_id); + ibdev_err_ratelimited(edev->efa_dev, + "Feature %d isn't supported\n", + feature_id); return -EOPNOTSUPP; } @@ -396,9 +406,10 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev, sizeof(*get_resp)); if (err) { - ibdev_err(edev->efa_dev, - "Failed to submit get_feature command %d [%d]\n", - feature_id, err); + ibdev_err_ratelimited( + edev->efa_dev, + "Failed to submit get_feature command %d [%d]\n", + feature_id, err); return err; } @@ -421,8 +432,9 @@ int efa_com_get_network_attr(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR); if (err) { - ibdev_err(edev->efa_dev, - "Failed to get network attributes %d\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to get network attributes %d\n", + err); return err; } @@ -441,8 +453,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR); if (err) { - ibdev_err(edev->efa_dev, "Failed to get device attributes %d\n", - err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to get device attributes %d\n", + err); return err; } @@ -456,9 +469,10 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, result->db_bar = resp.u.device_attr.db_bar; if (result->admin_api_version < 1) { - ibdev_err(edev->efa_dev, - "Failed to get device attr api version [%u < 1]\n", - result->admin_api_version); + ibdev_err_ratelimited( + edev->efa_dev, + "Failed to get device attr api version [%u < 1]\n", + result->admin_api_version); return -EINVAL; } @@ -466,8 +480,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_QUEUE_ATTR); if (err) { - ibdev_err(edev->efa_dev, - "Failed to get network attributes %d\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to get network attributes %d\n", + err); return err; } @@ -497,7 +512,8 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS); if (err) { - ibdev_err(edev->efa_dev, "Failed to get hw hints %d\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to get hw hints %d\n", err); return err; } @@ -520,8 +536,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev, int err; if (!efa_com_check_supported_feature_id(edev, feature_id)) { - ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", - feature_id); + ibdev_err_ratelimited(edev->efa_dev, + "Feature %d isn't supported\n", + 
feature_id); return -EOPNOTSUPP; } @@ -545,9 +562,10 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev, sizeof(*set_resp)); if (err) { - ibdev_err(edev->efa_dev, - "Failed to submit set_feature command %d error: %d\n", - feature_id, err); + ibdev_err_ratelimited( + edev->efa_dev, + "Failed to submit set_feature command %d error: %d\n", + feature_id, err); return err; } @@ -574,8 +592,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG); if (err) { - ibdev_err(edev->efa_dev, "Failed to get aenq attributes: %d\n", - err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to get aenq attributes: %d\n", + err); return err; } @@ -585,9 +604,10 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) get_resp.u.aenq.enabled_groups); if ((get_resp.u.aenq.supported_groups & groups) != groups) { - ibdev_err(edev->efa_dev, - "Trying to set unsupported aenq groups[%#x] supported[%#x]\n", - groups, get_resp.u.aenq.supported_groups); + ibdev_err_ratelimited( + edev->efa_dev, + "Trying to set unsupported aenq groups[%#x] supported[%#x]\n", + groups, get_resp.u.aenq.supported_groups); return -EOPNOTSUPP; } @@ -595,8 +615,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) err = efa_com_set_feature(edev, &set_resp, &cmd, EFA_ADMIN_AENQ_CONFIG); if (err) { - ibdev_err(edev->efa_dev, "Failed to set aenq attributes: %d\n", - err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to set aenq attributes: %d\n", + err); return err; } @@ -619,7 +640,8 @@ int efa_com_alloc_pd(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - ibdev_err(edev->efa_dev, "Failed to allocate pd[%d]\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to allocate pd[%d]\n", err); return err; } @@ -645,8 +667,9 @@ int efa_com_dealloc_pd(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - ibdev_err(edev->efa_dev, "Failed to deallocate pd-%u [%d]\n", - cmd.pd, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to deallocate pd-%u [%d]\n", + cmd.pd, err); return err; } @@ -669,7 +692,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - ibdev_err(edev->efa_dev, "Failed to allocate uar[%d]\n", err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to allocate uar[%d]\n", err); return err; } @@ -695,8 +719,9 @@ int efa_com_dealloc_uar(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - ibdev_err(edev->efa_dev, "Failed to deallocate uar-%u [%d]\n", - cmd.uar, err); + ibdev_err_ratelimited(edev->efa_dev, + "Failed to deallocate uar-%u [%d]\n", + cmd.uar, err); return err; } @@ -723,9 +748,10 @@ int efa_com_get_stats(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - ibdev_err(edev->efa_dev, - "Failed to get stats type-%u scope-%u.%u [%d]\n", - cmd.type, cmd.scope, cmd.scope_modifier, err); + ibdev_err_ratelimited( + edev->efa_dev, + "Failed to get stats type-%u scope-%u.%u [%d]\n", + cmd.type, cmd.scope, cmd.scope_modifier, err); return err; } From 72a7720fca37fec0daf295923f17ac5d88a613e1 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 7 Aug 2019 13:31:35 +0300 Subject: [PATCH 38/99] RDMA: Introduce ib_port_phys_state enum In order to improve readability, add ib_port_phys_state enum to replace the use of magic numbers. 
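To illustrate the readability gain (snippet mine, mirroring the bnxt_re hunk below rather than quoting it), a RoCE provider's query_port callback now reads:

	if (netif_running(netdev) && netif_carrier_ok(netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;	/* was the magic 5 */
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;	/* was the magic 3 */
	}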
Signed-off-by: Kamal Heib Reviewed-by: Andrew Boyer Acked-by: Michal Kalderon Acked-by: Bernard Metzler Link: https://lore.kernel.org/r/20190807103138.17219-2-kamalheib1@gmail.com Signed-off-by: Doug Ledford --- drivers/infiniband/core/sysfs.c | 30 +++++++++++++------- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 4 +-- drivers/infiniband/hw/efa/efa_verbs.c | 2 +- drivers/infiniband/hw/hns/hns_roce_device.h | 10 ------- drivers/infiniband/hw/hns/hns_roce_main.c | 3 +- drivers/infiniband/hw/mlx4/main.c | 3 +- drivers/infiniband/hw/mlx5/main.c | 4 +-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4 +-- drivers/infiniband/hw/qedr/verbs.c | 4 +-- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 7 +++-- drivers/infiniband/sw/rxe/rxe.h | 4 --- drivers/infiniband/sw/rxe/rxe_param.h | 2 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 6 ++-- drivers/infiniband/sw/siw/siw_verbs.c | 3 +- include/rdma/ib_verbs.h | 10 +++++++ 15 files changed, 53 insertions(+), 43 deletions(-) diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index b477295a96c2..7a50cedcef1f 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -289,6 +289,24 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, ib_width_enum_to_int(attr.active_width), speed); } +static const char *phys_state_to_str(enum ib_port_phys_state phys_state) +{ + static const char * phys_state_str[] = { + "", + "Sleep", + "Polling", + "Disabled", + "PortConfigurationTraining", + "LinkUp", + "LinkErrorRecovery", + "Phy Test", + }; + + if (phys_state < ARRAY_SIZE(phys_state_str)) + return phys_state_str[phys_state]; + return ""; +} + static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused, char *buf) { @@ -300,16 +318,8 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused, if (ret) return ret; - switch (attr.phys_state) { - case 1: return sprintf(buf, "1: Sleep\n"); - case 2: return sprintf(buf, "2: Polling\n"); - case 3: return sprintf(buf, "3: Disabled\n"); - case 4: return sprintf(buf, "4: PortConfigurationTraining\n"); - case 5: return sprintf(buf, "5: LinkUp\n"); - case 6: return sprintf(buf, "6: LinkErrorRecovery\n"); - case 7: return sprintf(buf, "7: Phy Test\n"); - default: return sprintf(buf, "%d: \n", attr.phys_state); - } + return sprintf(buf, "%d: %s\n", attr.phys_state, + phys_state_to_str(attr.phys_state)); } static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused, diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 098ab883733e..f9e97d0cc459 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -220,10 +220,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) { port_attr->state = IB_PORT_ACTIVE; - port_attr->phys_state = 5; + port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } else { port_attr->state = IB_PORT_DOWN; - port_attr->phys_state = 3; + port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; } port_attr->max_mtu = IB_MTU_4096; port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu); diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 32d3b3deabce..70851bd7f801 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -333,7 +333,7 @@ int efa_query_port(struct ib_device *ibdev, u8 port, props->lmc = 1; props->state = 
IB_PORT_ACTIVE; - props->phys_state = 5; + props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->active_speed = IB_SPEED_EDR; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index b39497a13b61..12a2b8565771 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -989,16 +989,6 @@ struct hns_roce_hw { const struct ib_device_ops *hns_roce_dev_srq_ops; }; -enum hns_phy_state { - HNS_ROCE_PHY_SLEEP = 1, - HNS_ROCE_PHY_POLLING = 2, - HNS_ROCE_PHY_DISABLED = 3, - HNS_ROCE_PHY_TRAINING = 4, - HNS_ROCE_PHY_LINKUP = 5, - HNS_ROCE_PHY_LINKERR = 6, - HNS_ROCE_PHY_TEST = 7 -}; - struct hns_roce_dev { struct ib_device ib_dev; struct platform_device *pdev; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 1e4ba48f5613..1b757cc924c3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -262,7 +262,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? IB_PORT_ACTIVE : IB_PORT_DOWN; props->phys_state = (props->state == IB_PORT_ACTIVE) ? - HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED; + IB_PORT_PHYS_STATE_LINK_UP : + IB_PORT_PHYS_STATE_DISABLED; spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8790101facb7..8d2f1e38b891 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -734,7 +734,8 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, static u8 state_to_phys_state(enum ib_port_state state) { - return state == IB_PORT_ACTIVE ? 5 : 3; + return state == IB_PORT_ACTIVE ? 
+ IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; } static int eth_link_query_port(struct ib_device *ibdev, u8 port, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4a3d700cd783..af1986b4aa7d 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -535,7 +535,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg); props->pkey_tbl_len = 1; props->state = IB_PORT_DOWN; - props->phys_state = 3; + props->phys_state = IB_PORT_PHYS_STATE_DISABLED; mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr); props->qkey_viol_cntr = qkey_viol_cntr; @@ -561,7 +561,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, if (netif_running(ndev) && netif_carrier_ok(ndev)) { props->state = IB_PORT_ACTIVE; - props->phys_state = 5; + props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } ndev_ib_mtu = iboe_get_mtu(ndev->mtu); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index bccc11378109..e8267e590772 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -163,10 +163,10 @@ int ocrdma_query_port(struct ib_device *ibdev, netdev = dev->nic_info.netdev; if (netif_running(netdev) && netif_oper_up(netdev)) { port_state = IB_PORT_ACTIVE; - props->phys_state = 5; + props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } else { port_state = IB_PORT_DOWN; - props->phys_state = 3; + props->phys_state = IB_PORT_PHYS_STATE_DISABLED; } props->max_mtu = IB_MTU_4096; props->active_mtu = iboe_get_mtu(netdev->mtu); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 0c6a4bc848f5..6f3ce86019b7 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -221,10 +221,10 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr) /* *attr being zeroed by the caller, avoid zeroing it here */ if (rdma_port->port_state == QED_RDMA_PORT_UP) { attr->state = IB_PORT_ACTIVE; - attr->phys_state = 5; + attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } else { attr->state = IB_PORT_DOWN; - attr->phys_state = 3; + attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; } attr->max_mtu = IB_MTU_4096; attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index a354c7c86547..556b8e44a51c 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -356,13 +356,14 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, if (!us_ibdev->ufdev->link_up) { props->state = IB_PORT_DOWN; - props->phys_state = 3; + props->phys_state = IB_PORT_PHYS_STATE_DISABLED; } else if (!us_ibdev->ufdev->inaddr) { props->state = IB_PORT_INIT; - props->phys_state = 4; + props->phys_state = + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; } else { props->state = IB_PORT_ACTIVE; - props->phys_state = 5; + props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } props->port_cap_flags = 0; diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index ecf6e659c0da..fb07eed9e402 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -65,10 +65,6 @@ */ #define RXE_UVERBS_ABI_VERSION 2 -#define RDMA_LINK_PHYS_STATE_LINK_UP (5) -#define RDMA_LINK_PHYS_STATE_DISABLED (3) -#define 
RDMA_LINK_PHYS_STATE_POLLING (2) - #define RXE_ROCE_V2_SPORT (0xc000) static inline u32 rxe_crc32(struct rxe_dev *rxe, diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index 1abed47ca221..fe5207386700 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h @@ -154,7 +154,7 @@ enum rxe_port_param { RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X, RXE_PORT_ACTIVE_SPEED = 1, RXE_PORT_PKEY_TBL_LEN = 64, - RXE_PORT_PHYS_STATE = 2, + RXE_PORT_PHYS_STATE = IB_PORT_PHYS_STATE_POLLING, RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL, }; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 4ebdfcf4d33e..623129f27f5a 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -69,11 +69,11 @@ static int rxe_query_port(struct ib_device *dev, &attr->active_width); if (attr->state == IB_PORT_ACTIVE) - attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP; + attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; else if (dev_get_flags(rxe->ndev) & IFF_UP) - attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING; + attr->phys_state = IB_PORT_PHYS_STATE_POLLING; else - attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED; + attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; mutex_unlock(&rxe->usdev_lock); diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 32dc79d0e898..404e7ca4b30c 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -206,7 +206,8 @@ int siw_query_port(struct ib_device *base_dev, u8 port, attr->gid_tbl_len = 1; attr->max_msg_sz = -1; attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); - attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 5 : 3; + attr->phys_state = sdev->state == IB_PORT_ACTIVE ? + IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; attr->pkey_tbl_len = 1; attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP; attr->state = sdev->state; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0ecda7d15df2..391499008a22 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -493,6 +493,16 @@ enum ib_port_state { IB_PORT_ACTIVE_DEFER = 5 }; +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + enum ib_port_width { IB_WIDTH_1X = 1, IB_WIDTH_2X = 16, From 691f380df242a6725cb4cc706887c293cfba6afc Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 7 Aug 2019 13:31:36 +0300 Subject: [PATCH 39/99] RDMA/cxgb3: Use ib_device_set_netdev() This change is required to associate the cxgb3 ib_dev with the underlying net_device, so in the upcoming patch we can call ib_device_get_netdev(). 
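As a rough sketch of the intended pairing (not taken from the patch; a single-port device is assumed and error handling is trimmed), the provider binds its net_device to IB port 1 before ib_register_device(), and core code can later resolve and reference it:

	/* provider setup, before ib_register_device() */
	err = ib_device_set_netdev(&dev->ibdev, netdev, 1);
	if (err)
		return err;

	/* core/ULP side: returns the bound net_device with a reference held */
	ndev = ib_device_get_netdev(ibdev, 1);
	if (ndev)
		dev_put(ndev);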
Signed-off-by: Kamal Heib Link: https://lore.kernel.org/r/20190807103138.17219-3-kamalheib1@gmail.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb3/iwch_provider.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index e775c1a1a450..5848e4727b2e 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1273,8 +1273,24 @@ static const struct ib_device_ops iwch_dev_ops = { INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext), }; +static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev) +{ + int ret; + int i; + + for (i = 0; i < rdev->port_info.nports; i++) { + ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i], + i + 1); + if (ret) + return ret; + } + return 0; +} + int iwch_register_device(struct iwch_dev *dev) { + int err; + pr_debug("%s iwch_dev %p\n", __func__, dev); memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); @@ -1315,6 +1331,10 @@ int iwch_register_device(struct iwch_dev *dev) rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group); ib_set_device_ops(&dev->ibdev, &iwch_dev_ops); + err = set_netdevs(&dev->ibdev, &dev->rdev); + if (err) + return err; + return ib_register_device(&dev->ibdev, "cxgb3_%d"); } From 4929116bdf72995c0eebc74d48667262be1080ac Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 7 Aug 2019 13:31:37 +0300 Subject: [PATCH 40/99] RDMA/core: Add common iWARP query port Add support for a common iWARP query port function, the new function includes a common code that is used by the iWARP devices to update the port attributes like max_mtu, active_mtu, state, and phys_state, the function also includes a call for the driver-specific query_port callback to query the device-specific port attributes. 
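Callers are unaffected; a usage sketch (illustrative only) is:

	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, 1, &attr);
	if (!err)
		pr_info("state %d phys_state %d active_mtu %d\n",
			attr.state, attr.phys_state, attr.active_mtu);

For iWARP ports the core fills max_mtu, active_mtu, state and phys_state from the bound net_device before the driver callback adds its device-specific attributes.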
Signed-off-by: Kamal Heib Link: https://lore.kernel.org/r/20190807103138.17219-4-kamalheib1@gmail.com Signed-off-by: Doug Ledford --- drivers/infiniband/core/device.c | 89 ++++++++++++++++++++++++++------ 1 file changed, 72 insertions(+), 17 deletions(-) diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index c3576c7d2e8f..8892862fb759 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1939,6 +1939,74 @@ void ib_dispatch_event(struct ib_event *event) } EXPORT_SYMBOL(ib_dispatch_event); +static int iw_query_port(struct ib_device *device, + u8 port_num, + struct ib_port_attr *port_attr) +{ + struct in_device *inetdev; + struct net_device *netdev; + int err; + + memset(port_attr, 0, sizeof(*port_attr)); + + netdev = ib_device_get_netdev(device, port_num); + if (!netdev) + return -ENODEV; + + dev_put(netdev); + + port_attr->max_mtu = IB_MTU_4096; + port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu); + + if (!netif_carrier_ok(netdev)) { + port_attr->state = IB_PORT_DOWN; + port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; + } else { + inetdev = in_dev_get(netdev); + + if (inetdev && inetdev->ifa_list) { + port_attr->state = IB_PORT_ACTIVE; + port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; + in_dev_put(inetdev); + } else { + port_attr->state = IB_PORT_INIT; + port_attr->phys_state = + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; + } + } + + err = device->ops.query_port(device, port_num, port_attr); + if (err) + return err; + + return 0; +} + +static int __ib_query_port(struct ib_device *device, + u8 port_num, + struct ib_port_attr *port_attr) +{ + union ib_gid gid = {}; + int err; + + memset(port_attr, 0, sizeof(*port_attr)); + + err = device->ops.query_port(device, port_num, port_attr); + if (err || port_attr->subnet_prefix) + return err; + + if (rdma_port_get_link_layer(device, port_num) != + IB_LINK_LAYER_INFINIBAND) + return 0; + + err = device->ops.query_gid(device, port_num, 0, &gid); + if (err) + return err; + + port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix); + return 0; +} + /** * ib_query_port - Query IB port attributes * @device:Device to query @@ -1952,26 +2020,13 @@ int ib_query_port(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr) { - union ib_gid gid; - int err; - if (!rdma_is_port_valid(device, port_num)) return -EINVAL; - memset(port_attr, 0, sizeof(*port_attr)); - err = device->ops.query_port(device, port_num, port_attr); - if (err || port_attr->subnet_prefix) - return err; - - if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) - return 0; - - err = device->ops.query_gid(device, port_num, 0, &gid); - if (err) - return err; - - port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix); - return 0; + if (rdma_protocol_iwarp(device, port_num)) + return iw_query_port(device, port_num, port_attr); + else + return __ib_query_port(device, port_num, port_attr); } EXPORT_SYMBOL(ib_query_port); From d8d5cfac45dbbaacc4950b29c7b071547e5edc31 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 7 Aug 2019 13:31:38 +0300 Subject: [PATCH 41/99] RDMA/{cxgb3, cxgb4, i40iw}: Remove common code Now that we have a common iWARP query port function we can remove the common code from the iWARP drivers. 
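After the cleanup a provider callback is reduced to the device-specific fields; a hedged sketch (foo_query_port and the exact flag set are illustrative, not taken from any one driver):

	static int foo_query_port(struct ib_device *ibdev, u8 port,
				  struct ib_port_attr *props)
	{
		/* mtu, state and phys_state are already set by iw_query_port() */
		props->gid_tbl_len = 1;
		props->pkey_tbl_len = 1;
		props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_SNMP_TUNNEL_SUP;
		return 0;
	}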
Signed-off-by: Kamal Heib Acked-by: Potnuri Bharat Teja Link: https://lore.kernel.org/r/20190807103138.17219-5-kamalheib1@gmail.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb3/iwch_provider.c | 25 --------------------- drivers/infiniband/hw/cxgb4/provider.c | 24 -------------------- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 11 --------- 3 files changed, 60 deletions(-) diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 5848e4727b2e..dcf02ec02810 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -991,33 +991,8 @@ static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *pro static int iwch_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { - struct iwch_dev *dev; - struct net_device *netdev; - struct in_device *inetdev; - pr_debug("%s ibdev %p\n", __func__, ibdev); - dev = to_iwch_dev(ibdev); - netdev = dev->rdev.port_info.lldevs[port-1]; - - /* props being zeroed by the caller, avoid zeroing it here */ - props->max_mtu = IB_MTU_4096; - props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); - - if (!netif_carrier_ok(netdev)) - props->state = IB_PORT_DOWN; - else { - inetdev = in_dev_get(netdev); - if (inetdev) { - if (inetdev->ifa_list) - props->state = IB_PORT_ACTIVE; - else - props->state = IB_PORT_INIT; - in_dev_put(inetdev); - } else - props->state = IB_PORT_INIT; - } - props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_SNMP_TUNNEL_SUP | diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 5e59c5708729..d373ac0fe2cb 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -305,32 +305,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro static int c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { - struct c4iw_dev *dev; - struct net_device *netdev; - struct in_device *inetdev; - pr_debug("ibdev %p\n", ibdev); - dev = to_c4iw_dev(ibdev); - netdev = dev->rdev.lldi.ports[port-1]; - /* props being zeroed by the caller, avoid zeroing it here */ - props->max_mtu = IB_MTU_4096; - props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); - - if (!netif_carrier_ok(netdev)) - props->state = IB_PORT_DOWN; - else { - inetdev = in_dev_get(netdev); - if (inetdev) { - if (inetdev->ifa_list) - props->state = IB_PORT_ACTIVE; - else - props->state = IB_PORT_INIT; - in_dev_put(inetdev); - } else - props->state = IB_PORT_INIT; - } - props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_SNMP_TUNNEL_SUP | diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index d169a8031375..8056930bbe2c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -97,18 +97,7 @@ static int i40iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { - struct i40iw_device *iwdev = to_iwdev(ibdev); - struct net_device *netdev = iwdev->netdev; - - /* props being zeroed by the caller, avoid zeroing it here */ - props->max_mtu = IB_MTU_4096; - props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); - props->lid = 1; - if (netif_carrier_ok(iwdev->netdev)) - props->state = IB_PORT_ACTIVE; - else - props->state = IB_PORT_DOWN; props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; props->gid_tbl_len = 1; From a3e2d4c7e76653d7e7bb34b6ca1692e3ca462d80 Mon Sep 
17 00:00:00 2001 From: YueHaibing Date: Wed, 7 Aug 2019 11:22:28 +0800 Subject: [PATCH 42/99] RDMA/hns: remove obsolete Kconfig comment Since commit a07fc0bb483e ("RDMA/hns: Fix build error") these Kconfig comments are obsolete, so just remove them. Signed-off-by: YueHaibing Link: https://lore.kernel.org/r/20190807032228.6788-1-yuehaibing@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/Kconfig | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 54782197c717..d602b698b57e 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -8,8 +8,6 @@ config INFINIBAND_HNS is used in Hisilicon Hip06 and more further ICT SoC based on platform device. - To compile HIP06 or HIP08 driver as module, choose M here. - config INFINIBAND_HNS_HIP06 tristate "Hisilicon Hip06 Family RoCE support" depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET @@ -17,15 +15,9 @@ config INFINIBAND_HNS_HIP06 RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and Hip07 SoC. These RoCE engines are platform devices. - To compile this driver, choose Y here: if INFINIBAND_HNS is m, this - module will be called hns-roce-hw-v1 - config INFINIBAND_HNS_HIP08 tristate "Hisilicon Hip08 Family RoCE support" depends on INFINIBAND_HNS && PCI && HNS3 ---help--- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. The RoCE engine is a PCI device. - - To compile this driver, choose Y here: if INFINIBAND_HNS is m, this - module will be called hns-roce-hw-v2. From cc95b23c250005873e4a117fccc53a8378fb3aed Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 8 Aug 2019 22:53:41 +0800 Subject: [PATCH 43/99] RDMA/hns: Encapsulate some lines for setting sq size in user mode The SQ size needs to be checked for integrity when the related SQ parameters are configured. Move the related code into a dedicated helper function.
Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1565276034-97329-2-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_qp.c | 29 +++++++++++++++++++------ 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 9c272c20bbf9..5fcc17e6ec15 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -324,16 +324,12 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, return 0; } -static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp, - struct hns_roce_ib_create_qp *ucmd) +static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, + struct hns_roce_ib_create_qp *ucmd) { u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); u8 max_sq_stride = ilog2(roundup_sq_stride); - u32 ex_sge_num; - u32 page_size; - u32 max_cnt; /* Sanity check SQ size before proceeding */ if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || @@ -349,6 +345,25 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, return -EINVAL; } + return 0; +} + +static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, + struct hns_roce_qp *hr_qp, + struct hns_roce_ib_create_qp *ucmd) +{ + u32 ex_sge_num; + u32 page_size; + u32 max_cnt; + int ret; + + ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n"); + return ret; + } + hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; hr_qp->sq.wqe_shift = ucmd->log_sq_stride; From 8ea417ffc2db52a69b16712ab6ee07e8f74aa740 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 8 Aug 2019 22:53:42 +0800 Subject: [PATCH 44/99] RDMA/hns: Optimize hns_roce_modify_qp function Here mainly packages some code into some new functions in order to reduce code compelexity. Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1565276034-97329-3-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_qp.c | 117 ++++++++++++++---------- 1 file changed, 69 insertions(+), 48 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 5fcc17e6ec15..f76617bbf009 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1070,6 +1070,72 @@ int to_hr_qp_type(int qp_type) return transport_type; } +static int check_mtu_validate(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_qp_attr *attr, int attr_mask) +{ + struct device *dev = hr_dev->dev; + enum ib_mtu active_mtu; + int p; + + p = attr_mask & IB_QP_PORT ? 
(attr->port_num - 1) : hr_qp->port; + active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); + + if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && + attr->path_mtu > hr_dev->caps.max_mtu) || + attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { + dev_err(dev, "attr path_mtu(%d)invalid while modify qp", + attr->path_mtu); + return -EINVAL; + } + + return 0; +} + +static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct device *dev = hr_dev->dev; + int p; + + if ((attr_mask & IB_QP_PORT) && + (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { + dev_err(dev, "attr port_num invalid.attr->port_num=%d\n", + attr->port_num); + return -EINVAL; + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; + if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { + dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n", + attr->pkey_index); + return -EINVAL; + } + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { + dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", + attr->max_rd_atomic); + return -EINVAL; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { + dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", + attr->max_dest_rd_atomic); + return -EINVAL; + } + + if (attr_mask & IB_QP_PATH_MTU) + return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); + + return 0; +} + int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { @@ -1078,15 +1144,12 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, enum ib_qp_state cur_state, new_state; struct device *dev = hr_dev->dev; int ret = -EINVAL; - int p; - enum ib_mtu active_mtu; mutex_lock(&hr_qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; - new_state = attr_mask & IB_QP_STATE ? - attr->qp_state : cur_state; + new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (ibqp->uobject && (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { @@ -1107,51 +1170,9 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; } - if ((attr_mask & IB_QP_PORT) && - (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { - dev_err(dev, "attr port_num invalid.attr->port_num=%d\n", - attr->port_num); + ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask); + if (ret) goto out; - } - - if (attr_mask & IB_QP_PKEY_INDEX) { - p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; - if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { - dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n", - attr->pkey_index); - goto out; - } - } - - if (attr_mask & IB_QP_PATH_MTU) { - p = attr_mask & IB_QP_PORT ? 
(attr->port_num - 1) : hr_qp->port; - active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); - - if ((hr_dev->caps.max_mtu == IB_MTU_4096 && - attr->path_mtu > IB_MTU_4096) || - (hr_dev->caps.max_mtu == IB_MTU_2048 && - attr->path_mtu > IB_MTU_2048) || - attr->path_mtu < IB_MTU_256 || - attr->path_mtu > active_mtu) { - dev_err(dev, "attr path_mtu(%d)invalid while modify qp", - attr->path_mtu); - goto out; - } - } - - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { - dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", - attr->max_rd_atomic); - goto out; - } - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { - dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", - attr->max_dest_rd_atomic); - goto out; - } if (cur_state == new_state && cur_state == IB_QPS_RESET) { if (hr_dev->caps.min_wqes) { From ece9c205f707fc0fda832d5da18b745d30c7c6bc Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Thu, 8 Aug 2019 22:53:43 +0800 Subject: [PATCH 45/99] RDMA/hns: Update the prompt message for creating and destroy qp Current prompt message is uncorrect when destroying qp, add qpn information when creating qp. Signed-off-by: Yixian Liu Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1565276034-97329-4-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 6 +++--- drivers/infiniband/hw/hns/hns_roce_qp.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 59f88bf09952..7cd4b3bd649e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4571,8 +4571,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); if (ret) { - dev_err(dev, "modify QP %06lx to ERR failed.\n", - hr_qp->qpn); + dev_err(dev, "modify QP to Reset failed.\n"); return ret; } } @@ -4641,7 +4640,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); if (ret) { - dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret); + dev_err(hr_dev->dev, "Destroy qp 0x%06lx failed(%d)\n", + hr_qp->qpn, ret); return ret; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index f76617bbf009..f803209b6e04 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1002,7 +1002,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, hr_qp); if (ret) { - dev_err(dev, "Create RC QP failed\n"); + dev_err(dev, "Create RC QP 0x%06lx failed(%d)\n", + hr_qp->qpn, ret); kfree(hr_qp); return ERR_PTR(ret); } From 2288b3b3b187520d7fef37197c07ca035d9b8111 Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Thu, 8 Aug 2019 22:53:44 +0800 Subject: [PATCH 46/99] RDMA/hns: Remove unnessary init for cmq reg There is no need to init the enable bit of cmq. 
Signed-off-by: Yixian Liu Link: https://lore.kernel.org/r/1565276034-97329-5-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 6 ++---- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 -- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 7cd4b3bd649e..2a30a91a9824 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -887,8 +887,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma)); roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, - (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) | - HNS_ROCE_CMQ_ENABLE); + ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); } else { @@ -896,8 +895,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG, upper_32_bits(dma)); roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG, - (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) | - HNS_ROCE_CMQ_ENABLE); + ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0); roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 478f5a5b7aa1..58931b5399f8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -126,8 +126,6 @@ #define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT) #define HNS_ROCE_CMQ_DESC_NUM_S 3 -#define HNS_ROCE_CMQ_EN_B 16 -#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B) #define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5 From b5c229dc1585adcbe158ecaebb5e6993c9c220b6 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 8 Aug 2019 22:53:45 +0800 Subject: [PATCH 47/99] RDMA/hns: Clean up unnecessary initial assignment Here remove some unncessary initialization for some valiables. Signed-off-by: Lang Cheng Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1565276034-97329-6-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 2a30a91a9824..8566a6830643 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -239,7 +239,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct device *dev = hr_dev->dev; struct hns_roce_v2_db sq_db; struct ib_qp_attr attr; - unsigned int sge_ind = 0; + unsigned int sge_ind; unsigned int owner_bit; unsigned long flags; unsigned int ind; @@ -4291,7 +4291,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, struct hns_roce_v2_qp_context *context; struct hns_roce_v2_qp_context *qpc_mask; struct device *dev = hr_dev->dev; - int ret = -EINVAL; + int ret; context = kcalloc(2, sizeof(*context), GFP_ATOMIC); if (!context) From 6def7de6d450893852802794ab44d8053672cf88 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 8 Aug 2019 22:53:46 +0800 Subject: [PATCH 48/99] RDMA/hns: Update some comments style Here removes some useless comments and adds necessary spaces to another comments. 
Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1565276034-97329-7-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 65 ++++++++++----------- drivers/infiniband/hw/hns/hns_roce_hem.h | 6 +- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 9 +-- drivers/infiniband/hw/hns/hns_roce_mr.c | 1 - 4 files changed, 38 insertions(+), 43 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 12a2b8565771..4fffa3da3a87 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -84,7 +84,6 @@ #define HNS_ROCE_CEQ_ENTRY_SIZE 0x4 #define HNS_ROCE_AEQ_ENTRY_SIZE 0x10 -/* 4G/4K = 1M */ #define HNS_ROCE_SL_SHIFT 28 #define HNS_ROCE_TCLASS_SHIFT 20 #define HNS_ROCE_FLOW_LABEL_MASK 0xfffff @@ -322,7 +321,7 @@ struct hns_roce_hem_table { unsigned long num_hem; /* HEM entry record obj total num */ unsigned long num_obj; - /*Single obj size */ + /* Single obj size */ unsigned long obj_size; unsigned long table_chunk_size; int lowmem; @@ -343,7 +342,7 @@ struct hns_roce_mtt { struct hns_roce_buf_region { int offset; /* page offset */ - u32 count; /* page count*/ + u32 count; /* page count */ int hopnum; /* addressing hop num */ }; @@ -384,25 +383,25 @@ struct hns_roce_mr { u64 size; /* Address range of MR */ u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ - u32 access;/* Access permission of MR */ + u32 access; /* Access permission of MR */ u32 npages; int enabled; /* MR's active status */ int type; /* MR's register type */ - u64 *pbl_buf;/* MR's PBL space */ + u64 *pbl_buf; /* MR's PBL space */ dma_addr_t pbl_dma_addr; /* MR's PBL space PA */ - u32 pbl_size;/* PA number in the PBL */ - u64 pbl_ba;/* page table address */ - u32 l0_chunk_last_num;/* L0 last number */ - u32 l1_chunk_last_num;/* L1 last number */ - u64 **pbl_bt_l2;/* PBL BT L2 */ - u64 **pbl_bt_l1;/* PBL BT L1 */ - u64 *pbl_bt_l0;/* PBL BT L0 */ - dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */ - dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */ - dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */ - u32 pbl_ba_pg_sz;/* BT chunk page size */ - u32 pbl_buf_pg_sz;/* buf chunk page size */ - u32 pbl_hop_num;/* multi-hop number */ + u32 pbl_size; /* PA number in the PBL */ + u64 pbl_ba; /* page table address */ + u32 l0_chunk_last_num; /* L0 last number */ + u32 l1_chunk_last_num; /* L1 last number */ + u64 **pbl_bt_l2; /* PBL BT L2 */ + u64 **pbl_bt_l1; /* PBL BT L1 */ + u64 *pbl_bt_l0; /* PBL BT L0 */ + dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */ + dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */ + dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */ + u32 pbl_ba_pg_sz; /* BT chunk page size */ + u32 pbl_buf_pg_sz; /* buf chunk page size */ + u32 pbl_hop_num; /* multi-hop number */ }; struct hns_roce_mr_table { @@ -425,16 +424,16 @@ struct hns_roce_wq { u32 max_post; int max_gs; int offset; - int wqe_shift;/* WQE size */ + int wqe_shift; /* WQE size */ u32 head; u32 tail; void __iomem *db_reg_l; }; struct hns_roce_sge { - int sge_cnt; /* SGE num */ + int sge_cnt; /* SGE num */ int offset; - int sge_shift;/* SGE size */ + int sge_shift; /* SGE size */ }; struct hns_roce_buf_list { @@ -750,7 +749,7 @@ struct hns_roce_eq { struct hns_roce_dev *hr_dev; void __iomem *doorbell; - int type_flag;/* Aeq:1 ceq:0 */ + int type_flag; /* Aeq:1 ceq:0 */ int eqn; u32 entries; int log_entries; @@ -796,22 +795,22 @@ struct hns_roce_caps { int 
local_ca_ack_delay; int num_uars; u32 phy_num_uars; - u32 max_sq_sg; /* 2 */ - u32 max_sq_inline; /* 32 */ - u32 max_rq_sg; /* 2 */ + u32 max_sq_sg; + u32 max_sq_inline; + u32 max_rq_sg; u32 max_extend_sg; - int num_qps; /* 256k */ + int num_qps; int reserved_qps; int num_qpc_timer; int num_cqc_timer; u32 max_srq_sg; int num_srqs; - u32 max_wqes; /* 16k */ + u32 max_wqes; u32 max_srqs; u32 max_srq_wrs; u32 max_srq_sges; - u32 max_sq_desc_sz; /* 64 */ - u32 max_rq_desc_sz; /* 64 */ + u32 max_sq_desc_sz; + u32 max_rq_desc_sz; u32 max_srq_desc_sz; int max_qp_init_rdma; int max_qp_dest_rdma; @@ -822,7 +821,7 @@ struct hns_roce_caps { int reserved_cqs; int reserved_srqs; u32 max_srqwqes; - int num_aeq_vectors; /* 1 */ + int num_aeq_vectors; int num_comp_vectors; int num_other_vectors; int num_mtpts; @@ -903,7 +902,7 @@ struct hns_roce_caps { u32 sl_num; u32 tsq_buf_pg_sz; u32 tpq_buf_pg_sz; - u32 chunk_sz; /* chunk size in non multihop mode*/ + u32 chunk_sz; /* chunk size in non multihop mode */ u64 flags; }; @@ -1033,8 +1032,8 @@ struct hns_roce_dev { int loop_idc; u32 sdb_offset; u32 odb_offset; - dma_addr_t tptr_dma_addr; /*only for hw v1*/ - u32 tptr_size; /*only for hw v1*/ + dma_addr_t tptr_dma_addr; /* only for hw v1 */ + u32 tptr_size; /* only for hw v1 */ const struct hns_roce_hw *hw; void *priv; struct workqueue_struct *irq_workq; diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index f1ccb8f35fe5..86783276fb1f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -102,9 +102,9 @@ struct hns_roce_hem_mhop { u32 buf_chunk_size; u32 bt_chunk_size; u32 ba_l0_num; - u32 l0_idx;/* level 0 base address table index */ - u32 l1_idx;/* level 1 base address table index */ - u32 l2_idx;/* level 2 base address table index */ + u32 l0_idx; /* level 0 base address table index */ + u32 l1_idx; /* level 1 base address table index */ + u32 l2_idx; /* level 2 base address table index */ }; void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 8566a6830643..d2458f6c6cea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1886,7 +1886,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) goto err_tpq_init_failed; } - /* Alloc memory for QPC Timer buffer space chunk*/ + /* Alloc memory for QPC Timer buffer space chunk */ for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; qpc_count++) { ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table, @@ -1897,7 +1897,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) } } - /* Alloc memory for CQC Timer buffer space chunk*/ + /* Alloc memory for CQC Timer buffer space chunk */ for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; cqc_count++) { ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table, @@ -5236,14 +5236,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); - /* hop_num = 0 */ if (mhop_num == HNS_ROCE_HOP_NUM_0) { dma_free_coherent(dev, (unsigned int)(eq->entries * eq->eqe_size), eq->bt_l0, eq->l0_dma); return; } - /* hop_num = 1 or hop = 2 */ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); if (mhop_num == 1) { for (i = 0; i < eq->l0_last_num; i++) { @@ -5483,7 +5481,6 @@ static int 
hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, buf_chk_sz); bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN); - /* hop_num = 0 */ if (mhop_num == HNS_ROCE_HOP_NUM_0) { if (eq->entries > buf_chk_sz / eq->eqe_size) { dev_err(dev, "eq entries %d is larger than buf_pg_sz!", @@ -5749,7 +5746,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, } } - /* irq contains: abnormal + AEQ + CEQ*/ + /* irq contains: abnormal + AEQ + CEQ */ for (j = 0; j < irq_num; j++) if (j < other_num) snprintf((char *)hr_dev->irq_names[j], diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 0cfa94605f77..8157679021b9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -517,7 +517,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, if (mhop_num == HNS_ROCE_HOP_NUM_0) return 0; - /* hop_num = 1 */ if (mhop_num == 1) return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz); From 0e20ebf8d48e02846e202cad23bbfd6850b0b003 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 8 Aug 2019 22:53:47 +0800 Subject: [PATCH 49/99] RDMA/hns: Handling the error return value of hem function Handling the error return value of hns_roce_calc_hem_mhop. Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1565276034-97329-8-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hem.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index d3e72a0be0b3..0268c7aae485 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -830,7 +830,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, } else { u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ - hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); + if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop)) + goto out; /* mtt mhop */ i = mhop.l0_idx; j = mhop.l1_idx; @@ -879,11 +880,13 @@ int hns_roce_table_get_range(struct hns_roce_dev *hr_dev, { struct hns_roce_hem_mhop mhop; unsigned long inc = table->table_chunk_size / table->obj_size; - unsigned long i; + unsigned long i = 0; int ret; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { - hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); + ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); + if (ret) + goto fail; inc = mhop.bt_chunk_size / table->obj_size; } @@ -913,7 +916,8 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev, unsigned long i; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { - hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); + if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop)) + return; inc = mhop.bt_chunk_size / table->obj_size; } @@ -1035,7 +1039,8 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev, int i; u64 obj; - hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); + if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop)) + return; buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size : mhop.bt_chunk_size; From e7f40440afb829594c3ae96187404bc0ecf15ce3 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 8 Aug 2019 22:53:48 +0800 Subject: [PATCH 50/99] RDMA/hns: Split bool statement and assign statement Assign statement can not be contained in bool statement or function param. 
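Put differently, the pattern being removed and its replacement look like this (illustrative, matching the AEQ loop below):

	/* before: assignment buried inside the loop condition */
	while ((aeqe = next_aeqe_sw_v2(eq))) {
		/* handle one AEQ entry */
	}

	/* after: assign first, test separately, re-fetch at the end of the body */
	aeqe = next_aeqe_sw_v2(eq);
	while (aeqe) {
		/* handle one AEQ entry */
		aeqe = next_aeqe_sw_v2(eq);
	}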
Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1565276034-97329-9-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index d2458f6c6cea..982b75bd3440 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4938,7 +4938,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { struct device *dev = hr_dev->dev; - struct hns_roce_aeqe *aeqe; + struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq); int aeqe_found = 0; int event_type; int sub_type; @@ -4946,8 +4946,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, u32 qpn; u32 cqn; - while ((aeqe = next_aeqe_sw_v2(eq))) { - + while (aeqe) { /* Make sure we read AEQ entry after we have checked the * ownership bit */ @@ -5016,6 +5015,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, eq->cons_index = 0; } hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn); + + aeqe = next_aeqe_sw_v2(eq); } set_eq_cons_index_v2(eq); @@ -5068,12 +5069,11 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { struct device *dev = hr_dev->dev; - struct hns_roce_ceqe *ceqe; + struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq); int ceqe_found = 0; u32 cqn; - while ((ceqe = next_ceqe_sw_v2(eq))) { - + while (ceqe) { /* Make sure we read CEQ entry after we have checked the * ownership bit */ @@ -5092,6 +5092,8 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, dev_warn(dev, "cons_index overflow, set back to 0.\n"); eq->cons_index = 0; } + + ceqe = next_ceqe_sw_v2(eq); } set_eq_cons_index_v2(eq); From bebdb83f97ee725a8327d36d7cedb85eb0492726 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 8 Aug 2019 22:53:49 +0800 Subject: [PATCH 51/99] RDMA/hns: Refactor irq request code Remove unnecessary if...else..., to make the code look simpler. 
Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1565276034-97329-10-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 25 +++++++++++----------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 982b75bd3440..049d66cdf659 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5749,18 +5749,19 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, } /* irq contains: abnormal + AEQ + CEQ */ - for (j = 0; j < irq_num; j++) - if (j < other_num) - snprintf((char *)hr_dev->irq_names[j], - HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j); - else if (j < (other_num + aeq_num)) - snprintf((char *)hr_dev->irq_names[j], - HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", - j - other_num); - else - snprintf((char *)hr_dev->irq_names[j], - HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", - j - other_num - aeq_num); + for (j = 0; j < other_num; j++) + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j); + + for (j = other_num; j < (other_num + aeq_num); j++) + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", + j - other_num); + + for (j = (other_num + aeq_num); j < irq_num; j++) + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", + j - other_num - aeq_num); for (j = 0; j < irq_num; j++) { if (j < other_num) From 4b42d05d0b2ca56bef42976d304f478f567966ec Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 8 Aug 2019 22:53:50 +0800 Subject: [PATCH 52/99] RDMA/hns: Remove unnecessary kzalloc For hns_roce_v2_query_qp and hns_roce_v2_modify_qp, we can use stack memory to create qp context data. Make the code simpler. Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1565276034-97329-11-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 64 +++++++++------------- 1 file changed, 27 insertions(+), 37 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 049d66cdf659..208ff4be45a7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4288,22 +4288,19 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_v2_qp_context *context; - struct hns_roce_v2_qp_context *qpc_mask; + struct hns_roce_v2_qp_context ctx[2]; + struct hns_roce_v2_qp_context *context = ctx; + struct hns_roce_v2_qp_context *qpc_mask = ctx + 1; struct device *dev = hr_dev->dev; int ret; - context = kcalloc(2, sizeof(*context), GFP_ATOMIC); - if (!context) - return -ENOMEM; - - qpc_mask = context + 1; /* * In v2 engine, software pass context and context mask to hardware * when modifying qp. If software need modify some fields in context, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. 
*/ + memset(context, 0, sizeof(*context)); memset(qpc_mask, 0xff, sizeof(*qpc_mask)); ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state, new_state, context, qpc_mask); @@ -4349,8 +4346,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, V2_QPC_BYTE_60_QP_ST_S, 0); /* SW pass context to HW */ - ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, - context, hr_qp); + ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp); if (ret) { dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); goto out; @@ -4378,7 +4374,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, } out: - kfree(context); return ret; } @@ -4429,16 +4424,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_v2_qp_context *context; + struct hns_roce_v2_qp_context context = {}; struct device *dev = hr_dev->dev; int tmp_qp_state; int state; int ret; - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - memset(qp_attr, 0, sizeof(*qp_attr)); memset(qp_init_attr, 0, sizeof(*qp_init_attr)); @@ -4450,14 +4441,14 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, goto done; } - ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context); + ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context); if (ret) { dev_err(dev, "query qpc error\n"); ret = -EINVAL; goto out; } - state = roce_get_field(context->byte_60_qpst_tempid, + state = roce_get_field(context.byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S); tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); if (tmp_qp_state == -1) { @@ -4467,7 +4458,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, } hr_qp->state = (u8)tmp_qp_state; qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; - qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc, + qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, V2_QPC_BYTE_24_MTU_S); qp_attr->path_mig_state = IB_MIG_ARMED; @@ -4475,20 +4466,20 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, if (hr_qp->ibqp.qp_type == IB_QPT_UD) qp_attr->qkey = V2_QKEY_VAL; - qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn, + qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_EPSN_M, V2_QPC_BYTE_108_RX_REQ_EPSN_S); - qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn, + qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M, V2_QPC_BYTE_172_SQ_CUR_PSN_S); - qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err, + qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S); - qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en, + qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) | - ((roce_get_bit(context->byte_76_srqn_op_en, + ((roce_get_bit(context.byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) | - ((roce_get_bit(context->byte_76_srqn_op_en, + ((roce_get_bit(context.byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S); if (hr_qp->ibqp.qp_type == IB_QPT_RC || @@ -4497,43 +4488,43 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, rdma_ah_retrieve_grh(&qp_attr->ah_attr); 
rdma_ah_set_sl(&qp_attr->ah_attr, - roce_get_field(context->byte_28_at_fl, + roce_get_field(context.byte_28_at_fl, V2_QPC_BYTE_28_SL_M, V2_QPC_BYTE_28_SL_S)); - grh->flow_label = roce_get_field(context->byte_28_at_fl, + grh->flow_label = roce_get_field(context.byte_28_at_fl, V2_QPC_BYTE_28_FL_M, V2_QPC_BYTE_28_FL_S); - grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx, + grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S); - grh->hop_limit = roce_get_field(context->byte_24_mtu_tc, + grh->hop_limit = roce_get_field(context.byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, V2_QPC_BYTE_24_HOP_LIMIT_S); - grh->traffic_class = roce_get_field(context->byte_24_mtu_tc, + grh->traffic_class = roce_get_field(context.byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S); - memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw)); + memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw)); } qp_attr->port_num = hr_qp->port + 1; qp_attr->sq_draining = 0; - qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl, + qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M, V2_QPC_BYTE_208_SR_MAX_S); - qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq, + qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, V2_QPC_BYTE_140_RR_MAX_S); - qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn, + qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_MIN_RNR_TIME_M, V2_QPC_BYTE_80_MIN_RNR_TIME_S); - qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl, + qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl, V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S); - qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn, + qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M, V2_QPC_BYTE_212_RETRY_CNT_S); - qp_attr->rnr_retry = context->rq_rnr_timer; + qp_attr->rnr_retry = context.rq_rnr_timer; done: qp_attr->cur_qp_state = qp_attr->qp_state; @@ -4552,7 +4543,6 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, out: mutex_unlock(&hr_qp->mutex); - kfree(context); return ret; } From 260c3b3499198b2cfa911ea7fb4f28bb129ef1e8 Mon Sep 17 00:00:00 2001 From: Yangyang Li Date: Thu, 8 Aug 2019 22:53:51 +0800 Subject: [PATCH 53/99] RDMA/hns: Refactor hns_roce_v2_set_hem for hip08 In order to reduce the complexity of hns_roce_v2_set_hem, extract the implementation of op as a function. 
Signed-off-by: Yangyang Li Link: https://lore.kernel.org/r/1565276034-97329-12-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 93 ++++++++++++---------- 1 file changed, 51 insertions(+), 42 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 208ff4be45a7..4de5f848f91f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -2903,46 +2903,15 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, return npolled; } -static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) +static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, + int step_idx) { - struct device *dev = hr_dev->dev; - struct hns_roce_cmd_mailbox *mailbox; - struct hns_roce_hem_iter iter; - struct hns_roce_hem_mhop mhop; - struct hns_roce_hem *hem; - unsigned long mhop_obj = obj; - int i, j, k; - int ret = 0; - u64 hem_idx = 0; - u64 l1_idx = 0; - u64 bt_ba = 0; - u32 chunk_ba_num; - u32 hop_num; - u16 op = 0xff; + int op; - if (!hns_roce_check_whether_mhop(hr_dev, table->type)) - return 0; + if (type == HEM_TYPE_SCCC && step_idx) + return -EINVAL; - hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); - i = mhop.l0_idx; - j = mhop.l1_idx; - k = mhop.l2_idx; - hop_num = mhop.hop_num; - chunk_ba_num = mhop.bt_chunk_size / 8; - - if (hop_num == 2) { - hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num + - k; - l1_idx = i * chunk_ba_num + j; - } else if (hop_num == 1) { - hem_idx = i * chunk_ba_num + j; - } else if (hop_num == HNS_ROCE_HOP_NUM_0) { - hem_idx = i; - } - - switch (table->type) { + switch (type) { case HEM_TYPE_QPC: op = HNS_ROCE_CMD_WRITE_QPC_BT0; break; @@ -2965,15 +2934,55 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; break; default: - dev_warn(dev, "Table %d not to be written by mailbox!\n", - table->type); - return 0; + dev_warn(hr_dev->dev, + "Table %d not to be written by mailbox!\n", type); + return -EINVAL; } - if (table->type == HEM_TYPE_SCCC && step_idx) + return op + step_idx; +} + +static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, int obj, + int step_idx) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_hem_iter iter; + struct hns_roce_hem_mhop mhop; + struct hns_roce_hem *hem; + unsigned long mhop_obj = obj; + int i, j, k; + int ret = 0; + u64 hem_idx = 0; + u64 l1_idx = 0; + u64 bt_ba = 0; + u32 chunk_ba_num; + u32 hop_num; + int op; + + if (!hns_roce_check_whether_mhop(hr_dev, table->type)) return 0; - op += step_idx; + hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); + i = mhop.l0_idx; + j = mhop.l1_idx; + k = mhop.l2_idx; + hop_num = mhop.hop_num; + chunk_ba_num = mhop.bt_chunk_size / 8; + + if (hop_num == 2) { + hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num + + k; + l1_idx = i * chunk_ba_num + j; + } else if (hop_num == 1) { + hem_idx = i * chunk_ba_num + j; + } else if (hop_num == HNS_ROCE_HOP_NUM_0) { + hem_idx = i; + } + + op = get_op_for_set_hem(hr_dev, table->type, step_idx); + if (op == -EINVAL) + return 0; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) From 249f2f921f24db878b0cc5da80cb15d531587d79 Mon Sep 17 00:00:00 2001 From: Weihang Li Date: Thu, 8 Aug 2019 22:53:52 +0800 Subject: [PATCH 54/99] RDMA/hns: Remove redundant print in hns_roce_v2_ceq_int() 
There is no need to tell users when eq->cons_index overflows; just set it back to zero. Signed-off-by: Weihang Li Link: https://lore.kernel.org/r/1565276034-97329-13-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 4de5f848f91f..a1f62e072753 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5009,10 +5009,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, ++eq->cons_index; aeqe_found = 1; - if (eq->cons_index > (2 * eq->entries - 1)) { - dev_warn(dev, "cons_index overflow, set back to 0.\n"); + if (eq->cons_index > (2 * eq->entries - 1)) eq->cons_index = 0; - } + hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn); aeqe = next_aeqe_sw_v2(eq); From d967e2625faa43da9e3be3228b8c57d3cf404953 Mon Sep 17 00:00:00 2001 From: Weihang Li Date: Thu, 8 Aug 2019 22:53:53 +0800 Subject: [PATCH 55/99] RDMA/hns: Disable alw_lcl_lpbk of SSU If alw_lcl_lpbk is enabled in promiscuous mode, a packet whose source and destination MAC addresses are equal will be handled in both the inner loopback and the outer loopback. This will halve the performance of RoCE in promiscuous mode. Signed-off-by: Weihang Li Link: https://lore.kernel.org/r/1565276034-97329-14-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index a1f62e072753..a178d63f3ec8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1308,7 +1308,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN); desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1); + roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0); roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1); return hns_roce_cmq_send(hr_dev, &desc, 1); From db50077b953072b420943e89fcf06d5922898b89 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 8 Aug 2019 22:53:54 +0800 Subject: [PATCH 56/99] RDMA/hns: Use the new APIs for printing log Use the new ibdev_* print APIs instead of the dev_* print interfaces in some functions.
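The conversion is mechanical; a representative example (shown here with the underlying expressions rather than the local variables used in the hunks below) is:

	-	dev_err(hr_dev->dev, "Create GSI QP failed!\n");
	+	ibdev_err(&hr_dev->ib_dev, "Create GSI QP failed!\n");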
Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1565276034-97329-15-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 8 ++-- drivers/infiniband/hw/hns/hns_roce_qp.c | 45 ++++++++++++---------- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index a178d63f3ec8..87a15747ddae 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4560,7 +4560,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, struct ib_udata *udata) { struct hns_roce_cq *send_cq, *recv_cq; - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; int ret; if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) { @@ -4568,7 +4568,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); if (ret) { - dev_err(dev, "modify QP to Reset failed.\n"); + ibdev_err(ibdev, "modify QP to Reset failed.\n"); return ret; } } @@ -4637,8 +4637,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); if (ret) { - dev_err(hr_dev->dev, "Destroy qp 0x%06lx failed(%d)\n", - hr_qp->qpn, ret); + ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", + hr_qp->qpn, ret); return ret; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index f803209b6e04..b729f8ef90a2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -335,13 +335,13 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || ucmd->log_sq_stride > max_sq_stride || ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { - dev_err(hr_dev->dev, "check SQ size error!\n"); + ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n"); return -EINVAL; } if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { - dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n", - cap->max_send_sge); + ibdev_err(&hr_dev->ib_dev, "SQ sge error! 
max_send_sge=%d\n", + cap->max_send_sge); return -EINVAL; } @@ -988,7 +988,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_sqp *hr_sqp; struct hns_roce_qp *hr_qp; int ret; @@ -1002,8 +1002,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, hr_qp); if (ret) { - dev_err(dev, "Create RC QP 0x%06lx failed(%d)\n", - hr_qp->qpn, ret); + ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n", + hr_qp->qpn, ret); kfree(hr_qp); return ERR_PTR(ret); } @@ -1015,7 +1015,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, case IB_QPT_GSI: { /* Userspace is not allowed to create special QPs: */ if (udata) { - dev_err(dev, "not support usr space GSI\n"); + ibdev_err(ibdev, "not support usr space GSI\n"); return ERR_PTR(-EINVAL); } @@ -1037,7 +1037,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp->ibqp.qp_num, hr_qp); if (ret) { - dev_err(dev, "Create GSI QP failed!\n"); + ibdev_err(ibdev, "Create GSI QP failed!\n"); kfree(hr_sqp); return ERR_PTR(ret); } @@ -1045,7 +1045,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, break; } default:{ - dev_err(dev, "not support QP type %d\n", init_attr->qp_type); + ibdev_err(ibdev, "not support QP type %d\n", + init_attr->qp_type); return ERR_PTR(-EINVAL); } } @@ -1075,7 +1076,6 @@ static int check_mtu_validate(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_attr *attr, int attr_mask) { - struct device *dev = hr_dev->dev; enum ib_mtu active_mtu; int p; @@ -1085,7 +1085,8 @@ static int check_mtu_validate(struct hns_roce_dev *hr_dev, if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && attr->path_mtu > hr_dev->caps.max_mtu) || attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { - dev_err(dev, "attr path_mtu(%d)invalid while modify qp", + ibdev_err(&hr_dev->ib_dev, + "attr path_mtu(%d)invalid while modify qp", attr->path_mtu); return -EINVAL; } @@ -1098,12 +1099,12 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = hr_dev->dev; int p; if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { - dev_err(dev, "attr port_num invalid.attr->port_num=%d\n", + ibdev_err(&hr_dev->ib_dev, + "attr port_num invalid.attr->port_num=%d\n", attr->port_num); return -EINVAL; } @@ -1111,7 +1112,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_PKEY_INDEX) { p = attr_mask & IB_QP_PORT ? 
(attr->port_num - 1) : hr_qp->port; if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { - dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n", + ibdev_err(&hr_dev->ib_dev, + "attr pkey_index invalid.attr->pkey_index=%d\n", attr->pkey_index); return -EINVAL; } @@ -1119,14 +1121,16 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { - dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", + ibdev_err(&hr_dev->ib_dev, + "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", attr->max_rd_atomic); return -EINVAL; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { - dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", + ibdev_err(&hr_dev->ib_dev, + "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", attr->max_dest_rd_atomic); return -EINVAL; } @@ -1143,7 +1147,6 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); enum ib_qp_state cur_state, new_state; - struct device *dev = hr_dev->dev; int ret = -EINVAL; mutex_lock(&hr_qp->mutex); @@ -1160,14 +1163,15 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (hr_qp->rdb_en == 1) hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); } else { - dev_warn(dev, "flush cqe is not supported in userspace!\n"); + ibdev_warn(&hr_dev->ib_dev, + "flush cqe is not supported in userspace!\n"); goto out; } } if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { - dev_err(dev, "ib_modify_qp_is_ok failed\n"); + ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); goto out; } @@ -1178,7 +1182,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (cur_state == new_state && cur_state == IB_QPS_RESET) { if (hr_dev->caps.min_wqes) { ret = -EPERM; - dev_err(dev, "cur_state=%d new_state=%d\n", cur_state, + ibdev_err(&hr_dev->ib_dev, + "cur_state=%d new_state=%d\n", cur_state, new_state); } else { ret = 0; From 972d7560ee37d48a9121ca1b7796d107a16784e7 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 8 Aug 2019 11:43:57 +0300 Subject: [PATCH 57/99] IB/mlx5: Add legacy events to DEVX list Add two events that were defined in the device specification but were not exposed in the driver list. Post this patch those events can be read over the DEVX events interface once be reported by the firmware. 
Signed-off-by: Yishai Hadas Reviewed-by: Edward Srouji Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190808084358.29517-4-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/devx.c | 8 ++++++++ include/linux/mlx5/device.h | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index fd577ffd7864..3dbdfe0eb5e4 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -233,6 +233,8 @@ static bool is_legacy_obj_event_num(u16 event_num) case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: case MLX5_EVENT_TYPE_DCT_DRAINED: case MLX5_EVENT_TYPE_COMP: + case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: + case MLX5_EVENT_TYPE_XRQ_ERROR: return true; default: return false; @@ -315,8 +317,10 @@ static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe) case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: return eqe->data.qp_srq.type; case MLX5_EVENT_TYPE_CQ_ERROR: + case MLX5_EVENT_TYPE_XRQ_ERROR: return 0; case MLX5_EVENT_TYPE_DCT_DRAINED: + case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: return MLX5_EVENT_QUEUE_TYPE_DCT; default: return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); @@ -2300,7 +2304,11 @@ static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data) case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; break; + case MLX5_EVENT_TYPE_XRQ_ERROR: + obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff; + break; case MLX5_EVENT_TYPE_DCT_DRAINED: + case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; break; case MLX5_EVENT_TYPE_CQ_ERROR: diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index ce9839c8bc1a..e427af260ebe 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -328,6 +328,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, + MLX5_EVENT_TYPE_XRQ_ERROR = 0x18, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, @@ -345,6 +346,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, + MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, @@ -584,6 +586,12 @@ struct mlx5_eqe_cq_err { u8 syndrome; }; +struct mlx5_eqe_xrq_err { + __be32 reserved1[5]; + __be32 type_xrqn; + __be32 reserved2; +}; + struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; @@ -698,6 +706,7 @@ union ev_data { struct mlx5_eqe_pps pps; struct mlx5_eqe_dct dct; struct mlx5_eqe_temp_warning temp_warning; + struct mlx5_eqe_xrq_err xrq_err; } __packed; struct mlx5_eqe { From 8293a598feec361bf40c81711e9974139a9dd580 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 8 Aug 2019 11:43:58 +0300 Subject: [PATCH 58/99] IB/mlx5: Expose XRQ legacy commands over the DEVX interface Expose missing XRQ legacy commands over the DEVX interface. 
Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190808084358.29517-5-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/devx.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 3dbdfe0eb5e4..04b4e937c198 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -546,6 +546,8 @@ static u64 devx_get_obj_id(const void *in) break; case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: + case MLX5_CMD_OP_RELEASE_XRQ_ERROR: + case MLX5_CMD_OP_MODIFY_XRQ: obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ, MLX5_GET(arm_xrq_in, in, xrqn)); break; @@ -822,6 +824,8 @@ static bool devx_is_obj_modify_cmd(const void *in) case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: + case MLX5_CMD_OP_RELEASE_XRQ_ERROR: + case MLX5_CMD_OP_MODIFY_XRQ: return true; case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: { From 9dc4cfff115f03e999e5d50a593c83cd0ef4125c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 13 Aug 2019 13:28:14 +0300 Subject: [PATCH 59/99] RDMA/mlx5: Annotate lock dependency in bind/unbind slave port NULL-ing notifier_call is performed under protection of mlx5_ib_multiport_mutex lock. Such protection is not easily spotted and better to be guarded by lockdep annotation. Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190813102814.22350-1-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index af1986b4aa7d..98e566acb746 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5819,7 +5819,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev) mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); } -/* The mlx5_ib_multiport_mutex should be held when calling this function */ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi) { @@ -5829,6 +5828,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, int err; int i; + lockdep_assert_held(&mlx5_ib_multiport_mutex); + mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); spin_lock(&port->mp.mpi_lock); @@ -5879,13 +5880,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; } -/* The mlx5_ib_multiport_mutex should be held when calling this function */ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi) { u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; int err; + lockdep_assert_held(&mlx5_ib_multiport_mutex); + spin_lock(&ibdev->port[port_num].mp.mpi_lock); if (ibdev->port[port_num].mp.mpi) { mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", From 0e1aa6f0959e053651102c69cde9d6c42fea2a91 Mon Sep 17 00:00:00 2001 From: Weihang Li Date: Fri, 9 Aug 2019 17:40:58 +0800 Subject: [PATCH 60/99] RDMA/hns: Logic optimization of wc_flags We should set IB_WC_WITH_VLAN only when VLAN is enabled. In addition, move setting of IB_WC_WITH_SMAC below setting of wc->smac. 
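In simplified form (the identifier names below are illustrative placeholders; the real CQE field accessors are in the diff that follows), the resulting logic is:

	fill_smac_from_cqe(wc, cqe);		/* placeholder for the smac byte copies */
	wc->wc_flags |= IB_WC_WITH_SMAC;	/* flagged only once smac is filled */

	if (vid_valid_in_cqe) {
		wc->vlan_id = vid_from_cqe;
		wc->wc_flags |= IB_WC_WITH_VLAN;	/* only when a VLAN is present */
	} else {
		wc->vlan_id = 0xffff;
	}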
Signed-off-by: Weihang Li Link: https://lore.kernel.org/r/1565343666-73193-2-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 87a15747ddae..7a14f0be5019 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -2860,15 +2860,16 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, wc->smac[5] = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_SMAC_5_M, V2_CQE_BYTE_28_SMAC_5_S); + wc->wc_flags |= IB_WC_WITH_SMAC; if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) { wc->vlan_id = (u16)roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_VID_M, V2_CQE_BYTE_28_VID_S); + wc->wc_flags |= IB_WC_WITH_VLAN; } else { wc->vlan_id = 0xffff; } - wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); wc->network_hdr_type = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_PORT_TYPE_M, V2_CQE_BYTE_28_PORT_TYPE_S); From 76827087bb3f82f3cc18871633e06a4df36a5ed9 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Fri, 9 Aug 2019 17:40:59 +0800 Subject: [PATCH 61/99] RDMA/hns: Bugfix for creating qp attached to srq When a qp is created and attached to an srq, the rq will no longer be used and its members will be set to zero. As a result, the wrid of the rq will not be allocated or used. Fixes: 926a01dc000d ("RDMA/hns: Add QP operations support for hip08 SoC") Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1565343666-73193-3-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_qp.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index b729f8ef90a2..ec6b5dd3f442 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -854,11 +854,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); - hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) { + if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) { ret = -ENOMEM; - goto err_wrid; + goto err_get_bufs; + } + + if (hr_qp->rq.wqe_cnt) { + hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) { + ret = -ENOMEM; + goto err_sq_wrid; + } } } @@ -944,8 +951,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hns_roce_qp_has_rq(init_attr)) hns_roce_db_unmap_user(uctx, &hr_qp->rdb); } else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); + if (hr_qp->rq.wqe_cnt) + kfree(hr_qp->rq.wrid); } err_sq_dbmap: @@ -956,6 +963,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hns_roce_qp_has_sq(init_attr)) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); +err_sq_wrid: + if (!udata) + kfree(hr_qp->sq.wrid); + err_get_bufs: hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); From d7e5ca88d60c0b5e166e33bfe356e961824ce0f4 Mon Sep 17 00:00:00 2001 From: Yangyang Li Date: Fri, 9 Aug 2019 17:41:01 +0800 Subject: [PATCH 62/99] RDMA/hns: Modify pi value when cq overflows When exiting the "for loop", the actual value of pi has already been increased by 1, which is consistent with the next calculation.
But when pi is equal to "ci + hr_cq-> ib_cq.cqe", the "break" was called and the pi is actual value, it will lead one cqe still existing, so the "==" should be modify to ">". Signed-off-by: Yangyang Li Link: https://lore.kernel.org/r/1565343666-73193-5-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 7a14f0be5019..e05a6a290a55 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -2407,7 +2407,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index); ++prod_index) { - if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) + if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe) break; } From 9bba3f0cbfc8abf2e1549ea03c0128186081d7a8 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Fri, 9 Aug 2019 17:41:02 +0800 Subject: [PATCH 63/99] RDMA/hns: Bugfix for slab-out-of-bounds when unloading hip08 driver kasan will report a BUG when run command 'rmmod hns_roce_hw_v2', the calltrace is as follows: ================================================================== BUG: KASAN: slab-out-of-bounds in hns_roce_table_mhop_put+0x584/0x828 [hns_roce] Read of size 8 at addr ffff802185e08300 by task rmmod/270 Call trace: dump_backtrace+0x0/0x1e8 show_stack+0x14/0x20 dump_stack+0xc4/0xfc print_address_description+0x60/0x270 __kasan_report+0x164/0x1b8 kasan_report+0xc/0x18 __asan_load8+0x84/0xa8 hns_roce_table_mhop_put+0x584/0x828 [hns_roce] hns_roce_table_put+0x174/0x1a0 [hns_roce] hns_roce_mr_free+0x124/0x210 [hns_roce] hns_roce_dereg_mr+0x90/0xb8 [hns_roce] ib_dealloc_pd_user+0x60/0xf0 ib_mad_port_close+0x128/0x1d8 ib_mad_remove_device+0x94/0x118 remove_client_context+0xa0/0xe0 disable_device+0xfc/0x1c0 __ib_unregister_device+0x60/0xe0 ib_unregister_device+0x24/0x38 hns_roce_exit+0x3c/0x138 [hns_roce] __hns_roce_hw_v2_uninit_instance.isra.30+0x28/0x50 [hns_roce_hw_v2] hns_roce_hw_v2_uninit_instance+0x44/0x60 [hns_roce_hw_v2] hclge_uninit_client_instance+0x15c/0x238 [hclge] hnae3_uninit_client_instance+0x84/0xa8 [hnae3] hnae3_unregister_client+0x84/0x158 [hnae3] hns_roce_hw_v2_exit+0x14/0x20 [hns_roce_hw_v2] __arm64_sys_delete_module+0x20c/0x308 el0_svc_handler+0xbc/0x210 el0_svc+0x8/0xc Allocated by task 255: __kasan_kmalloc.isra.0+0xd0/0x180 kasan_kmalloc+0xc/0x18 __kmalloc+0x16c/0x328 hns_roce_init_hem_table+0x20c/0x428 [hns_roce] hns_roce_init+0x214/0xfe0 [hns_roce] __hns_roce_hw_v2_init_instance+0x284/0x330 [hns_roce_hw_v2] hns_roce_hw_v2_init_instance+0xd0/0x1b8 [hns_roce_hw_v2] hclge_init_roce_client_instance+0x180/0x310 [hclge] hclge_init_client_instance+0xcc/0x508 [hclge] hnae3_init_client_instance.part.3+0x3c/0x80 [hnae3] hnae3_register_client+0x134/0x1a8 [hnae3] 0xffff200009c00014 do_one_initcall+0x9c/0x3e0 do_init_module+0xd4/0x2d8 load_module+0x3284/0x3690 __se_sys_init_module+0x274/0x308 __arm64_sys_init_module+0x40/0x50 el0_svc_handler+0xbc/0x210 el0_svc+0x8/0xc Freed by task 0: (stack is not available) The buggy address belongs to the object at ffff802185e06300 which belongs to the cache kmalloc-8k of size 8192 The buggy address is located 0 bytes to the right of 8192-byte region [ffff802185e06300, ffff802185e08300) The buggy address belongs to the page: page:ffff7fe008617800 refcount:1 mapcount:0 mapping:ffff802340020e00 index:0x0 
compound_mapcount: 0 flags: 0x5fffe00000010200(slab|head) raw: 5fffe00000010200 dead000000000100 dead000000000200 ffff802340020e00 raw: 0000000000000000 00000000803e003e 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff802185e08200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffff802185e08280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 >ffff802185e08300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ^ ffff802185e08380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff802185e08400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ================================================================== Disabling lock debugging due to kernel taint Fixes: a25d13cbe816 ("RDMA/hns: Add the interfaces to support multi hop addressing for the contexts in hip08") Signed-off-by: Xi Wang Link: https://lore.kernel.org/r/1565343666-73193-6-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hem.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 0268c7aae485..f2c4fef0b70e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -85,12 +85,13 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) } static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx, - u32 bt_chunk_num) + u32 bt_chunk_num, u64 hem_max_num) { - int i; + u64 check_max_num = start_idx + bt_chunk_num; + u64 i; - for (i = 0; i < bt_chunk_num; i++) - if (hem[start_idx + i]) + for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++) + if (hem[i]) return false; return true; @@ -496,6 +497,12 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, return -EINVAL; } + if (unlikely(hem_idx >= table->num_hem)) { + dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n", + table->type, hem_idx, table->num_hem); + return -EINVAL; + } + mutex_lock(&table->mutex); if (table->hem[hem_idx]) { @@ -732,7 +739,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, if (check_whether_bt_num_2(table->type, hop_num)) { start_idx = mhop.l0_idx * chunk_ba_num; if (hns_roce_check_hem_null(table->hem, start_idx, - chunk_ba_num)) { + chunk_ba_num, table->num_hem)) { if (table->type < HEM_TYPE_MTT && hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) dev_warn(dev, "Clear HEM base address failed.\n"); @@ -746,7 +753,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + mhop.l1_idx * chunk_ba_num; if (hns_roce_check_hem_null(table->hem, start_idx, - chunk_ba_num)) { + chunk_ba_num, table->num_hem)) { if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) dev_warn(dev, "Clear HEM base address failed.\n"); From bf8c02f961c89e5ccae5987b7ab28f5592a35101 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Fri, 9 Aug 2019 17:41:03 +0800 Subject: [PATCH 64/99] RDMA/hns: bugfix for slab-out-of-bounds when loading hip08 driver kasan will report a BUG when run command 'insmod hns_roce_hw_v2.ko', the calltrace is as follows: ================================================================== BUG: KASAN: slab-out-of-bounds in hns_roce_v2_init_eq_table+0x1324/0x1948 [hns_roce_hw_v2] Read of size 8 at addr ffff8020e7a10608 by task insmod/256 CPU: 0 PID: 256 Comm: insmod Tainted: G O 5.2.0-rc4 #1 Hardware name: Huawei D06 /D06, BIOS Hisilicon 
D06 UEFI RC0 Call trace: dump_backtrace+0x0/0x1e8 show_stack+0x14/0x20 dump_stack+0xc4/0xfc print_address_description+0x60/0x270 __kasan_report+0x164/0x1b8 kasan_report+0xc/0x18 __asan_load8+0x84/0xa8 hns_roce_v2_init_eq_table+0x1324/0x1948 [hns_roce_hw_v2] hns_roce_init+0xf8/0xfe0 [hns_roce] __hns_roce_hw_v2_init_instance+0x284/0x330 [hns_roce_hw_v2] hns_roce_hw_v2_init_instance+0xd0/0x1b8 [hns_roce_hw_v2] hclge_init_roce_client_instance+0x180/0x310 [hclge] hclge_init_client_instance+0xcc/0x508 [hclge] hnae3_init_client_instance.part.3+0x3c/0x80 [hnae3] hnae3_register_client+0x134/0x1a8 [hnae3] hns_roce_hw_v2_init+0x14/0x10000 [hns_roce_hw_v2] do_one_initcall+0x9c/0x3e0 do_init_module+0xd4/0x2d8 load_module+0x3284/0x3690 __se_sys_init_module+0x274/0x308 __arm64_sys_init_module+0x40/0x50 el0_svc_handler+0xbc/0x210 el0_svc+0x8/0xc Allocated by task 256: __kasan_kmalloc.isra.0+0xd0/0x180 kasan_kmalloc+0xc/0x18 __kmalloc+0x16c/0x328 hns_roce_v2_init_eq_table+0x764/0x1948 [hns_roce_hw_v2] hns_roce_init+0xf8/0xfe0 [hns_roce] __hns_roce_hw_v2_init_instance+0x284/0x330 [hns_roce_hw_v2] hns_roce_hw_v2_init_instance+0xd0/0x1b8 [hns_roce_hw_v2] hclge_init_roce_client_instance+0x180/0x310 [hclge] hclge_init_client_instance+0xcc/0x508 [hclge] hnae3_init_client_instance.part.3+0x3c/0x80 [hnae3] hnae3_register_client+0x134/0x1a8 [hnae3] hns_roce_hw_v2_init+0x14/0x10000 [hns_roce_hw_v2] do_one_initcall+0x9c/0x3e0 do_init_module+0xd4/0x2d8 load_module+0x3284/0x3690 __se_sys_init_module+0x274/0x308 __arm64_sys_init_module+0x40/0x50 el0_svc_handler+0xbc/0x210 el0_svc+0x8/0xc Freed by task 0: (stack is not available) The buggy address belongs to the object at ffff8020e7a10600 which belongs to the cache kmalloc-128 of size 128 The buggy address is located 8 bytes inside of 128-byte region [ffff8020e7a10600, ffff8020e7a10680) The buggy address belongs to the page: page:ffff7fe00839e840 refcount:1 mapcount:0 mapping:ffff802340020200 index:0x0 flags: 0x5fffe00000000200(slab) raw: 5fffe00000000200 dead000000000100 dead000000000200 ffff802340020200 raw: 0000000000000000 0000000081000100 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff8020e7a10500: 00 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc ffff8020e7a10580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffff8020e7a10600: 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ^ ffff8020e7a10680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff8020e7a10700: 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ================================================================== Disabling lock debugging due to kernel taint Fixes: a5073d6054f7 ("RDMA/hns: Add eq support of hip08") Signed-off-by: Xi Wang Link: https://lore.kernel.org/r/1565343666-73193-7-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index e05a6a290a55..206dfdb16cd5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5547,7 +5547,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, break; } eq->cur_eqe_ba = eq->buf_dma[0]; - eq->nxt_eqe_ba = eq->buf_dma[1]; + if (ba_num > 1) + eq->nxt_eqe_ba = eq->buf_dma[1]; } else if (mhop_num == 2) { /* alloc L1 BT and buf */ @@ -5588,7 +5589,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev 
*hr_dev, break; } eq->cur_eqe_ba = eq->buf_dma[0]; - eq->nxt_eqe_ba = eq->buf_dma[1]; + if (ba_num > 1) + eq->nxt_eqe_ba = eq->buf_dma[1]; } eq->l0_last_num = i + 1; From 77905379e9b28a490051b15239bcaf6458d171ee Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Fri, 9 Aug 2019 17:41:04 +0800 Subject: [PATCH 65/99] RDMA/hns: Remove unused member The toggle member of struct hns_roce_cmdq is unused, so delete it. Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1565343666-73193-8-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_cmd.c | 1 - drivers/infiniband/hw/hns/hns_roce_device.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 0cd09bf4d7ea..ade26faade8d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -211,7 +211,6 @@ int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) mutex_init(&hr_dev->cmd.hcr_mutex); sema_init(&hr_dev->cmd.poll_sem, 1); hr_dev->cmd.use_events = 0; - hr_dev->cmd.toggle = 1; hr_dev->cmd.max_cmds = CMD_MAX_NUM; hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev, HNS_ROCE_MAILBOX_SIZE, diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 4fffa3da3a87..c7bf738fb137 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -617,7 +617,6 @@ struct hns_roce_cmdq { * close device, switch into poll mode(non event mode) */ u8 use_events; - u8 toggle; }; struct hns_roce_cmd_mailbox { From a7325af725e83ae1a4e7a2290250dd1c952b672d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 16 Aug 2019 14:39:07 +0300 Subject: [PATCH 66/99] RDMA/hns: Fix some white space in check_mtu_validate() This line was indented a bit too far. Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/20190816113907.GA30799@mwanda Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index ec6b5dd3f442..ba8176869f51 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1091,7 +1091,7 @@ static int check_mtu_validate(struct hns_roce_dev *hr_dev, int p; p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; - active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); + active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && attr->path_mtu > hr_dev->caps.max_mtu) || From b2299e83815c59ab59c4ee4fb4842b3b28e5072f Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 19 Aug 2019 14:45:47 +0300 Subject: [PATCH 67/99] RDMA: Delete DEBUG code There is no need to keep DEBUG defines for out-of-tree testing.
Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190819114547.20704-1-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/core/fmr_pool.c | 13 ------------- include/rdma/ib_verbs.h | 3 --- 2 files changed, 16 deletions(-) diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 7d841b689a1e..e08aec427027 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -148,13 +148,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool) hlist_del_init(&fmr->cache_node); fmr->remap_count = 0; list_add_tail(&fmr->fmr->list, &fmr_list); - -#ifdef DEBUG - if (fmr->ref_count !=0) { - pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n", - fmr, fmr->ref_count); - } -#endif } list_splice_init(&pool->dirty_list, &unmap_list); @@ -496,12 +489,6 @@ void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) } } -#ifdef DEBUG - if (fmr->ref_count < 0) - pr_warn(PFX "FMR %p has ref count %d < 0\n", - fmr, fmr->ref_count); -#endif - spin_unlock_irqrestore(&pool->pool_lock, flags); } EXPORT_SYMBOL(ib_fmr_pool_unmap); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 391499008a22..08e966c8081a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -98,9 +98,6 @@ void ibdev_info(const struct ib_device *ibdev, const char *format, ...); #if defined(CONFIG_DYNAMIC_DEBUG) #define ibdev_dbg(__dev, format, args...) \ dynamic_ibdev_dbg(__dev, format, ##args) -#elif defined(DEBUG) -#define ibdev_dbg(__dev, format, args...) \ - ibdev_printk(KERN_DEBUG, __dev, format, ##args) #else __printf(2, 3) __cold static inline From d8abe88450beb96a66e434323eb6ab737654b840 Mon Sep 17 00:00:00 2001 From: Mark Zhang Date: Mon, 19 Aug 2019 14:36:26 +0300 Subject: [PATCH 68/99] RDMA/mlx5: RDMA_RX flow type support for user applications Currently user applications can only steer TCP/IP(NIC RX/RX) traffic. This patch adds RDMA_RX as a new flow type to allow the user to insert steering rules to control RDMA traffic. Two destinations are supported(but not set at the same time): devx flow table object and QP. 
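Concretely, the added validation (quoted from the diff below in simplified form) enforces that exactly one destination is given:

	/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
	if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
	    ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
		return -EINVAL;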
Signed-off-by: Mark Zhang Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190819113626.20284-4-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/flow.c | 13 +++++++++++-- drivers/infiniband/hw/mlx5/main.c | 7 +++++++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + include/uapi/rdma/mlx5_user_ioctl_verbs.h | 1 + 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index b8841355fcd5..571bb8539cdb 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -32,6 +32,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: *namespace = MLX5_FLOW_NAMESPACE_FDB; break; + case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: + *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; + break; default: return -EINVAL; } @@ -101,6 +104,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx) return -EINVAL; + /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */ + if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && + ((!dest_devx && !dest_qp) || (dest_devx && dest_qp))) + return -EINVAL; + if (dest_devx) { devx_obj = uverbs_attr_get_obj( attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); @@ -112,8 +120,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( */ if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type)) return -EINVAL; - /* Allow only flow table as dest when inserting to FDB */ - if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && + /* Allow only flow table as dest when inserting to FDB or RDMA_RX */ + if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB || + fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) return -EINVAL; } else if (dest_qp) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 1ec0e667110e..07aecba16019 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3963,6 +3963,11 @@ _get_flow_table(struct mlx5_ib_dev *dev, esw_encap) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; priority = FDB_BYPASS_PATH; + } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) { + max_table_size = + BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, + log_max_ft_size)); + priority = fs_matcher->priority; } max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); @@ -3977,6 +3982,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, prio = &dev->flow_db->egress_prios[priority]; else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) prio = &dev->flow_db->fdb; + else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) + prio = &dev->flow_db->rdma_rx[priority]; if (!prio) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index cb41a7e6255a..6abfbf3a69b7 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -200,6 +200,7 @@ struct mlx5_ib_flow_db { struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS]; struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; struct mlx5_ib_flow_prio fdb; + struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT]; struct mlx5_flow_table *lag_demux_ft; /* Protect flow steering bypass flow tables * when add/del flow rules. 
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 7e9900b0e746..88b6ca70c2fe 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -43,6 +43,7 @@ enum mlx5_ib_uapi_flow_table_type { MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0, MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2, + MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3, }; enum mlx5_ib_uapi_flow_action_packet_reformat_type { From 958b6813f0c077c45a36d0a10b5bdcd27216eabe Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 26 Aug 2019 14:53:49 +0300 Subject: [PATCH 69/99] RDMA/efa: Remove umem check on dereg MR flow EFA driver is not a kverbs provider, the check for MR umem is redundant. Link: https://lore.kernel.org/r/20190826115350.21718-2-galpress@amazon.com Reviewed-by: Firas JahJah Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_verbs.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 70851bd7f801..1e23c621a419 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1500,14 +1500,12 @@ int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey); - if (mr->umem) { - params.l_key = mr->ibmr.lkey; - err = efa_com_dereg_mr(&dev->edev, ¶ms); - if (err) - return err; - } - ib_umem_release(mr->umem); + params.l_key = mr->ibmr.lkey; + err = efa_com_dereg_mr(&dev->edev, ¶ms); + if (err) + return err; + ib_umem_release(mr->umem); kfree(mr); return 0; From 1bc5ba836e3ba02b8c7981a1fb453fe33513526d Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 26 Aug 2019 14:53:50 +0300 Subject: [PATCH 70/99] RDMA/efa: Use existing FIELD_SIZEOF macro Use FIELD_SIZEOF macro instead of hard coding it in field_avail macro. Link: https://lore.kernel.org/r/20190826115350.21718-3-galpress@amazon.com Reviewed-by: Daniel Kranzdorf Reviewed-by: Firas JahJah Signed-off-by: Gal Pressman Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 1e23c621a419..4edae89e8e3c 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -148,7 +148,7 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah) } #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \ - sizeof(((typeof(x) *)0)->fld) <= (sz)) + FIELD_SIZEOF(typeof(x), fld) <= (sz)) #define is_reserved_cleared(reserved) \ !memchr_inv(reserved, 0, sizeof(reserved)) From fd1a52f38c23c5d99fc2e6178ac4b74c5cbcf793 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Wed, 21 Aug 2019 19:30:09 +0200 Subject: [PATCH 71/99] RDMA/iwpm: Delete unnecessary checks before the macro call "dev_kfree_skb" The dev_kfree_skb() function performs also input parameter validation. Thus the test around the shown calls is not needed. This issue was detected by using the Coccinelle software. 
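Since dev_kfree_skb() already tolerates a NULL pointer, the transformation is simply (illustrative):

	-	if (skb)
	-		dev_kfree_skb(skb);
	+	dev_kfree_skb(skb);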
Link: https://lore.kernel.org/r/16df4c50-1f61-d7c4-3fc8-3073666d281d@web.de Signed-off-by: Markus Elfring Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/iwpm_msg.c | 9 +++------ drivers/infiniband/core/iwpm_util.c | 9 +++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index f1a873d4e842..46686990a827 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c @@ -124,8 +124,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) return ret; pid_query_error: pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); if (nlmsg_request) iwpm_free_nlmsg_request(&nlmsg_request->kref); return ret; @@ -214,8 +213,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) add_mapping_error: pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client); add_mapping_error_nowarn: - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); if (nlmsg_request) iwpm_free_nlmsg_request(&nlmsg_request->kref); return ret; @@ -308,8 +306,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) query_mapping_error: pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client); query_mapping_error_nowarn: - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); if (nlmsg_request) iwpm_free_nlmsg_request(&nlmsg_request->kref); return ret; diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index c7ad3499228c..13495b43dbc1 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -655,8 +655,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) return 0; mapinfo_num_error: pr_info("%s: %s\n", __func__, err_str); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); return ret; } @@ -778,8 +777,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) send_mapping_info_exit: if (ret) { pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); return ret; } send_nlmsg_done(skb, nl_client, iwpm_pid); @@ -834,7 +832,6 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version) return 0; hello_num_error: pr_info("%s: %s\n", __func__, err_str); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); return ret; } From 29af94987ba68a0244b73e8a0b051addb8853f93 Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Mon, 19 Aug 2019 15:08:14 +0300 Subject: [PATCH 72/99] IB/mlx5: Remove check of FW capabilities in ODP page fault handling As page fault handling is initiated by FW, there is no need to check that the ODP supports the operation and transport. Link: https://lore.kernel.org/r/20190819120815.21225-3-leon@kernel.org Signed-off-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/odp.c | 48 +------------------------------- 1 file changed, 1 insertion(+), 47 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 5b642d81e617..8c8e5f41a82f 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -990,17 +990,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev, return ret < 0 ? 
ret : npages; } -static const u32 mlx5_ib_odp_opcode_cap[] = { - [MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND, - [MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND, - [MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND, - [MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE, - [MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE, - [MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ, - [MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC, - [MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC, -}; - /* * Parse initiator WQE. Advances the wqe pointer to point at the * scatter-gather list, and set wqe_end to the end of the WQE. @@ -1011,7 +1000,6 @@ static int mlx5_ib_mr_initiator_pfault_handler( { struct mlx5_wqe_ctrl_seg *ctrl = *wqe; u16 wqe_index = pfault->wqe.wqe_index; - u32 transport_caps; struct mlx5_base_av *av; unsigned ds, opcode; #if defined(DEBUG) @@ -1059,29 +1047,8 @@ static int mlx5_ib_mr_initiator_pfault_handler( opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & MLX5_WQE_CTRL_OPCODE_MASK; - switch (qp->ibqp.qp_type) { - case IB_QPT_XRC_INI: + if (qp->ibqp.qp_type == IB_QPT_XRC_INI) *wqe += sizeof(struct mlx5_wqe_xrc_seg); - transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps; - break; - case IB_QPT_RC: - transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps; - break; - case IB_QPT_UD: - transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps; - break; - default: - mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n", - qp->ibqp.qp_type); - return -EFAULT; - } - - if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) || - !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) { - mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n", - opcode); - return -EFAULT; - } if (qp->ibqp.qp_type == IB_QPT_UD) { av = *wqe; @@ -1146,19 +1113,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev, return -EFAULT; } - switch (qp->ibqp.qp_type) { - case IB_QPT_RC: - if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & - IB_ODP_SUPPORT_RECV)) - goto invalid_transport_or_opcode; - break; - default: -invalid_transport_or_opcode: - mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n", - qp->ibqp.qp_type); - return -EFAULT; - } - *wqe_end = wqe + wqe_size; return 0; From 75e46fc02c975f401e70a53ecd55d475081d13a3 Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Mon, 19 Aug 2019 15:08:15 +0300 Subject: [PATCH 73/99] IB/mlx5: Add page fault handler for DC initiator WQE Parsing DC initiator WQEs upon page fault requires skipping an address vector segment, as in UD WQEs. 
Link: https://lore.kernel.org/r/20190819120815.21225-4-leon@kernel.org Signed-off-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/odp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 8c8e5f41a82f..429c72407d7f 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1050,7 +1050,8 @@ static int mlx5_ib_mr_initiator_pfault_handler( if (qp->ibqp.qp_type == IB_QPT_XRC_INI) *wqe += sizeof(struct mlx5_wqe_xrc_seg); - if (qp->ibqp.qp_type == IB_QPT_UD) { + if (qp->ibqp.qp_type == IB_QPT_UD || + qp->qp_sub_type == MLX5_IB_QPT_DCI) { av = *wqe; if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) *wqe += sizeof(struct mlx5_av); From 82e620d9c3a096108e8a2f90ce7199bdad2040eb Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Wed, 21 Aug 2019 21:14:30 +0800 Subject: [PATCH 74/99] RDMA/hns: Modify the data structure of hns_roce_av we change type of some members to u32/u8 from __le32 as well as split sl_tclass_flowlabel into three variables in hns_roce_av. Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1566393276-42555-4-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_ah.c | 23 +++++++-------------- drivers/infiniband/hw/hns/hns_roce_device.h | 8 ++++--- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 9 +++----- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 9 +++----- 4 files changed, 18 insertions(+), 31 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index cdd2ac24fc2a..90e08c0c332d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -66,11 +66,9 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr, HNS_ROCE_VLAN_SL_SHIFT; } - ah->av.port_pd = cpu_to_le32(to_hr_pd(ibah->pd)->pdn | - (rdma_ah_get_port_num(ah_attr) << - HNS_ROCE_PORT_NUM_SHIFT)); + ah->av.port = rdma_ah_get_port_num(ah_attr); ah->av.gid_index = grh->sgid_index; - ah->av.vlan = cpu_to_le16(vlan_tag); + ah->av.vlan = vlan_tag; ah->av.vlan_en = vlan_en; dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index, ah->av.vlan); @@ -79,8 +77,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr, ah->av.stat_rate = IB_RATE_10_GBPS; memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); - ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) << - HNS_ROCE_SL_SHIFT); + ah->av.sl = rdma_ah_get_sl(ah_attr); return 0; } @@ -91,17 +88,11 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) memset(ah_attr, 0, sizeof(*ah_attr)); - rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_SL_SHIFT)); - rdma_ah_set_port_num(ah_attr, (le32_to_cpu(ah->av.port_pd) >> - HNS_ROCE_PORT_NUM_SHIFT)); + rdma_ah_set_sl(ah_attr, ah->av.sl); + rdma_ah_set_port_num(ah_attr, ah->av.port); rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate); - rdma_ah_set_grh(ah_attr, NULL, - (le32_to_cpu(ah->av.sl_tclass_flowlabel) & - HNS_ROCE_FLOW_LABEL_MASK), ah->av.gid_index, - ah->av.hop_limit, - (le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_TCLASS_SHIFT)); + rdma_ah_set_grh(ah_attr, NULL, ah->av.flowlabel, + ah->av.gid_index, ah->av.hop_limit, ah->av.tclass); rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid); return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h 
index c7bf738fb137..011e0384b56e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -568,14 +568,16 @@ struct hns_roce_raq_table { }; struct hns_roce_av { - __le32 port_pd; + u8 port; u8 gid_index; u8 stat_rate; u8 hop_limit; - __le32 sl_tclass_flowlabel; + u32 flowlabel; + u8 sl; + u8 tclass; u8 dgid[HNS_ROCE_GID_SIZE]; u8 mac[ETH_ALEN]; - __le16 vlan; + u16 vlan; bool vlan_en; }; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 4c3ac2b75966..e687b439269e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -175,13 +175,11 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_FLOW_LABEL_M, UD_SEND_WQE_U32_36_FLOW_LABEL_S, - ah->av.sl_tclass_flowlabel & - HNS_ROCE_FLOW_LABEL_MASK); + ah->av.flowlabel); roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_PRIORITY_M, UD_SEND_WQE_U32_36_PRIORITY_S, - le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_SL_SHIFT); + ah->av.sl); roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_SGID_INDEX_M, UD_SEND_WQE_U32_36_SGID_INDEX_S, @@ -195,8 +193,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->u32_40, UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, - ah->av.sl_tclass_flowlabel >> - HNS_ROCE_TCLASS_SHIFT); + ah->av.tclass); memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 206dfdb16cd5..67e56b885771 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -397,18 +397,15 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M, V2_UD_SEND_WQE_BYTE_36_TCLASS_S, - ah->av.sl_tclass_flowlabel >> - HNS_ROCE_TCLASS_SHIFT); + ah->av.tclass); roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, - ah->av.sl_tclass_flowlabel & - HNS_ROCE_FLOW_LABEL_MASK); + ah->av.flowlabel); roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M, V2_UD_SEND_WQE_BYTE_40_SL_S, - le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_SL_SHIFT); + ah->av.sl); roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M, V2_UD_SEND_WQE_BYTE_40_PORTN_S, From 90c559b1864aa52518c10d5535bf5d5ea74189e5 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Wed, 21 Aug 2019 21:14:31 +0800 Subject: [PATCH 75/99] RDMA/hns: Remove the some magic number Here uses the meaningful macro instead of the magic number for readability. 
Signed-off-by: Lijun Ou Signed-off-by: Lang Chen Link: https://lore.kernel.org/r/1566393276-42555-5-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 5 +++++ drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 3 ++- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 ++-- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 3 ++- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 011e0384b56e..5eb7134f2fce 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -127,6 +127,11 @@ #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 #define SRQ_DB_REG 0x230 +/* The chip implementation of the consumer index is calculated + * according to twice the actual EQ depth + */ +#define EQ_DEPTH_COEFF 2 + enum { HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0, HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index e687b439269e..3fcd0bdc79dd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -4018,7 +4018,8 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, ++eq->cons_index; ceqes_found = 1; - if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) { + if (eq->cons_index > + EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) { dev_warn(&eq->hr_dev->pdev->dev, "cons_index overflow, set back to 0.\n"); eq->cons_index = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 67e56b885771..d4c4c4157085 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5084,7 +5084,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, ++eq->cons_index; ceqe_found = 1; - if (eq->cons_index > (2 * eq->entries - 1)) { + if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) { dev_warn(dev, "cons_index overflow, set back to 0.\n"); eq->cons_index = 0; } @@ -6501,7 +6501,7 @@ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle) handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); - msleep(100); + msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY); __hns_roce_hw_v2_uninit_instance(handle, false); return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 58931b5399f8..1301629769ed 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -96,7 +96,8 @@ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 -#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 +#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 +#define HNS_ROCE_V2_HW_RST_UNINT_DELAY 100 #define HNS_ROCE_CONTEXT_HOP_NUM 1 #define HNS_ROCE_SCCC_HOP_NUM 1 From bfe860351e31e71913d4e6c46aae5724b661a519 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Wed, 21 Aug 2019 21:14:32 +0800 Subject: [PATCH 76/99] RDMA/hns: Fix cast from or to restricted __le32 for driver Sparse is whining about the u32 and __le32 mixed usage in the driver. The roce_set_field() is used to __le32 data of hardware only. If a variable is not delivered to the hardware, the __le32 type and related operations are not required. 
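As a reader's aid, the annotation rule sparse enforces here, shown on an invented structure (a sketch assuming <linux/types.h> and <linux/kernel.h>; the struct, field and function names are made up): only data that the device actually reads keeps the __le32 type, while state that never leaves the CPU stays plain u32.

	struct hw_entry {
		__le32 base_addr_l;	/* consumed by hardware: little endian on the wire */
	};

	static void fill_entry(struct hw_entry *ent, u64 dma, u32 *host_only)
	{
		ent->base_addr_l = cpu_to_le32(lower_32_bits(dma));	/* device-visible */
		*host_only = lower_32_bits(dma);	/* CPU-only state, no endian conversion */
	}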
Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1566393276-42555-6-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 4 +- drivers/infiniband/hw/hns/hns_roce_hem.c | 34 ++--- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 37 +++-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 141 ++++++++++---------- drivers/infiniband/hw/hns/hns_roce_mr.c | 7 +- drivers/infiniband/hw/hns/hns_roce_qp.c | 8 +- 6 files changed, 105 insertions(+), 126 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 5eb7134f2fce..96d1302abde1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -657,7 +657,7 @@ struct hns_roce_qp { u8 rdb_en; u8 sdb_en; u32 doorbell_qpn; - __le32 sq_signal_bits; + u32 sq_signal_bits; u32 sq_next_wqe; struct hns_roce_wq sq; @@ -712,7 +712,7 @@ enum { }; struct hns_roce_ceqe { - u32 comp; + __le32 comp; }; struct hns_roce_aeqe { diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index f2c4fef0b70e..e82215774032 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -368,9 +368,9 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, unsigned long flags; struct hns_roce_hem_iter iter; void __iomem *bt_cmd; - u32 bt_cmd_h_val = 0; - u32 bt_cmd_val[2]; - u32 bt_cmd_l = 0; + __le32 bt_cmd_val[2]; + __le32 bt_cmd_h = 0; + __le32 bt_cmd_l = 0; u64 bt_ba = 0; int ret = 0; @@ -380,30 +380,20 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, switch (table->type) { case HEM_TYPE_QPC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); - break; case HEM_TYPE_MTPT: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, - HEM_TYPE_MTPT); - break; case HEM_TYPE_CQC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); - break; case HEM_TYPE_SRQC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, - HEM_TYPE_SRQC); + roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); break; default: return ret; } - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, + + roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); + roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); + roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); /* Currently iter only a chunk */ for (hns_roce_hem_first(table->hem[i], &iter); @@ -429,13 +419,13 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, return -EBUSY; } - bt_cmd_l = (u32)bt_ba; - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, + bt_cmd_l = cpu_to_le32(bt_ba); + roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> BT_BA_SHIFT); bt_cmd_val[0] = bt_cmd_l; - bt_cmd_val[1] = bt_cmd_h_val; + bt_cmd_val[1] = bt_cmd_h; hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); spin_unlock_irqrestore(lock, flags); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c 
b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 3fcd0bdc79dd..07b72061faa5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -73,7 +73,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, int ps_opcode = 0, i = 0; unsigned long flags = 0; void *wqe = NULL; - u32 doorbell[2]; + __le32 doorbell[2]; int nreq = 0; u32 ind = 0; int ret = 0; @@ -332,10 +332,10 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); - doorbell[0] = le32_to_cpu(sq_db.u32_4); - doorbell[1] = le32_to_cpu(sq_db.u32_8); + doorbell[0] = sq_db.u32_4; + doorbell[1] = sq_db.u32_8; - hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l); + hns_roce_write64_k(doorbell, qp->sq.db_reg_l); qp->sq_next_wqe = ind; } @@ -360,7 +360,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct device *dev = &hr_dev->pdev->dev; struct hns_roce_rq_db rq_db; - uint32_t doorbell[2] = {0}; + __le32 doorbell[2] = {0}; spin_lock_irqsave(&hr_qp->rq.lock, flags); ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1); @@ -434,11 +434,10 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, 1); - doorbell[0] = le32_to_cpu(rq_db.u32_4); - doorbell[1] = le32_to_cpu(rq_db.u32_8); + doorbell[0] = rq_db.u32_4; + doorbell[1] = rq_db.u32_8; - hns_roce_write64_k((__le32 *)doorbell, - hr_qp->rq.db_reg_l); + hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); } } spin_unlock_irqrestore(&hr_qp->rq.lock, flags); @@ -712,7 +711,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) struct ib_cq *cq; struct ib_pd *pd; union ib_gid dgid; - u64 subnet_prefix; + __be64 subnet_prefix; int attr_mask = 0; int ret; int i, j; @@ -2162,7 +2161,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, { struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); u32 notification_flag; - __le32 doorbell[2]; + __le32 doorbell[2] = {}; notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; @@ -2437,18 +2436,12 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, switch (table->type) { case HEM_TYPE_QPC: - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); bt_ba = priv->bt_table.qpc_buf.map >> 12; break; case HEM_TYPE_MTPT: - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT); bt_ba = priv->bt_table.mtpt_buf.map >> 12; break; case HEM_TYPE_CQC: - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); bt_ba = priv->bt_table.cqc_buf.map >> 12; break; case HEM_TYPE_SRQC: @@ -2457,6 +2450,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, default: return 0; } + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); @@ -2481,7 +2476,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, end -= HW_SYNC_SLEEP_TIME_INTERVAL; } - bt_cmd_val[0] = (__le32)bt_ba; + bt_cmd_val[0] = cpu_to_le32(bt_ba); roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); @@ -2624,7 +2619,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SIGNALING_TYPE_S, - le32_to_cpu(hr_qp->sq_signal_bits)); + hr_qp->sq_signal_bits); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, 1); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, @@ -2930,7 +2925,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 1); roce_set_bit(context->qpc_bytes_32, QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, - le32_to_cpu(hr_qp->sq_signal_bits)); + hr_qp->sq_signal_bits); port = (attr_mask & IB_QP_PORT) ? 
(attr->port_num - 1) : hr_qp->port; @@ -3575,7 +3570,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); - qp_attr->rnr_retry = (u8)context->rnr_retry; + qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry); done: qp_attr->cur_qp_state = qp_attr->qp_state; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index d4c4c4157085..77309832100a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1039,7 +1039,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, * If the command is sync, wait for the firmware to write back, * if multi descriptors to be sent, use the first one to check */ - if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) { + if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) { do { if (hns_roce_cmq_csq_done(hr_dev)) break; @@ -1056,7 +1056,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, desc_to_use = &csq->desc[ntc]; desc[handle] = *desc_to_use; dev_dbg(hr_dev->dev, "Get cmq desc:\n"); - desc_ret = desc[handle].retval; + desc_ret = le16_to_cpu(desc[handle].retval); if (desc_ret == CMD_EXEC_SUCCESS) ret = 0; else @@ -1119,7 +1119,7 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) return ret; resp = (struct hns_roce_query_version *)desc.data; - hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version); + hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version); hr_dev->vendor_id = hr_dev->pci_dev->vendor; return 0; @@ -1293,7 +1293,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, swt = (struct hns_roce_vf_switch *)desc.data; hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); - swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL); + swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M, VF_SWITCH_DATA_FUN_ID_VF_ID_S, @@ -1719,9 +1719,10 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); if (i == 0) { - req_a->base_addr_l = link_tbl->table.map & 0xffffffff; - req_a->base_addr_h = (link_tbl->table.map >> 32) & - 0xffffffff; + req_a->base_addr_l = + cpu_to_le32(link_tbl->table.map & 0xffffffff); + req_a->base_addr_h = + cpu_to_le32(link_tbl->table.map >> 32); roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S, @@ -1730,13 +1731,15 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz); - req_a->head_ba_l = entry[0].blk_ba0; - req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr; + req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0); + req_a->head_ba_h_nxtptr = + cpu_to_le32(entry[0].blk_ba1_nxt_ptr); roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S, 0); } else { - req_b->tail_ba_l = entry[page_num - 1].blk_ba0; + req_b->tail_ba_l = + cpu_to_le32(entry[page_num - 1].blk_ba0); roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M, CFG_LLM_TAIL_BA_H_S, @@ -1812,17 +1815,13 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, link_tbl->pg_list[i].map = t; - entry[i].blk_ba0 = (t >> 12) & 0xffffffff; - roce_set_field(entry[i].blk_ba1_nxt_ptr, - HNS_ROCE_LINK_TABLE_BA1_M, - HNS_ROCE_LINK_TABLE_BA1_S, - t >> 44); + entry[i].blk_ba0 = (u32)(t >> 12); + 
entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44); if (i < (pg_num - 1)) - roce_set_field(entry[i].blk_ba1_nxt_ptr, - HNS_ROCE_LINK_TABLE_NXT_PTR_M, - HNS_ROCE_LINK_TABLE_NXT_PTR_S, - i + 1); + entry[i].blk_ba1_nxt_ptr |= + (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S; + } link_tbl->npages = pg_num; link_tbl->pg_sz = buf_chk_sz; @@ -1947,7 +1946,7 @@ static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev) if (status) return status; - return cpu_to_le32(mb_st->mb_status_hw_run); + return le32_to_cpu(mb_st->mb_status_hw_run); } static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev) @@ -1973,10 +1972,10 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); - mb->in_param_l = cpu_to_le64(in_param); - mb->in_param_h = cpu_to_le64(in_param) >> 32; - mb->out_param_l = cpu_to_le64(out_param); - mb->out_param_h = cpu_to_le64(out_param) >> 32; + mb->in_param_l = cpu_to_le32(in_param); + mb->in_param_h = cpu_to_le32(in_param >> 32); + mb->out_param_l = cpu_to_le32(out_param); + mb->out_param_h = cpu_to_le32(out_param >> 32); mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op); mb->token_event_en = cpu_to_le32(event << 16 | token); @@ -2118,7 +2117,7 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M, CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h); - smac_tb->vf_smac_l = reg_smac_l; + smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -2473,29 +2472,26 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent)); roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M, V2_CQC_BYTE_4_CEQN_S, vector); - cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn); roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M, V2_CQC_BYTE_8_CQN_S, hr_cq->cqn); - cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); - cq_context->cqe_cur_blk_addr = - cpu_to_le32(cq_context->cqe_cur_blk_addr); + cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT); roce_set_field(cq_context->byte_16_hop_addr, V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M, V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S, - cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT))); + mtts[0] >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(cq_context->byte_16_hop_addr, V2_CQC_BYTE_16_CQE_HOP_NUM_M, V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 
0 : hr_dev->caps.cqe_hop_num); - cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT); + cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT); roce_set_field(cq_context->byte_24_pgsz_addr, V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M, V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S, - cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT))); + mtts[1] >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(cq_context->byte_24_pgsz_addr, V2_CQC_BYTE_24_CQE_BA_PG_SZ_M, V2_CQC_BYTE_24_CQE_BA_PG_SZ_S, @@ -2505,7 +2501,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S, hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET); - cq_context->cqe_ba = (u32)(dma_handle >> 3); + cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3); roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M, V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3))); @@ -2518,7 +2514,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, V2_CQC_BYTE_44_DB_RECORD_ADDR_M, V2_CQC_BYTE_44_DB_RECORD_ADDR_S, ((u32)hr_cq->db.dma) >> 1); - cq_context->db_record_addr = hr_cq->db.dma >> 32; + cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32); roce_set_field(cq_context->byte_56_cqe_period_maxcnt, V2_CQC_BYTE_56_CQ_MAX_CNT_M, @@ -2536,7 +2532,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); u32 notification_flag; - u32 doorbell[2]; + __le32 doorbell[2]; doorbell[0] = 0; doorbell[1] = 0; @@ -2663,9 +2659,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, ++wq->tail; } else if ((*cur_qp)->ibqp.srq) { srq = to_hr_srq((*cur_qp)->ibqp.srq); - wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4, - V2_CQE_BYTE_4_WQE_INDX_M, - V2_CQE_BYTE_4_WQE_INDX_S)); + wqe_ctr = (u16)roce_get_field(cqe->byte_4, + V2_CQE_BYTE_4_WQE_INDX_M, + V2_CQE_BYTE_4_WQE_INDX_S); wc->wr_id = srq->wrid[wqe_ctr]; hns_roce_free_srq_wqe(srq, wqe_ctr); } else { @@ -3240,7 +3236,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_68_rq_db, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0); - context->rq_db_record_addr = hr_qp->rdb.dma >> 32; + context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32); qpc_mask->rq_db_record_addr = 0; roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, @@ -3623,7 +3619,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, } dmac = (u8 *)attr->ah_attr.roce.dmac; - context->wqe_sge_ba = (u32)(wqe_sge_ba >> 3); + context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); qpc_mask->wqe_sge_ba = 0; /* @@ -3679,7 +3675,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0); - context->rq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); + context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT); qpc_mask->rq_cur_blk_addr = 0; roce_set_field(context->byte_92_srq_info, @@ -3690,7 +3686,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0); - context->rq_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT); + context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT); qpc_mask->rq_nxt_blk_addr = 0; roce_set_field(context->byte_104_rq_sge, @@ -3705,7 +3701,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4); roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, 
V2_QPC_BYTE_132_TRRL_BA_S, 0); - context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4)); + context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4)); qpc_mask->trrl_ba = 0; roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, V2_QPC_BYTE_140_TRRL_BA_S, @@ -3713,7 +3709,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, V2_QPC_BYTE_140_TRRL_BA_S, 0); - context->irrl_ba = (u32)(dma_handle_2 >> 6); + context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6); qpc_mask->irrl_ba = 0; roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M, V2_QPC_BYTE_208_IRRL_BA_S, @@ -3861,7 +3857,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. */ - context->sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT); + context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT); roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, @@ -3873,8 +3869,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? - ((u32)(sge_cur_blk >> - PAGE_ADDR_SHIFT)) : 0; + cpu_to_le32(sge_cur_blk >> + PAGE_ADDR_SHIFT) : 0; roce_set_field(context->byte_184_irrl_idx, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, @@ -3887,7 +3883,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0); - context->rx_sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT); + context->rx_sq_cur_blk_addr = + cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT); roce_set_field(context->byte_232_irrl_sge, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, @@ -4262,7 +4259,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, } if (attr_mask & IB_QP_QKEY) { - context->qkey_xrcd = attr->qkey; + context->qkey_xrcd = cpu_to_le32(attr->qkey); qpc_mask->qkey_xrcd = 0; hr_qp->qkey = attr->qkey; } @@ -4531,7 +4528,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M, V2_QPC_BYTE_212_RETRY_CNT_S); - qp_attr->rnr_retry = context.rq_rnr_timer; + qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer); done: qp_attr->cur_qp_state = qp_attr->qp_state; @@ -4860,7 +4857,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, static void set_eq_cons_index_v2(struct hns_roce_eq *eq) { struct hns_roce_dev *hr_dev = eq->hr_dev; - u32 doorbell[2]; + __le32 doorbell[2]; doorbell[0] = 0; doorbell[1] = 0; @@ -5125,14 +5122,14 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG); int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG); - if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) { + if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) { struct pci_dev *pdev = hr_dev->pci_dev; struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); const struct hnae3_ae_ops *ops = ae_dev->ops; dev_err(dev, "AEQ overflow!\n"); - roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1); + int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S; roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); /* Set reset level for reset_event() */ @@ -5142,27 +5139,27 @@ 
static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) if (ops->reset_event) ops->reset_event(pdev, NULL); - roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); + int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); int_work = 1; - } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) { + } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) { dev_err(dev, "BUS ERR!\n"); - roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1); + int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S; roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); - roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); + int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); int_work = 1; - } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) { + } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) { dev_err(dev, "OTHER ERR!\n"); - roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1); + int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S; roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); - roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); + int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S; roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); int_work = 1; @@ -5972,7 +5969,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, roce_set_field(srq_context->byte_24_wqe_bt_ba, SRQC_BYTE_24_SRQ_WQE_BT_BA_M, SRQC_BYTE_24_SRQ_WQE_BT_BA_S, - cpu_to_le32(dma_handle_wqe >> 35)); + dma_handle_wqe >> 35); roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M, SRQC_BYTE_28_PD_S, pdn); @@ -5980,20 +5977,18 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1)); - srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3); - srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba); + srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3); roce_set_field(srq_context->rsv_idx_bt_ba, SRQC_BYTE_36_SRQ_IDX_BT_BA_M, SRQC_BYTE_36_SRQ_IDX_BT_BA_S, - cpu_to_le32(dma_handle_idx >> 35)); + dma_handle_idx >> 35); - srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT); srq_context->idx_cur_blk_addr = - cpu_to_le32(srq_context->idx_cur_blk_addr); + cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT); roce_set_field(srq_context->byte_44_idxbufpgsz_addr, SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M, SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S, - cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT))); + mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(srq_context->byte_44_idxbufpgsz_addr, SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M, SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S, @@ -6009,13 +6004,12 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S, hr_dev->caps.idx_buf_pg_sz); - srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT); srq_context->idx_nxt_blk_addr = - cpu_to_le32(srq_context->idx_nxt_blk_addr); + cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT); roce_set_field(srq_context->rsv_idxnxtblkaddr, SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M, SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S, - cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT))); + mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(srq_context->byte_56_xrc_cqn, SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S, cqn); @@ -6209,9 +6203,10 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, */ wmb(); - srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S | - (srq->srqn & V2_DB_BYTE_4_TAG_M); - 
srq_db.parameter = srq->head; + srq_db.byte_4 = + cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S | + (srq->srqn & V2_DB_BYTE_4_TAG_M)); + srq_db.parameter = cpu_to_le32(srq->head); hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 8157679021b9..5f8416ba09a9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -1426,7 +1426,7 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) { struct hns_roce_mr *mr = to_hr_mr(ibmr); - mr->pbl_buf[mr->npages++] = cpu_to_le64(addr); + mr->pbl_buf[mr->npages++] = addr; return 0; } @@ -1597,10 +1597,9 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev, /* Save page addr, low 12 bits : 0 */ for (i = 0; i < count; i++) { if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - mtts[i] = cpu_to_le64(bufs[npage] >> - PAGE_ADDR_SHIFT); + mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT; else - mtts[i] = cpu_to_le64(bufs[npage]); + mtts[i] = bufs[npage]; npage++; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index ba8176869f51..8868172d99c8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -641,7 +641,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_udata *udata, unsigned long sqpn, struct hns_roce_qp *hr_qp) { - dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { 0 }; + dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; struct device *dev = hr_dev->dev; struct hns_roce_ib_create_qp ucmd; struct hns_roce_ib_create_qp_resp resp = {}; @@ -663,9 +663,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->ibqp.qp_type = init_attr->qp_type; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) - hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR); + hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; else - hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); + hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata, hns_roce_qp_has_rq(init_attr), hr_qp); @@ -910,7 +910,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (sqpn) hr_qp->doorbell_qpn = 1; else - hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); + hr_qp->doorbell_qpn = (u32)hr_qp->qpn; if (udata) { ret = ib_copy_to_udata(udata, &resp, From e075da5e7c47ee1c20b4e8f9a20c6105be99b10f Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Wed, 21 Aug 2019 21:14:33 +0800 Subject: [PATCH 77/99] RDMA/hns: Add reset process for function-clear If the hardware is resetting, the driver should not perform the mailbox operation.Function-clear needs to add relevant judgment. 
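Condensed into a sketch, the guard this patch introduces looks as follows (simplified from the hunks below, not an exact copy): any sign of a pending or completed reset means the mailbox must not be used.

	static bool func_clear_should_skip_mbox(struct hns_roce_dev *hr_dev,
						struct hnae3_handle *handle)
	{
		const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

		/* a changed reset counter or an active HW/SW reset means the
		 * firmware cannot service the FUNC_CLEAR mailbox right now */
		return ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt ||
		       ops->get_hw_reset_stat(handle) ||
		       ops->ae_dev_resetting(handle);
	}

When the guard fires, hns_roce_function_clear() falls through to the new hns_roce_func_clr_rst_prc() handling instead of issuing the command.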
Signed-off-by: Lang Cheng Link: https://lore.kernel.org/r/1566393276-42555-7-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 98 +++++++++++++++++++++- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 + 2 files changed, 98 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 77309832100a..ecd0283f98c2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1125,26 +1125,118 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) return 0; } +static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + unsigned long reset_cnt; + bool sw_resetting; + bool hw_resetting; + + reset_cnt = ops->ae_dev_reset_cnt(handle); + hw_resetting = ops->get_hw_reset_stat(handle); + sw_resetting = ops->ae_dev_resetting(handle); + + if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting) + return true; + + return false; +} + +static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval, + int flag) +{ + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + unsigned long instance_stage; + unsigned long reset_cnt; + unsigned long end; + bool sw_resetting; + bool hw_resetting; + + instance_stage = handle->rinfo.instance_state; + reset_cnt = ops->ae_dev_reset_cnt(handle); + hw_resetting = ops->get_hw_reset_stat(handle); + sw_resetting = ops->ae_dev_resetting(handle); + + if (reset_cnt != hr_dev->reset_cnt) { + hr_dev->dis_db = true; + hr_dev->is_reset = true; + dev_info(hr_dev->dev, "Func clear success after reset.\n"); + } else if (hw_resetting) { + hr_dev->dis_db = true; + + dev_warn(hr_dev->dev, + "Func clear is pending, device in resetting state.\n"); + end = HNS_ROCE_V2_HW_RST_TIMEOUT; + while (end) { + if (!ops->get_hw_reset_stat(handle)) { + hr_dev->is_reset = true; + dev_info(hr_dev->dev, + "Func clear success after reset.\n"); + return; + } + msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); + end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; + } + + dev_warn(hr_dev->dev, "Func clear failed.\n"); + } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) { + hr_dev->dis_db = true; + + dev_warn(hr_dev->dev, + "Func clear is pending, device in resetting state.\n"); + end = HNS_ROCE_V2_HW_RST_TIMEOUT; + while (end) { + if (ops->ae_dev_reset_cnt(handle) != + hr_dev->reset_cnt) { + hr_dev->is_reset = true; + dev_info(hr_dev->dev, + "Func clear success after sw reset\n"); + return; + } + msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); + end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; + } + + dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n"); + } else { + if (retval && !flag) + dev_warn(hr_dev->dev, + "Func clear read failed, ret = %d.\n", retval); + + dev_warn(hr_dev->dev, "Func clear failed.\n"); + } +} static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) { + bool fclr_write_fail_flag = false; struct hns_roce_func_clear *resp; struct hns_roce_cmq_desc desc; unsigned long end; - int ret; + int ret = 0; + + if (hns_roce_func_clr_chk_rst(hr_dev)) + goto out; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false); resp = (struct 
hns_roce_func_clear *)desc.data; ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) { + fclr_write_fail_flag = true; dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n", ret); - return; + goto out; } msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL); end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS; while (end) { + if (hns_roce_func_clr_chk_rst(hr_dev)) + goto out; msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT); end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT; @@ -1161,7 +1253,9 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) } } +out: dev_err(hr_dev->dev, "Func clear fail.\n"); + hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag); } static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 1301629769ed..43219d2f7de0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -99,6 +99,8 @@ #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 #define HNS_ROCE_V2_HW_RST_UNINT_DELAY 100 +#define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT 20 + #define HNS_ROCE_CONTEXT_HOP_NUM 1 #define HNS_ROCE_SCCC_HOP_NUM 1 #define HNS_ROCE_MTT_HOP_NUM 1 From 18df508c7970c2c5e8bcaed48469bb28f9075e89 Mon Sep 17 00:00:00 2001 From: Wenpeng Liang Date: Wed, 21 Aug 2019 21:14:34 +0800 Subject: [PATCH 78/99] RDMA/hns: Remove if-else judgment statements for creating srq Because if the value of srqwqe_buf_pg_sz is zero, npages and ib_umem_page_count are equivalent, page_shif and PAGE_SHIFT are equivalent in hns_roce_create_srq. Here remove it. Signed-off-by: Wenpeng Liang Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1566393276-42555-8-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_srq.c | 29 ++++++------------------ 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index c011422112b2..1a421721cf4f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -191,15 +191,11 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, if (IS_ERR(srq->umem)) return PTR_ERR(srq->umem); - if (hr_dev->caps.srqwqe_buf_pg_sz) { - npages = (ib_umem_page_count(srq->umem) + - (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / - (1 << hr_dev->caps.srqwqe_buf_pg_sz); - page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; - ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt); - } else - ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem), - PAGE_SHIFT, &srq->mtt); + npages = (ib_umem_page_count(srq->umem) + + (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / + (1 << hr_dev->caps.srqwqe_buf_pg_sz); + page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; + ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt); if (ret) goto err_user_buf; @@ -216,19 +212,8 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, goto err_user_srq_mtt; } - if (hr_dev->caps.idx_buf_pg_sz) { - npages = (ib_umem_page_count(srq->idx_que.umem) + - (1 << hr_dev->caps.idx_buf_pg_sz) - 1) / - (1 << hr_dev->caps.idx_buf_pg_sz); - page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; - ret = hns_roce_mtt_init(hr_dev, npages, page_shift, - &srq->idx_que.mtt); - } else { - ret = hns_roce_mtt_init(hr_dev, - ib_umem_page_count(srq->idx_que.umem), - PAGE_SHIFT, - &srq->idx_que.mtt); - } + ret = hns_roce_mtt_init(hr_dev, 
ib_umem_page_count(srq->idx_que.umem), + PAGE_SHIFT, &srq->idx_que.mtt); if (ret) { dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n"); From afca2a2b837a8a93d8376f0843f225dd967aac12 Mon Sep 17 00:00:00 2001 From: Wenpeng Liang Date: Wed, 21 Aug 2019 21:14:35 +0800 Subject: [PATCH 79/99] RDMA/hns: Delete the not-used lines Delete the assignment of srq->ibsrq.ext.xrc.srq_num, beacause this value is not used. Signed-off-by: Wenpeng Liang Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1566393276-42555-9-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_srq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 1a421721cf4f..9591457eb768 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -412,7 +412,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, goto err_wrid; srq->event = hns_roce_ib_srq_event; - srq->ibsrq.ext.xrc.srq_num = srq->srqn; resp.srqn = srq->srqn; if (udata) { From 98c09b8c9767c61f97976c358c47ba17c026dc77 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Wed, 21 Aug 2019 21:14:36 +0800 Subject: [PATCH 80/99] RDMA/hns: Fix wrong assignment of qp_access_flags We used wrong shifts when set qp_attr->qp_access_flag, this patch exchange V2_QP_RRE_S and V2_QP_RWE_S to fix it. Fixes: 2a3d923f8730 ("RDMA/hns: Replace magic numbers with #defines") Signed-off-by: Weihang Li Signed-off-by: Lijun Ou Link: https://lore.kernel.org/r/1566393276-42555-10-git-send-email-oulijun@huawei.com Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index ecd0283f98c2..7a89d669f8bf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4574,9 +4574,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S); qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en, - V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) | + V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) | ((roce_get_bit(context.byte_76_srqn_op_en, - V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) | + V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) | ((roce_get_bit(context.byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S); From a6e4d254c19b541a58caced322111084b27a7788 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20Bugge?= Date: Mon, 2 Sep 2019 11:27:31 +0200 Subject: [PATCH 81/99] RDMA/cma: Fix false error message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In addr_handler(), assuming status == 0 and the device already has been acquired (id_priv->cma_dev != NULL), we get the following incorrect "error" message: RDMA CM: ADDR_ERROR: failed to resolve IP. 
status 0 Fixes: 498683c6a7ee ("IB/cma: Add debug messages to error flows") Link: https://lore.kernel.org/r/20190902092731.1055757-1-haakon.bugge@oracle.com Signed-off-by: HÃ¥kon Bugge Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 19f1730a4f24..c7985c24f914 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3046,7 +3046,7 @@ static void addr_handler(int status, struct sockaddr *src_addr, if (status) pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", status); - } else { + } else if (status) { pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status); } From bd0abfa8ca1dab85e9cedbf1988e5b4e53c67584 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Tue, 3 Sep 2019 14:45:19 +0200 Subject: [PATCH 82/99] Documentation/infiniband: update name of some functions Update the document since those functions had been renamed in below. Fixes: 0a18cfe4f6d7 ("IB/core: Rename ib_create_ah to rdma_create_ah") Fixes: 67b985b6c755 ("IB/core: Rename ib_modify_ah to rdma_modify_ah") Fixes: bfbfd661c9ea ("IB/core: Rename ib_query_ah to rdma_query_ah") Fixes: 365231593409 ("IB/core: Rename ib_destroy_ah to rdma_destroy_ah") Link: https://lore.kernel.org/r/20190903124519.28318-1-guoqing.jiang@cloud.ionos.com Signed-off-by: Guoqing Jiang Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- Documentation/infiniband/core_locking.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/infiniband/core_locking.rst b/Documentation/infiniband/core_locking.rst index f34669beb4fe..8f76a8a5a38f 100644 --- a/Documentation/infiniband/core_locking.rst +++ b/Documentation/infiniband/core_locking.rst @@ -29,10 +29,10 @@ Sleeping and interrupt context The corresponding functions exported to upper level protocol consumers: - - ib_create_ah - - ib_modify_ah - - ib_query_ah - - ib_destroy_ah + - rdma_create_ah + - rdma_modify_ah + - rdma_query_ah + - rdma_destroy_ah - ib_post_send - ib_post_recv - ib_req_notify_cq From 3b961b4f83161857ed671f489d00266469f34053 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 6 Sep 2019 22:17:27 +0800 Subject: [PATCH 83/99] RDMA/hns: Use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() to simplify the code a bit. This is detected by coccinelle. 
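For readers who have not seen the helper before, it folds the two calls it replaces into one; it is roughly equivalent to (paraphrased, not quoted from the kernel source):

	static void __iomem *platform_ioremap_sketch(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}

which matches the open-coded pair removed in the hunk below.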
Link: https://lore.kernel.org/r/20190906141727.26552-1-yuehaibing@huawei.com Signed-off-by: YueHaibing Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 07b72061faa5..4b5b9cfcd26f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -4517,7 +4517,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) struct platform_device *pdev = NULL; struct net_device *netdev = NULL; struct device_node *net_node; - struct resource *res; int port_cnt = 0; u8 phy_port; int ret; @@ -4556,8 +4555,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) } /* get the mapped register base address */ - res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); - hr_dev->reg_base = devm_ioremap_resource(dev, res); + hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0); if (IS_ERR(hr_dev->reg_base)) return PTR_ERR(hr_dev->reg_base); From b97b218b302d7ccc82381d3c8254396804e7d819 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Sun, 8 Sep 2019 11:07:26 +0300 Subject: [PATCH 84/99] RDMA/odp: Add missing cast for 32 bit length is a size_t which is unsigned int on 32 bit: ../drivers/infiniband/core/umem_odp.c: In function 'ib_init_umem_odp': ../include/linux/overflow.h:59:15: warning: comparison of distinct pointer types lacks a cast 59 | (void) (&__a == &__b); \ | ^~ ../drivers/infiniband/core/umem_odp.c:220:7: note: in expansion of macro 'check_add_overflow' Fixes: 204e3e5630c5 ("RDMA/odp: Check for overflow when computing the umem_odp end") Link: https://lore.kernel.org/r/20190908080726.30017-1-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/umem_odp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 32fb0b579dec..bd01c77aeef0 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -293,7 +293,7 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, umem_odp->interval_tree.start = ALIGN_DOWN(umem_odp->umem.address, page_size); if (check_add_overflow(umem_odp->umem.address, - umem_odp->umem.length, + (unsigned long)umem_odp->umem.length, &umem_odp->interval_tree.last)) return -EOVERFLOW; umem_odp->interval_tree.last = From 2ac5a6d3a917755919d97c89f8aa74e4ed958557 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 6 Sep 2019 17:57:17 +0200 Subject: [PATCH 85/99] RDMA/usnic: Avoid overly large buffers on stack It's never a good idea to put a 1000-byte buffer on the kernel stack. The compiler warns about this instance when usnic_ib_log_vf() gets inlined into usnic_ib_pci_probe(): drivers/infiniband/hw/usnic/usnic_ib_main.c:543:12: error: stack frame size of 1044 bytes in function 'usnic_ib_pci_probe' [-Werror,-Wframe-larger-than=] As this is only called for debugging purposes in the setup path, it's trivial to convert to a dynamic allocation. 
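As a general pattern (illustration with an invented size constant; the patch itself keeps the literal 1000): kernel stacks are only a few kilobytes, so buffers of this size belong on the heap in any path that may sleep, at the cost of handling allocation failure.

	#define VF_LOG_BUF_SZ	1000	/* hypothetical name for the hard-coded size */

	static void log_vf_sketch(struct usnic_ib_vf *vf)
	{
		char *buf = kzalloc(VF_LOG_BUF_SZ, GFP_KERNEL);	/* heap, not stack */

		if (!buf)
			return;		/* debug-only output, safe to drop on -ENOMEM */

		usnic_ib_dump_vf(vf, buf, VF_LOG_BUF_SZ);
		usnic_dbg("%s\n", buf);
		kfree(buf);
	}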
Link: https://lore.kernel.org/r/20190906155730.2750200-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/usnic/usnic_ib_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 03f54eb9404b..c9abe1c01e4e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -89,9 +89,15 @@ static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz) void usnic_ib_log_vf(struct usnic_ib_vf *vf) { - char buf[1000]; - usnic_ib_dump_vf(vf, buf, sizeof(buf)); + char *buf = kzalloc(1000, GFP_KERNEL); + + if (!buf) + return; + + usnic_ib_dump_vf(vf, buf, 1000); usnic_dbg("%s\n", buf); + + kfree(buf); } /* Start of netdev section */ From 4a9d46a9fe14401f21df69cea97c62396d5fb053 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Tue, 10 Sep 2019 17:21:19 -0500 Subject: [PATCH 86/99] RDMA: Fix goto target to release the allocated memory In bnxt_re_create_srq(), when ib_copy_to_udata() fails allocated memory should be released by goto fail. Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters") Link: https://lore.kernel.org/r/20190910222120.16517-1-navid.emamdoost@gmail.com Signed-off-by: Navid Emamdoost Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index f9e97d0cc459..b4149dc9e824 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1398,7 +1398,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!"); bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq); - goto exit; + goto fail; } } if (nq) From 0404bd629fd4d05e7ab815e6e1ecd81e728678d3 Mon Sep 17 00:00:00 2001 From: Bernard Metzler Date: Mon, 9 Sep 2019 15:24:27 +0200 Subject: [PATCH 87/99] RDMA/siw: Fix page address mapping in TX path Use the correct kmap()/kunmap() flow to determine page address used for CRC computation. Using page_address() is wrong, since page might be in highmem. 
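For readers unfamiliar with highmem: page_address() only yields a usable pointer for pages that have a permanent kernel mapping, so a highmem page must be mapped around the access. A minimal sketch of the safe pattern (generic names, assuming <linux/highmem.h> and <crypto/hash.h>, not the driver code itself):

	static int crc_update_page(struct shash_desc *desc, struct page *page,
				   unsigned int off, unsigned int len)
	{
		/* page_address(page) could be NULL here if the page sits in highmem */
		u8 *va = kmap(page);	/* valid for lowmem and highmem alike */
		int rv;

		rv = crypto_shash_update(desc, va + off, len);
		kunmap(page);
		return rv;
	}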
Fixes: b9be6f18cf9e ("rdma/siw: transmit path") Link: https://lore.kernel.org/r/20190909132427.30264-1-bmt@zurich.ibm.com Reported-by: Krishnamraju Eraparaju Signed-off-by: Bernard Metzler Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw_qp_tx.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 43020d2040fc..eb83fe183318 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -520,11 +520,12 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) c_tx->mpa_crc_hd, iov[seg].iov_base, plen); - } else if (do_crc) - crypto_shash_update( - c_tx->mpa_crc_hd, - page_address(p) + fp_off, - plen); + } else if (do_crc) { + crypto_shash_update(c_tx->mpa_crc_hd, + kmap(p) + fp_off, + plen); + kunmap(p); + } } else { u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); From 1ba7c8f800586805c409f5b1ff1803318a41933a Mon Sep 17 00:00:00 2001 From: Sergey Gorenko Date: Thu, 12 Sep 2019 10:35:34 +0000 Subject: [PATCH 88/99] IB/iser: Support up to 16MB data transfer in a single command The maximum supported IO size for the iSER driver is 8MB. The current value is limited by the ISCSI_ISER_MAX_SG_TABLESIZE macro. But the driver is able to handle 16MB IOs without any significant changes. Increasing this limit can be useful for storage arrays that are fine-tuned for IOs larger than 8 MB. This commit allows configuring a maximum IO size of up to 16MB using the max_sectors module parameter. Link: https://lore.kernel.org/r/20190912103534.18210-1-sergeygo@mellanox.com Signed-off-by: Sergey Gorenko Reviewed-by: Max Gurtovoy Acked-by: Sagi Grimberg Signed-off-by: Jason Gunthorpe --- drivers/infiniband/ulp/iser/iscsi_iser.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 39bf213444cb..52ce63592dcf 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -102,9 +102,10 @@ /* Default support is 512KB I/O size */ #define ISER_DEF_MAX_SECTORS 1024 -#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K) -/* Maximum support is 8MB I/O size */ -#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K) +#define ISCSI_ISER_DEF_SG_TABLESIZE \ + ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K) +/* Maximum support is 16MB I/O size */ +#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K) #define ISER_DEF_XMIT_CMDS_DEFAULT 512 #if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT From 4db8fd4973329810c8549b08df12907e6fe921c2 Mon Sep 17 00:00:00 2001 From: Bernard Metzler Date: Mon, 9 Sep 2019 15:29:45 +0200 Subject: [PATCH 89/99] RDMA/siw: Relax from kmap_atomic() use in TX path Since the transmit path is never executed in an atomic context, we do not need kmap_atomic() and can always use the less demanding kmap().
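To make the distinction concrete, here is a minimal sketch (invented helper names, not the driver's code): kmap_atomic() disables pagefaults and preemption until the matching kunmap_atomic(), so nothing in between may sleep, while kmap()/kunmap() can be used freely from process context such as this transmit path:

/* Sketch only: the two mapping styles side by side. */
static void copy_from_page_atomic(void *dst, struct page *p,
				  unsigned int off, unsigned int len)
{
	void *buf = kmap_atomic(p);	/* must not sleep before kunmap_atomic() */

	memcpy(dst, buf + off, len);
	kunmap_atomic(buf);
}

static void copy_from_page(void *dst, struct page *p,
			   unsigned int off, unsigned int len)
{
	void *buf = kmap(p);		/* may sleep; fine in process context */

	memcpy(dst, buf + off, len);
	kunmap(p);
}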
Link: https://lore.kernel.org/r/20190909132945.30462-1-bmt@zurich.ibm.com Signed-off-by: Bernard Metzler Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw_qp_tx.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 8e72f955921d..5d97bba0ce6d 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -76,16 +76,15 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr) if (unlikely(!p)) return -EFAULT; - buffer = kmap_atomic(p); + buffer = kmap(p); if (likely(PAGE_SIZE - off >= bytes)) { memcpy(paddr, buffer + off, bytes); - kunmap_atomic(buffer); } else { unsigned long part = bytes - (PAGE_SIZE - off); memcpy(paddr, buffer + off, part); - kunmap_atomic(buffer); + kunmap(p); if (!mem->is_pbl) p = siw_get_upage(mem->umem, @@ -97,11 +96,10 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr) if (unlikely(!p)) return -EFAULT; - buffer = kmap_atomic(p); - memcpy(paddr + part, buffer, - bytes - part); - kunmap_atomic(buffer); + buffer = kmap(p); + memcpy(paddr + part, buffer, bytes - part); } + kunmap(p); } } return (int)bytes; From c05fc15634f9316d493cddb32319c2711b0d8f59 Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Wed, 11 Sep 2019 07:30:41 -0400 Subject: [PATCH 90/99] IB/hfi1: Add traces for TID RDMA READ This patch adds traces to debug packet loss and retry for TID RDMA READ protocol. Link: https://lore.kernel.org/r/20190911113041.126040.64541.stgit@awfm-01.aw.intel.com Reviewed-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/rc.c | 5 ++++ drivers/infiniband/hw/hfi1/tid_rdma.c | 8 ++++++ drivers/infiniband/hw/hfi1/trace_tid.h | 38 ++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 024a7c2b6124..eeca08d3b470 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -1483,6 +1483,11 @@ static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn, req->ack_pending = cur_seg - req->comp_seg; priv->pending_tid_r_segs += req->ack_pending; qp->s_num_rd_atomic += req->ack_pending; + trace_hfi1_tid_req_update_num_rd_atomic(qp, 0, + wqe->wr.opcode, + wqe->psn, + wqe->lpsn, + req); } else { priv->pending_tid_r_segs += req->total_segs; qp->s_num_rd_atomic += req->total_segs; diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 6141f4edc6bf..b4dcc4d29f84 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -2646,6 +2646,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, u32 fpsn; lockdep_assert_held(&qp->r_lock); + trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn); + trace_hfi1_sender_read_kdeth_eflags(qp); + trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0); spin_lock(&qp->s_lock); /* If the psn is out of valid range, drop the packet */ if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || @@ -2710,6 +2713,8 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, goto s_unlock; req = wqe_to_tid_req(wqe); + trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn, + wqe->lpsn, req); switch (rcv_type) { case RHF_RCV_TYPE_EXPECTED: switch (rte) { @@ -2724,6 +2729,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, * 
packets that could be still in the fabric. */ flow = &req->flows[req->clear_tail]; + trace_hfi1_tid_flow_read_kdeth_eflags(qp, + req->clear_tail, + flow); if (priv->s_flags & HFI1_R_TID_SW_PSN) { diff = cmp_psn(psn, flow->flow_state.r_next_psn); diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h index 4388b594ed1b..343fb9894a82 100644 --- a/drivers/infiniband/hw/hfi1/trace_tid.h +++ b/drivers/infiniband/hw/hfi1/trace_tid.h @@ -627,6 +627,12 @@ DEFINE_EVENT(/* event */ TP_ARGS(qp, index, flow) ); +DEFINE_EVENT(/* event */ + hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags, + TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), + TP_ARGS(qp, index, flow) +); + DECLARE_EVENT_CLASS(/* tid_node */ hfi1_tid_node_template, TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, @@ -851,6 +857,12 @@ DEFINE_EVENT(/* event */ TP_ARGS(qp, psn) ); +DEFINE_EVENT(/* event */ + hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags, + TP_PROTO(struct rvt_qp *qp, u32 psn), + TP_ARGS(qp, psn) +); + DECLARE_EVENT_CLASS(/* sender_info */ hfi1_sender_info_template, TP_PROTO(struct rvt_qp *qp), @@ -955,6 +967,12 @@ DEFINE_EVENT(/* event */ TP_ARGS(qp) ); +DEFINE_EVENT(/* event */ + hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags, + TP_PROTO(struct rvt_qp *qp), + TP_ARGS(qp) +); + DECLARE_EVENT_CLASS(/* tid_read_sender */ hfi1_tid_read_sender_template, TP_PROTO(struct rvt_qp *qp, char newreq), @@ -1015,6 +1033,12 @@ DEFINE_EVENT(/* event */ TP_ARGS(qp, newreq) ); +DEFINE_EVENT(/* event */ + hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags, + TP_PROTO(struct rvt_qp *qp, char newreq), + TP_ARGS(qp, newreq) +); + DECLARE_EVENT_CLASS(/* tid_rdma_request */ hfi1_tid_rdma_request_template, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, @@ -1215,6 +1239,13 @@ DEFINE_EVENT(/* event */ TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); +DEFINE_EVENT(/* event */ + hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags, + TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, + struct tid_rdma_request *req), + TP_ARGS(qp, newreq, opcode, psn, lpsn, req) +); + DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, @@ -1229,6 +1260,13 @@ DEFINE_EVENT(/* event */ TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); +DEFINE_EVENT(/* event */ + hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic, + TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, + struct tid_rdma_request *req), + TP_ARGS(qp, newreq, opcode, psn, lpsn, req) +); + DECLARE_EVENT_CLASS(/* rc_rcv_err */ hfi1_rc_rcv_err_template, TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), From 7199435414868bd656284349edc1a1f528fe3662 Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Wed, 11 Sep 2019 07:30:47 -0400 Subject: [PATCH 91/99] IB/{rdmavt, hfi1, qib}: Add a counter for credit waits This patch adds a counter for credit waits to assist field debugging. 
Link: https://lore.kernel.org/r/20190911113047.126040.10857.stgit@awfm-01.aw.intel.com Reviewed-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/chip.c | 2 ++ drivers/infiniband/hw/hfi1/chip.h | 1 + drivers/infiniband/hw/hfi1/rc.c | 10 ++------ drivers/infiniband/hw/qib/qib_rc.c | 10 ++------ drivers/infiniband/hw/qib/qib_sysfs.c | 2 ++ include/rdma/rdma_vt.h | 1 + include/rdma/rdmavt_qp.h | 35 +++++++++++++++++++++++++++ 7 files changed, 45 insertions(+), 16 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 67052dc3100c..9b1fb84a3d45 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -4101,6 +4101,7 @@ def_access_ibp_counter(rc_dupreq); def_access_ibp_counter(rdma_seq); def_access_ibp_counter(unaligned); def_access_ibp_counter(seq_naks); +def_access_ibp_counter(rc_crwaits); static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), @@ -5119,6 +5120,7 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), +[C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits), [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, access_sw_cpu_rc_acks), [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index b76cf81f927f..4ca5ac8d7e9e 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -1245,6 +1245,7 @@ enum { C_SW_IBP_RDMA_SEQ, C_SW_IBP_UNALIGNED, C_SW_IBP_SEQ_NAK, + C_SW_IBP_RC_CRWAITS, C_SW_CPU_RC_ACKS, C_SW_CPU_RC_QACKS, C_SW_CPU_RC_DELAYED_COMP, diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index eeca08d3b470..513a8aac9ccd 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -595,11 +595,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_INV: /* If no credit, return. */ - if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && - rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; + if (!rvt_rc_credit_avail(qp, wqe)) goto bail; - } if (len > pmtu) { qp->s_state = OP(SEND_FIRST); len = pmtu; @@ -632,11 +629,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) goto no_flow_control; case IB_WR_RDMA_WRITE_WITH_IMM: /* If no credit, return. */ - if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && - rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; + if (!rvt_rc_credit_avail(qp, wqe)) goto bail; - } no_flow_control: put_ib_reth_vaddr( wqe->rdma_wr.remote_addr, diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 1d5e2d4ee257..aaf7438258fa 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -313,11 +313,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: /* If no credit, return. 
*/ - if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && - rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; + if (!rvt_rc_credit_avail(qp, wqe)) goto bail; - } if (len > pmtu) { qp->s_state = OP(SEND_FIRST); len = pmtu; @@ -344,11 +341,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) goto no_flow_control; case IB_WR_RDMA_WRITE_WITH_IMM: /* If no credit, return. */ - if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && - rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; + if (!rvt_rc_credit_avail(qp, wqe)) goto bail; - } no_flow_control: ohdr->u.rc.reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr); diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 905206a0c2d5..3926be78036e 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -436,6 +436,7 @@ QIB_DIAGC_ATTR(dmawait); QIB_DIAGC_ATTR(unaligned); QIB_DIAGC_ATTR(rc_dupreq); QIB_DIAGC_ATTR(rc_seqnak); +QIB_DIAGC_ATTR(rc_crwaits); static struct attribute *diagc_default_attributes[] = { &qib_diagc_attr_rc_resends.attr, @@ -453,6 +454,7 @@ static struct attribute *diagc_default_attributes[] = { &qib_diagc_attr_unaligned.attr, &qib_diagc_attr_rc_dupreq.attr, &qib_diagc_attr_rc_seqnak.attr, + &qib_diagc_attr_rc_crwaits.attr, NULL }; diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 525848e227dc..ac5a9430abb6 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -116,6 +116,7 @@ struct rvt_ibport { u64 n_unaligned; u64 n_rc_dupreq; u64 n_rc_seqnak; + u64 n_rc_crwaits; u16 pkey_violations; u16 qkey_violations; u16 mkey_violations; diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index e06c77d76463..b550ae89bf85 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -973,6 +973,41 @@ static inline void rvt_free_rq(struct rvt_rq *rq) rq->wq = NULL; } +/** + * rvt_to_iport - Get the ibport pointer + * @qp: the qp pointer + * + * This function returns the ibport pointer from the qp pointer. + */ +static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp) +{ + struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); + + return rdi->ports[qp->port_num - 1]; +} + +/** + * rvt_rc_credit_avail - Check if there are enough RC credits for the request + * @qp: the qp + * @wqe: the request + * + * This function returns false when there are not enough credits for the given + * request and true otherwise. + */ +static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe) +{ + lockdep_assert_held(&qp->s_lock); + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && + rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { + struct rvt_ibport *rvp = rvt_to_iport(qp); + + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; + rvp->n_rc_crwaits++; + return false; + } + return true; +} + struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, u64 v, void (*cb)(struct rvt_qp *qp, u64 v)); From f8659d68e2bee5b86a1beaf7be42d942e1fc81f4 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 11 Sep 2019 07:30:53 -0400 Subject: [PATCH 92/99] IB/hfi1: Define variables as unsigned long to fix KASAN warning Define the working variables to be unsigned long to be compatible with for_each_set_bit and change types as needed. While we are at it remove unused variables from a couple of functions. 
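As a hedged illustration of why the type matters (the example below is invented, not taken from the driver): for_each_set_bit() treats its second argument as an array of unsigned long, so handing it the address of a 32-bit variable makes it read a full 8 bytes on 64-bit kernels, past the end of the variable:

/* Sketch only: widen the mask to unsigned long before walking its bits. */
static void log_selected_vls(u32 mask32)
{
	unsigned long mask = mask32;	/* full long, safe for for_each_set_bit() */
	unsigned long vl;

	for_each_set_bit(vl, &mask, BITS_PER_LONG)
		pr_info("vl %lu selected\n", vl);
}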
This was found because of the following KASAN warning: ================================================================== BUG: KASAN: stack-out-of-bounds in find_first_bit+0x19/0x70 Read of size 8 at addr ffff888362d778d0 by task kworker/u308:2/1889 CPU: 21 PID: 1889 Comm: kworker/u308:2 Tainted: G W 5.3.0-rc2-mm1+ #2 Hardware name: Intel Corporation W2600CR/W2600CR, BIOS SE5C600.86B.02.04.0003.102320141138 10/23/2014 Workqueue: ib-comp-unb-wq ib_cq_poll_work [ib_core] Call Trace: dump_stack+0x9a/0xf0 ? find_first_bit+0x19/0x70 print_address_description+0x6c/0x332 ? find_first_bit+0x19/0x70 ? find_first_bit+0x19/0x70 __kasan_report.cold.6+0x1a/0x3b ? find_first_bit+0x19/0x70 kasan_report+0xe/0x12 find_first_bit+0x19/0x70 pma_get_opa_portstatus+0x5cc/0xa80 [hfi1] ? ret_from_fork+0x3a/0x50 ? pma_get_opa_port_ectrs+0x200/0x200 [hfi1] ? stack_trace_consume_entry+0x80/0x80 hfi1_process_mad+0x39b/0x26c0 [hfi1] ? __lock_acquire+0x65e/0x21b0 ? clear_linkup_counters+0xb0/0xb0 [hfi1] ? check_chain_key+0x1d7/0x2e0 ? lock_downgrade+0x3a0/0x3a0 ? match_held_lock+0x2e/0x250 ib_mad_recv_done+0x698/0x15e0 [ib_core] ? clear_linkup_counters+0xb0/0xb0 [hfi1] ? ib_mad_send_done+0xc80/0xc80 [ib_core] ? mark_held_locks+0x79/0xa0 ? _raw_spin_unlock_irqrestore+0x44/0x60 ? rvt_poll_cq+0x1e1/0x340 [rdmavt] __ib_process_cq+0x97/0x100 [ib_core] ib_cq_poll_work+0x31/0xb0 [ib_core] process_one_work+0x4ee/0xa00 ? pwq_dec_nr_in_flight+0x110/0x110 ? do_raw_spin_lock+0x113/0x1d0 worker_thread+0x57/0x5a0 ? process_one_work+0xa00/0xa00 kthread+0x1bb/0x1e0 ? kthread_create_on_node+0xc0/0xc0 ret_from_fork+0x3a/0x50 The buggy address belongs to the page: page:ffffea000d8b5dc0 refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 flags: 0x17ffffc0000000() raw: 0017ffffc0000000 0000000000000000 ffffea000d8b5dc8 0000000000000000 raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000 page dumped because: kasan: bad access detected addr ffff888362d778d0 is located in stack of task kworker/u308:2/1889 at offset 32 in frame: pma_get_opa_portstatus+0x0/0xa80 [hfi1] this frame has 1 object: [32, 36) 'vl_select_mask' Memory state around the buggy address: ffff888362d77780: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffff888362d77800: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 >ffff888362d77880: 00 00 00 00 00 00 f1 f1 f1 f1 04 f2 f2 f2 00 00 ^ ffff888362d77900: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffff888362d77980: 00 00 00 00 00 00 00 00 f1 f1 f1 f1 04 f2 f2 f2 ================================================================== Cc: Fixes: 7724105686e7 ("IB/hfi1: add driver files") Link: https://lore.kernel.org/r/20190911113053.126040.47327.stgit@awfm-01.aw.intel.com Reviewed-by: Mike Marciniszyn Signed-off-by: Ira Weiny Signed-off-by: Kaike Wan Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/mad.c | 45 ++++++++++++++------------------ 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 184dba3c2828..d8ff063a5419 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -2326,7 +2326,7 @@ struct opa_port_status_req { __be32 vl_select_mask; }; -#define VL_MASK_ALL 0x000080ff +#define VL_MASK_ALL 0x00000000000080ffUL struct opa_port_status_rsp { __u8 port_num; @@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, } static void a0_portstatus(struct hfi1_pportdata *ppd, - struct opa_port_status_rsp *rsp, u32 
vl_select_mask) + struct opa_port_status_rsp *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, (struct opa_port_status_req *)pmp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct opa_port_status_rsp *rsp; - u32 vl_select_mask = be32_to_cpu(req->vl_select_mask); + unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask); unsigned long vl; size_t response_data_size; u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u8 port_num = req->port_num; - u8 num_vls = hweight32(vl_select_mask); + u8 num_vls = hweight64(vl_select_mask); struct _vls_pctrs *vlinfo; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); @@ -2770,7 +2769,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, hfi1_read_link_quality(dd, &rsp->link_quality_indicator); - rsp->vl_select_mask = cpu_to_be32(vl_select_mask); + rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask); rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL)); rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, @@ -2841,8 +2840,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. */ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); @@ -2883,7 +2881,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, vfi++; } - a0_portstatus(ppd, rsp, vl_select_mask); + a0_portstatus(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -2930,16 +2928,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, return error_counter_summary; } -static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, - u32 vl_select_mask) +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2994,7 +2990,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, u64 port_mask; u8 port_num; unsigned long vl; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; u16 link_width; u16 link_speed; @@ -3071,8 +3067,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. 
*/ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_data = @@ -3120,7 +3115,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, vfi++; } - a0_datacounters(ppd, rsp, vl_select_mask); + a0_datacounters(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -3215,7 +3210,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, struct _vls_ectrs *vlinfo; unsigned long vl; u64 port_mask, tmp; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; req = (struct opa_port_error_counters64_msg *)pmp->data; @@ -3273,8 +3268,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, vlinfo = &rsp->vls[0]; vfi = 0; vl_select_mask = be32_to_cpu(req->vl_select_mask); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_discards = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, @@ -3485,7 +3479,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u64 portn = be64_to_cpu(req->port_select_mask[3]); u32 counter_select = be32_to_cpu(req->counter_select_mask); - u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ + unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ unsigned long vl; if ((nports != 1) || (portn != 1 << port)) { @@ -3579,8 +3573,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, if (counter_select & CS_UNCORRECTABLE_ERRORS) write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { if (counter_select & CS_PORT_XMIT_DATA) write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); From 3d50503b3b33d87bc929eee6b65d206a36bc4579 Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Thu, 29 Aug 2019 16:41:41 +0800 Subject: [PATCH 93/99] RDMA/hns: Optimize cmd init and mode selection for hip08 There are two modes for mailbox command (cmd) queue, i.e., event mode and poll mode. For each mode, we use corresponding semaphores to protect the cmd queue resource competition, so called event_sem and poll_sem. During cmd init, both semaphores are initialized and poll mode is selected. Thus, there is no need to up poll_sema again in cmd_use_polling. Furthermore, there is no need to down the sema of the other side while switching mode. This patch aims to decouple the switch between event mode and poll mode of cmd. 
Link: https://lore.kernel.org/r/1567068102-56919-2-git-send-email-liweihang@hisilicon.com Signed-off-by: Yixian Liu Signed-off-by: Weihang Li Signed-off-by: Doug Ledford Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_cmd.c | 10 +--------- drivers/infiniband/hw/hns/hns_roce_main.c | 8 ++++---- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index ade26faade8d..455d533dd7c4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -251,23 +251,15 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev) hr_cmd->token_mask = CMD_TOKEN_MASK; hr_cmd->use_events = 1; - down(&hr_cmd->poll_sem); - return 0; } void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev) { struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; - int i; - - hr_cmd->use_events = 0; - - for (i = 0; i < hr_cmd->max_cmds; ++i) - down(&hr_cmd->event_sem); kfree(hr_cmd->context); - up(&hr_cmd->poll_sem); + hr_cmd->use_events = 0; } struct hns_roce_cmd_mailbox diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 1b757cc924c3..b5d196c119ee 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -902,6 +902,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) goto error_failed_cmd_init; } + /* EQ depends on poll mode, event mode depends on EQ */ ret = hr_dev->hw->init_eq(hr_dev); if (ret) { dev_err(dev, "eq init failed!\n"); @@ -911,8 +912,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) if (hr_dev->cmd_mod) { ret = hns_roce_cmd_use_events(hr_dev); if (ret) { - dev_err(dev, "Switch to event-driven cmd failed!\n"); - goto error_failed_use_event; + dev_warn(dev, + "Cmd event mode failed, set back to poll!\n"); + hns_roce_cmd_use_polling(hr_dev); } } @@ -955,8 +957,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) error_failed_init_hem: if (hr_dev->cmd_mod) hns_roce_cmd_use_polling(hr_dev); - -error_failed_use_event: hr_dev->hw->cleanup_eq(hr_dev); error_failed_eq_table: From 395b59a116a24916019cfc5e34b3f44743c01b2d Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 29 Aug 2019 16:41:42 +0800 Subject: [PATCH 94/99] RDMA/hns: Package operations of rq inline buffer into separate functions Move the code that allocates and frees the rq inline buffer in the hns_roce_create_qp_common() function into separate helper functions in order to reduce its complexity.
Link: https://lore.kernel.org/r/1567068102-56919-3-git-send-email-liweihang@hisilicon.com Signed-off-by: Lijun Ou Signed-off-by: Weihang Li Signed-off-by: Doug Ledford Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 95 +++++++++++++++---------- 1 file changed, 56 insertions(+), 39 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 8868172d99c8..bd78ff90d998 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -635,6 +635,50 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) return 1; } +static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr) +{ + u32 max_recv_sge = init_attr->cap.max_recv_sge; + struct hns_roce_rinl_wqe *wqe_list; + u32 wqe_cnt = hr_qp->rq.wqe_cnt; + int i; + + /* allocate recv inline buf */ + wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe), + GFP_KERNEL); + + if (!wqe_list) + goto err; + + /* Allocate a continuous buffer for all inline sge we need */ + wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge * + sizeof(struct hns_roce_rinl_sge)), + GFP_KERNEL); + if (!wqe_list[0].sg_list) + goto err_wqe_list; + + /* Assign buffers of sg_list to each inline wqe */ + for (i = 1; i < wqe_cnt; i++) + wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge]; + + hr_qp->rq_inl_buf.wqe_list = wqe_list; + hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt; + + return 0; + +err_wqe_list: + kfree(wqe_list); + +err: + return -ENOMEM; +} + +static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) +{ + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); + kfree(hr_qp->rq_inl_buf.wqe_list); +} + static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, @@ -676,33 +720,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && hns_roce_qp_has_rq(init_attr)) { - /* allocate recv inline buf */ - hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, - sizeof(struct hns_roce_rinl_wqe), - GFP_KERNEL); - if (!hr_qp->rq_inl_buf.wqe_list) { - ret = -ENOMEM; + ret = alloc_rq_inline_buf(hr_qp, init_attr); + if (ret) { + dev_err(dev, "allocate receive inline buffer failed\n"); goto err_out; } - - hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; - - /* Firstly, allocate a list of sge space buffer */ - hr_qp->rq_inl_buf.wqe_list[0].sg_list = - kcalloc(hr_qp->rq_inl_buf.wqe_cnt, - init_attr->cap.max_recv_sge * - sizeof(struct hns_roce_rinl_sge), - GFP_KERNEL); - if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { - ret = -ENOMEM; - goto err_wqe_list; - } - - for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) - /* Secondly, reallocate the buffer */ - hr_qp->rq_inl_buf.wqe_list[i].sg_list = - &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * - init_attr->cap.max_recv_sge]; } page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; @@ -710,14 +732,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "ib_copy_from_udata error for create qp\n"); ret = -EFAULT; - goto err_rq_sge_list; + goto err_alloc_rq_inline_buf; } ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, &ucmd); if (ret) { dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); - goto err_rq_sge_list; + goto err_alloc_rq_inline_buf; } hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr, @@ -725,7 +747,7 @@ static int 
hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (IS_ERR(hr_qp->umem)) { dev_err(dev, "ib_umem_get error for create qp\n"); ret = PTR_ERR(hr_qp->umem); - goto err_rq_sge_list; + goto err_alloc_rq_inline_buf; } hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions, ARRAY_SIZE(hr_qp->regions), @@ -786,13 +808,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { dev_err(dev, "init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_rq_sge_list; + goto err_alloc_rq_inline_buf; } if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { dev_err(dev, "init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_rq_sge_list; + goto err_alloc_rq_inline_buf; } /* Set SQ size */ @@ -800,7 +822,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp); if (ret) { dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); - goto err_rq_sge_list; + goto err_alloc_rq_inline_buf; } /* QP doorbell register address */ @@ -814,7 +836,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); if (ret) { dev_err(dev, "rq record doorbell alloc failed!\n"); - goto err_rq_sge_list; + goto err_alloc_rq_inline_buf; } *hr_qp->rdb.db_record = 0; hr_qp->rdb_en = 1; @@ -980,15 +1002,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_qp->rdb); -err_rq_sge_list: +err_alloc_rq_inline_buf: if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && hns_roce_qp_has_rq(init_attr)) - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - -err_wqe_list: - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hns_roce_qp_has_rq(init_attr)) - kfree(hr_qp->rq_inl_buf.wqe_list); + free_rq_inline_buf(hr_qp); err_out: return ret; From d97a3e92f33685768be04b2dfcd812f78f8ef341 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 Sep 2019 10:28:56 +0100 Subject: [PATCH 95/99] RDMA/bnxt_re: Fix spelling mistake "missin_resp" -> "missing_resp" There is a spelling mistake in a literal string, fix it. 
Fixes: 89f81008baac ("RDMA/bnxt_re: expose detailed stats retrieved from HW") Link: https://lore.kernel.org/r/20190911092856.11146-1-colin.king@canonical.com Signed-off-by: Colin Ian King Acked-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/hw_counters.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c index 604b71875f5f..3421a0b15983 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.c +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c @@ -74,7 +74,7 @@ static const char * const bnxt_re_stat_name[] = { [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd", [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded", [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd", - [BNXT_RE_MISSING_RESP] = "missin_resp", + [BNXT_RE_MISSING_RESP] = "missing_resp", [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err", [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err", [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err", From 130c2c576e75efaea9cd321ec4b171cc93cd0030 Mon Sep 17 00:00:00 2001 From: Danit Goldberg Date: Mon, 16 Sep 2019 09:48:17 +0300 Subject: [PATCH 96/99] IB/mlx5: Use the original address for the page during free_pages The removal of 'buffer' in the patch below caused free_page() to use a value that had been offset since the wqe pointer is adjusted while the routine runs. The current implementation of free_pages() rounds down to a pfn, discarding the adjustment, but this is not the right way to use the API. Preserve the initial value and use it for free_page(). Fixes: 0f51427bd097 ("RDMA/mlx5: Cleanup WQE page fault handler") Link: https://lore.kernel.org/r/20190916064818.19823-2-leon@kernel.org Signed-off-by: Danit Goldberg Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/odp.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 14fe94bcc788..2e9b43061797 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1131,7 +1131,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, { bool sq = pfault->type & MLX5_PFAULT_REQUESTOR; u16 wqe_index = pfault->wqe.wqe_index; - void *wqe = NULL, *wqe_end = NULL; + void *wqe, *wqe_start = NULL, *wqe_end = NULL; u32 bytes_mapped, total_wqe_bytes; struct mlx5_core_rsc_common *res; int resume_with_error = 1; @@ -1152,12 +1152,13 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, goto resolve_page_fault; } - wqe = (void *)__get_free_page(GFP_KERNEL); - if (!wqe) { + wqe_start = (void *)__get_free_page(GFP_KERNEL); + if (!wqe_start) { mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); goto resolve_page_fault; } + wqe = wqe_start; qp = (res->res == MLX5_RES_QP) ? 
res_to_qp(res) : NULL; if (qp && sq) { ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, @@ -1212,7 +1213,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, pfault->wqe.wq_num, resume_with_error, pfault->type); mlx5_core_res_put(res); - free_page((unsigned long)wqe); + free_page((unsigned long)wqe_start); } static int pages_in_range(u64 address, u32 length) From 5d44adebbb7e785939df3db36ac360f5e8b73e44 Mon Sep 17 00:00:00 2001 From: Danit Goldberg Date: Mon, 16 Sep 2019 09:48:18 +0300 Subject: [PATCH 97/99] IB/mlx5: Free mpi in mp_slave mode ib_add_slave_port() allocates a multiport struct but never frees it. Don't leak memory, free the allocated mpi struct during driver unload. Cc: Fixes: 32f69e4be269 ("{net, IB}/mlx5: Manage port association for multiport RoCE") Link: https://lore.kernel.org/r/20190916064818.19823-3-leon@kernel.org Signed-off-by: Danit Goldberg Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c6bc4aee7638..6fedfff7862a 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6999,6 +6999,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); list_del(&mpi->list); mutex_unlock(&mlx5_ib_multiport_mutex); + kfree(mpi); return; } From a3f4b8e31822650f815f4219a4abde2001fd2fd0 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 10 Sep 2019 14:42:58 +0100 Subject: [PATCH 98/99] RDMA/efa: Fix incorrect error print The error print should indicate that it failed to get the queue attributes, not network attributes. Link: https://lore.kernel.org/r/20190910134301.4194-2-galpress@amazon.com Reviewed-by: Daniel Kranzdorf Reviewed-by: Firas JahJah Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_com_cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index 501dce89f275..c079f1332082 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -481,7 +481,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, EFA_ADMIN_QUEUE_ATTR); if (err) { ibdev_err_ratelimited(edev->efa_dev, - "Failed to get network attributes %d\n", + "Failed to get queue attributes %d\n", err); return err; } From 3eca7fc2d8d1275d9cf0c709f0937becbfcf6d96 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 16 Sep 2019 10:11:54 +0300 Subject: [PATCH 99/99] RDMA: Fix double-free in srq creation error flow The cited commit introduced a double-free of the srq buffer in the error flow of procedure __uverbs_create_xsrq(). The problem is that ib_destroy_srq_user() called in the error flow also frees the srq buffer. Thus, if uverbs_response() fails in __uverbs_create_srq(), the srq buffer will be freed twice. 
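The shape of the fix can be sketched generically (the names below are made up; this is not the uverbs code itself): once the destroy helper has freed the object, the error path clears the local pointer so that the shared kfree() label turns into a harmless kfree(NULL) instead of a double-free:

/* Sketch only, with invented types and helpers. */
static int create_obj_and_reply(struct my_ctx *ctx)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int err;

	if (!obj)
		return -ENOMEM;

	err = reply_to_user(ctx, obj);
	if (err) {
		destroy_obj(obj);	/* already frees obj internally */
		obj = NULL;		/* make the kfree() below a no-op */
		goto err_free;
	}
	return 0;

err_free:
	kfree(obj);
	return err;
}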
Cc: Fixes: 68e326dea1db ("RDMA: Handle SRQ allocations by IB/core") Link: https://lore.kernel.org/r/20190916071154.20383-5-leon@kernel.org Signed-off-by: Jack Morgenstein Signed-off-by: Leon Romanovsky Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_cmd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 8f4fd4fac159..13af88da5f79 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3482,7 +3482,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, err_copy: ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs)); - + /* It was released in ib_destroy_srq_user */ + srq = NULL; err_free: kfree(srq); err_put: