diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index ccaedfd53e49..b1de8d608e4d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -346,9 +346,14 @@ static void qedr_free_resources(struct qedr_dev *dev)
 
 static int qedr_alloc_resources(struct qedr_dev *dev)
 {
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+		.elem_size	= sizeof(struct regpair *),
+	};
 	struct qedr_cnq *cnq;
 	__le16 *cons_pi;
-	u16 n_entries;
 	int i, rc;
 
 	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
@@ -382,7 +387,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
 
 	/* Allocate CNQ PBLs */
-	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
+				 QEDR_ROCE_MAX_CNQ_SIZE);
+
 	for (i = 0; i < dev->num_cnq; i++) {
 		cnq = &dev->cnq_array[i];
 
@@ -391,13 +398,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 		if (rc)
 			goto err3;
 
-		rc = dev->ops->common->chain_alloc(dev->cdev,
-						   QED_CHAIN_USE_TO_CONSUME,
-						   QED_CHAIN_MODE_PBL,
-						   QED_CHAIN_CNT_TYPE_U16,
-						   n_entries,
-						   sizeof(struct regpair *),
-						   &cnq->pbl, NULL);
+		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
+						   &params);
 		if (rc)
 			goto err4;
 
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9b9e80266367..49b8a43e3fa2 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -891,6 +891,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		udata, struct qedr_ucontext, ibucontext);
 	struct qed_rdma_destroy_cq_out_params destroy_oparams;
 	struct qed_rdma_destroy_cq_in_params destroy_iparams;
+	struct qed_chain_init_params chain_params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
+		.elem_size	= sizeof(union rdma_cqe),
+	};
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
 	struct qed_rdma_create_cq_in_params params;
 	struct qedr_create_cq_ureq ureq = {};
@@ -917,6 +923,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
 	chain_entries = qedr_align_cq_entries(entries);
 	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+	chain_params.num_elems = chain_entries;
 
 	/* calc db offset. user will add DPI base, kernel will add db addr */
 	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
@@ -951,13 +958,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	} else {
 		cq->cq_type = QEDR_CQ_TYPE_KERNEL;
 
-		rc = dev->ops->common->chain_alloc(dev->cdev,
-						   QED_CHAIN_USE_TO_CONSUME,
-						   QED_CHAIN_MODE_PBL,
-						   QED_CHAIN_CNT_TYPE_U32,
-						   chain_entries,
-						   sizeof(union rdma_cqe),
-						   &cq->pbl, NULL);
+		rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
+						   &chain_params);
 		if (rc)
 			goto err0;
 
@@ -1446,6 +1448,12 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
 					struct ib_srq_init_attr *init_attr)
 {
 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
+		.elem_size	= QEDR_SRQ_WQE_ELEM_SIZE,
+	};
 	dma_addr_t phy_prod_pair_addr;
 	u32 num_elems;
 	void *va;
@@ -1464,13 +1472,9 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
 	hw_srq->virt_prod_pair_addr = va;
 
 	num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
-	rc = dev->ops->common->chain_alloc(dev->cdev,
-					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-					   QED_CHAIN_MODE_PBL,
-					   QED_CHAIN_CNT_TYPE_U32,
-					   num_elems,
-					   QEDR_SRQ_WQE_ELEM_SIZE,
-					   &hw_srq->pbl, NULL);
+	params.num_elems = num_elems;
+
+	rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
 	if (rc)
 		goto err0;
 
@@ -1901,29 +1905,28 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 			   u32 n_sq_elems, u32 n_rq_elems)
 {
 	struct qed_rdma_create_qp_out_params out_params;
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
+	};
 	int rc;
 
-	rc = dev->ops->common->chain_alloc(dev->cdev,
-					   QED_CHAIN_USE_TO_PRODUCE,
-					   QED_CHAIN_MODE_PBL,
-					   QED_CHAIN_CNT_TYPE_U32,
-					   n_sq_elems,
-					   QEDR_SQE_ELEMENT_SIZE,
-					   &qp->sq.pbl, NULL);
+	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+	params.num_elems = n_sq_elems;
+	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
 
+	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
 	if (rc)
 		return rc;
 
 	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
 	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
 
-	rc = dev->ops->common->chain_alloc(dev->cdev,
-					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-					   QED_CHAIN_MODE_PBL,
-					   QED_CHAIN_CNT_TYPE_U32,
-					   n_rq_elems,
-					   QEDR_RQE_ELEMENT_SIZE,
-					   &qp->rq.pbl, NULL);
+	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+	params.num_elems = n_rq_elems;
+	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+
+	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
 	if (rc)
 		return rc;
 
@@ -1949,14 +1952,19 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
 			    u32 n_sq_elems, u32 n_rq_elems)
 {
 	struct qed_rdma_create_qp_out_params out_params;
-	struct qed_chain_ext_pbl ext_pbl;
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
+	};
 	int rc;
 
 	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
 						     QEDR_SQE_ELEMENT_SIZE,
+						     QED_CHAIN_PAGE_SIZE,
 						     QED_CHAIN_MODE_PBL);
 	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
 						     QEDR_RQE_ELEMENT_SIZE,
+						     QED_CHAIN_PAGE_SIZE,
 						     QED_CHAIN_MODE_PBL);
 
 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
@@ -1966,31 +1974,24 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
 		return -EINVAL;
 
 	/* Now we allocate the chain */
-	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
-	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
 
-	rc = dev->ops->common->chain_alloc(dev->cdev,
-					   QED_CHAIN_USE_TO_PRODUCE,
-					   QED_CHAIN_MODE_PBL,
-					   QED_CHAIN_CNT_TYPE_U32,
-					   n_sq_elems,
-					   QEDR_SQE_ELEMENT_SIZE,
-					   &qp->sq.pbl, &ext_pbl);
+	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+	params.num_elems = n_sq_elems;
+	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+	params.ext_pbl_virt = out_params.sq_pbl_virt;
+	params.ext_pbl_phys = out_params.sq_pbl_phys;
 
+	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
 	if (rc)
 		goto err;
 
-	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
-	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
-
-	rc = dev->ops->common->chain_alloc(dev->cdev,
-					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-					   QED_CHAIN_MODE_PBL,
-					   QED_CHAIN_CNT_TYPE_U32,
-					   n_rq_elems,
-					   QEDR_RQE_ELEMENT_SIZE,
-					   &qp->rq.pbl, &ext_pbl);
+	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+	params.num_elems = n_rq_elems;
+	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+	params.ext_pbl_virt = out_params.rq_pbl_virt;
+	params.ext_pbl_phys = out_params.rq_pbl_phys;
+
+	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
 	if (rc)
 		goto err;
 
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 4176bbf2a22b..f947b105cf14 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -3,12 +3,35 @@
 
 obj-$(CONFIG_QED) := qed.o
 
-qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
-	 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
-	 qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
-qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
-qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
-qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-y := \
+	qed_chain.o \
+	qed_cxt.o \
+	qed_dcbx.o \
+	qed_debug.o \
+	qed_dev.o \
+	qed_hw.o \
+	qed_init_fw_funcs.o \
+	qed_init_ops.o \
+	qed_int.o \
+	qed_l2.o \
+	qed_main.o \
+	qed_mcp.o \
+	qed_mng_tlv.o \
+	qed_ptp.o \
+	qed_selftest.o \
+	qed_sp_commands.o \
+	qed_spq.o
+
 qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
 qed-$(CONFIG_QED_OOO) += qed_ooo.o
+
+qed-$(CONFIG_QED_RDMA) += \
+	qed_iwarp.o \
+	qed_rdma.o \
+	qed_roce.o
+
+qed-$(CONFIG_QED_SRIOV) += \
+	qed_sriov.o \
+	qed_vf.o
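Every converted call site in the two qedr files above follows the same pattern, so a condensed usage sketch may help review. This is an illustration only (hypothetical caller and sizes, not part of the patch); it relies solely on qed_chain_alloc()/qed_chain_free() and struct qed_chain_init_params as introduced below:

	/* Hypothetical example: a 16-bit producer/consumer PBL chain of
	 * 256 eight-byte elements. Fields left out of the initializer stay
	 * zero; qed_chain_alloc() promotes page_size == 0 to
	 * QED_CHAIN_PAGE_SIZE (4 KiB).
	 */
	static int example_alloc_ring(struct qed_dev *cdev,
				      struct qed_chain *ring)
	{
		struct qed_chain_init_params params = {
			.mode		= QED_CHAIN_MODE_PBL,
			.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
			.num_elems	= 256,
			.elem_size	= 8,
		};

		return qed_chain_alloc(cdev, ring, &params);
	}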
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
new file mode 100644
index 000000000000..f8efd36d66e0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (c) 2020 Marvell International Ltd. */
+
+#include <linux/dma-mapping.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/vmalloc.h>
+
+#include "qed_dev_api.h"
+
+static void qed_chain_init(struct qed_chain *chain,
+			   const struct qed_chain_init_params *params,
+			   u32 page_cnt)
+{
+	memset(chain, 0, sizeof(*chain));
+
+	chain->elem_size = params->elem_size;
+	chain->intended_use = params->intended_use;
+	chain->mode = params->mode;
+	chain->cnt_type = params->cnt_type;
+
+	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
+					      params->page_size);
+	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
+						       params->page_size,
+						       params->mode);
+	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
+						       params->mode);
+
+	chain->elem_per_page_mask = chain->elem_per_page - 1;
+	chain->next_page_mask = chain->usable_per_page &
+				chain->elem_per_page_mask;
+
+	chain->page_size = params->page_size;
+	chain->page_cnt = page_cnt;
+	chain->capacity = chain->usable_per_page * page_cnt;
+	chain->size = chain->elem_per_page * page_cnt;
+
+	if (params->ext_pbl_virt) {
+		chain->pbl_sp.table_virt = params->ext_pbl_virt;
+		chain->pbl_sp.table_phys = params->ext_pbl_phys;
+
+		chain->b_external_pbl = true;
+	}
+}
+
+static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
+					 void *virt_curr, void *virt_next,
+					 dma_addr_t phys_next)
+{
+	struct qed_chain_next *next;
+	u32 size;
+
+	size = chain->elem_size * chain->usable_per_page;
+	next = virt_curr + size;
+
+	DMA_REGPAIR_LE(next->next_phys, phys_next);
+	next->next_virt = virt_next;
+}
+
+static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
+			       dma_addr_t phys_addr)
+{
+	chain->p_virt_addr = virt_addr;
+	chain->p_phys_addr = phys_addr;
+}
+
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+				    struct qed_chain *chain)
+{
+	struct device *dev = &cdev->pdev->dev;
+	struct qed_chain_next *next;
+	dma_addr_t phys, phys_next;
+	void *virt, *virt_next;
+	u32 size, i;
+
+	size = chain->elem_size * chain->usable_per_page;
+	virt = chain->p_virt_addr;
+	phys = chain->p_phys_addr;
+
+	for (i = 0; i < chain->page_cnt; i++) {
+		if (!virt)
+			break;
+
+		next = virt + size;
+		virt_next = next->next_virt;
+		phys_next = HILO_DMA_REGPAIR(next->next_phys);
+
+		dma_free_coherent(dev, chain->page_size, virt, phys);
+
+		virt = virt_next;
+		phys = phys_next;
+	}
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+				  struct qed_chain *chain)
+{
+	if (!chain->p_virt_addr)
+		return;
+
+	dma_free_coherent(&cdev->pdev->dev, chain->page_size,
+			  chain->p_virt_addr, chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
+{
+	struct device *dev = &cdev->pdev->dev;
+	struct addr_tbl_entry *entry;
+	u32 i;
+
+	if (!chain->pbl.pp_addr_tbl)
+		return;
+
+	for (i = 0; i < chain->page_cnt; i++) {
+		entry = chain->pbl.pp_addr_tbl + i;
+		if (!entry->virt_addr)
+			break;
+
+		dma_free_coherent(dev, chain->page_size, entry->virt_addr,
+				  entry->dma_map);
+	}
+
+	if (!chain->b_external_pbl)
+		dma_free_coherent(dev, chain->pbl_sp.table_size,
+				  chain->pbl_sp.table_virt,
+				  chain->pbl_sp.table_phys);
+
+	vfree(chain->pbl.pp_addr_tbl);
+	chain->pbl.pp_addr_tbl = NULL;
+}
+
+/**
+ * qed_chain_free() - Free chain DMA memory.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to free.
+ */
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
+{
+	switch (chain->mode) {
+	case QED_CHAIN_MODE_NEXT_PTR:
+		qed_chain_free_next_ptr(cdev, chain);
+		break;
+	case QED_CHAIN_MODE_SINGLE:
+		qed_chain_free_single(cdev, chain);
+		break;
+	case QED_CHAIN_MODE_PBL:
+		qed_chain_free_pbl(cdev, chain);
+		break;
+	default:
+		return;
+	}
+
+	qed_chain_init_mem(chain, NULL, 0);
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+			     const struct qed_chain_init_params *params,
+			     u32 page_cnt)
+{
+	u64 chain_size;
+
+	chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
+	chain_size *= page_cnt;
+
+	if (!chain_size)
+		return -EINVAL;
+
+	/* The actual chain size can be larger than the maximal possible value
+	 * after rounding up the requested elements number to pages, and after
+	 * taking into account the unusable elements (next-ptr elements).
+	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+	 * size/capacity fields are of u32 type.
+	 */
+	switch (params->cnt_type) {
+	case QED_CHAIN_CNT_TYPE_U16:
+		if (chain_size > U16_MAX + 1)
+			break;
+
+		return 0;
+	case QED_CHAIN_CNT_TYPE_U32:
+		if (chain_size > U32_MAX)
+			break;
+
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	DP_NOTICE(cdev,
+		  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+		  chain_size);
+
+	return -EINVAL;
+}
+
+static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
+				    struct qed_chain *chain)
+{
+	struct device *dev = &cdev->pdev->dev;
+	void *virt, *virt_prev = NULL;
+	dma_addr_t phys;
+	u32 i;
+
+	for (i = 0; i < chain->page_cnt; i++) {
+		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
+					  GFP_KERNEL);
+		if (!virt)
+			return -ENOMEM;
+
+		if (i == 0) {
+			qed_chain_init_mem(chain, virt, phys);
+			qed_chain_reset(chain);
+		} else {
+			qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
+						     phys);
+		}
+
+		virt_prev = virt;
+	}
+
+	/* Last page's next element should point to the beginning of the
+	 * chain.
+	 */
+	qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
+				     chain->p_phys_addr);
+
+	return 0;
+}
+
+static int qed_chain_alloc_single(struct qed_dev *cdev,
+				  struct qed_chain *chain)
+{
+	dma_addr_t phys;
+	void *virt;
+
+	virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
+				  &phys, GFP_KERNEL);
+	if (!virt)
+		return -ENOMEM;
+
+	qed_chain_init_mem(chain, virt, phys);
+	qed_chain_reset(chain);
+
+	return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
+{
+	struct device *dev = &cdev->pdev->dev;
+	struct addr_tbl_entry *addr_tbl;
+	dma_addr_t phys, pbl_phys;
+	__le64 *pbl_virt;
+	u32 page_cnt, i;
+	size_t size;
+	void *virt;
+
+	page_cnt = chain->page_cnt;
+
+	size = array_size(page_cnt, sizeof(*addr_tbl));
+	if (unlikely(size == SIZE_MAX))
+		return -EOVERFLOW;
+
+	addr_tbl = vzalloc(size);
+	if (!addr_tbl)
+		return -ENOMEM;
+
+	chain->pbl.pp_addr_tbl = addr_tbl;
+
+	if (chain->b_external_pbl)
+		goto alloc_pages;
+
+	size = array_size(page_cnt, sizeof(*pbl_virt));
+	if (unlikely(size == SIZE_MAX))
+		return -EOVERFLOW;
+
+	pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
+	if (!pbl_virt)
+		return -ENOMEM;
+
+	chain->pbl_sp.table_virt = pbl_virt;
+	chain->pbl_sp.table_phys = pbl_phys;
+	chain->pbl_sp.table_size = size;
+
+alloc_pages:
+	for (i = 0; i < page_cnt; i++) {
+		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
+					  GFP_KERNEL);
+		if (!virt)
+			return -ENOMEM;
+
+		if (i == 0) {
+			qed_chain_init_mem(chain, virt, phys);
+			qed_chain_reset(chain);
+		}
+
+		/* Fill the PBL table with the physical address of the page */
+		pbl_virt[i] = cpu_to_le64(phys);
+
+		/* Keep the virtual address of the page */
+		addr_tbl[i].virt_addr = virt;
+		addr_tbl[i].dma_map = phys;
+	}
+
+	return 0;
+}
+
+/**
+ * qed_chain_alloc() - Allocate and initialize a chain.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to be processed.
+ * @params: Chain initialization parameters.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+		    struct qed_chain_init_params *params)
+{
+	u32 page_cnt;
+	int rc;
+
+	if (!params->page_size)
+		params->page_size = QED_CHAIN_PAGE_SIZE;
+
+	if (params->mode == QED_CHAIN_MODE_SINGLE)
+		page_cnt = 1;
+	else
+		page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
+					      params->elem_size,
+					      params->page_size,
+					      params->mode);
+
+	rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
+	if (rc) {
+		DP_NOTICE(cdev,
+			  "Cannot allocate a chain with the given arguments:\n");
+		DP_NOTICE(cdev,
+			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
+			  params->intended_use, params->mode, params->cnt_type,
+			  params->num_elems, params->elem_size,
+			  params->page_size);
+		return rc;
+	}
+
+	qed_chain_init(chain, params, page_cnt);
+
+	switch (params->mode) {
+	case QED_CHAIN_MODE_NEXT_PTR:
+		rc = qed_chain_alloc_next_ptr(cdev, chain);
+		break;
+	case QED_CHAIN_MODE_SINGLE:
+		rc = qed_chain_alloc_single(cdev, chain);
+		break;
+	case QED_CHAIN_MODE_PBL:
+		rc = qed_chain_alloc_pbl(cdev, chain);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!rc)
+		return 0;
+
+	qed_chain_free(cdev, chain);
+
+	return rc;
+}
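For review convenience, here is the sizing arithmetic qed_chain_alloc() performs, written out for the default 4 KiB page (a worked example, not part of the patch; the element size is illustrative):

	/* PBL and SINGLE modes: every element on a page is usable.
	 *   elem_size = 64, page_size = 4096
	 *   elem_per_page = 4096 / 64 = 64
	 *   page_cnt for 1000 elements = DIV_ROUND_UP(1000, 64) = 16 pages
	 *
	 * NEXT_PTR mode: the tail of each page is reserved for a
	 * struct qed_chain_next, so usable_per_page shrinks by
	 *   1 + (sizeof(struct qed_chain_next) - 1) / elem_size
	 * elements and the page count grows accordingly.
	 */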
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 6516a1f921da..d9c7a1a6be94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4716,279 +4716,6 @@ void qed_hw_remove(struct qed_dev *cdev)
 	qed_mcp_nvm_info_free(p_hwfn);
 }
 
-static void qed_chain_free_next_ptr(struct qed_dev *cdev,
-				    struct qed_chain *p_chain)
-{
-	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
-	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
-	struct qed_chain_next *p_next;
-	u32 size, i;
-
-	if (!p_virt)
-		return;
-
-	size = p_chain->elem_size * p_chain->usable_per_page;
-
-	for (i = 0; i < p_chain->page_cnt; i++) {
-		if (!p_virt)
-			break;
-
-		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
-		p_virt_next = p_next->next_virt;
-		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
-
-		dma_free_coherent(&cdev->pdev->dev,
-				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
-
-		p_virt = p_virt_next;
-		p_phys = p_phys_next;
-	}
-}
-
-static void qed_chain_free_single(struct qed_dev *cdev,
-				  struct qed_chain *p_chain)
-{
-	if (!p_chain->p_virt_addr)
-		return;
-
-	dma_free_coherent(&cdev->pdev->dev,
-			  QED_CHAIN_PAGE_SIZE,
-			  p_chain->p_virt_addr, p_chain->p_phys_addr);
-}
-
-static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
-	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-
-	if (!pp_addr_tbl)
-		return;
-
-	for (i = 0; i < page_cnt; i++) {
-		if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
-			break;
-
-		dma_free_coherent(&cdev->pdev->dev,
-				  QED_CHAIN_PAGE_SIZE,
-				  pp_addr_tbl[i].virt_addr,
-				  pp_addr_tbl[i].dma_map);
-	}
-
-	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
-	if (!p_chain->b_external_pbl)
-		dma_free_coherent(&cdev->pdev->dev,
-				  pbl_size,
-				  p_chain->pbl_sp.p_virt_table,
-				  p_chain->pbl_sp.p_phys_table);
-
-	vfree(p_chain->pbl.pp_addr_tbl);
-	p_chain->pbl.pp_addr_tbl = NULL;
-}
-
-void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	switch (p_chain->mode) {
-	case QED_CHAIN_MODE_NEXT_PTR:
-		qed_chain_free_next_ptr(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_SINGLE:
-		qed_chain_free_single(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_PBL:
-		qed_chain_free_pbl(cdev, p_chain);
-		break;
-	}
-}
-
-static int
-qed_chain_alloc_sanity_check(struct qed_dev *cdev,
-			     enum qed_chain_cnt_type cnt_type,
-			     size_t elem_size, u32 page_cnt)
-{
-	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
-
-	/* The actual chain size can be larger than the maximal possible value
-	 * after rounding up the requested elements number to pages, and after
-	 * taking into acount the unusuable elements (next-ptr elements).
-	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
-	 * size/capacity fields are of a u32 type.
-	 */
-	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-	     chain_size > ((u32)U16_MAX + 1)) ||
-	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
-		DP_NOTICE(cdev,
-			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
-			  chain_size);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	void *p_virt = NULL, *p_virt_prev = NULL;
-	dma_addr_t p_phys = 0;
-	u32 i;
-
-	for (i = 0; i < p_chain->page_cnt; i++) {
-		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-					    QED_CHAIN_PAGE_SIZE,
-					    &p_phys, GFP_KERNEL);
-		if (!p_virt)
-			return -ENOMEM;
-
-		if (i == 0) {
-			qed_chain_init_mem(p_chain, p_virt, p_phys);
-			qed_chain_reset(p_chain);
-		} else {
-			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
-						     p_virt, p_phys);
-		}
-
-		p_virt_prev = p_virt;
-	}
-	/* Last page's next element should point to the beginning of the
-	 * chain.
-	 */
-	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
-				     p_chain->p_virt_addr,
-				     p_chain->p_phys_addr);
-
-	return 0;
-}
-
-static int
-qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	dma_addr_t p_phys = 0;
-	void *p_virt = NULL;
-
-	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
-	if (!p_virt)
-		return -ENOMEM;
-
-	qed_chain_init_mem(p_chain, p_virt, p_phys);
-	qed_chain_reset(p_chain);
-
-	return 0;
-}
-
-static int
-qed_chain_alloc_pbl(struct qed_dev *cdev,
-		    struct qed_chain *p_chain,
-		    struct qed_chain_ext_pbl *ext_pbl)
-{
-	u32 page_cnt = p_chain->page_cnt, size, i;
-	dma_addr_t p_phys = 0, p_pbl_phys = 0;
-	struct addr_tbl_entry *pp_addr_tbl;
-	u8 *p_pbl_virt = NULL;
-	void *p_virt = NULL;
-
-	size = page_cnt * sizeof(*pp_addr_tbl);
-	pp_addr_tbl = vzalloc(size);
-	if (!pp_addr_tbl)
-		return -ENOMEM;
-
-	/* The allocation of the PBL table is done with its full size, since it
-	 * is expected to be successive.
-	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
-	 * failure, since tbl was previously allocated, and it
-	 * should be saved to allow its freeing during the error flow.
-	 */
-	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
-	if (!ext_pbl) {
-		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
-						size, &p_pbl_phys, GFP_KERNEL);
-	} else {
-		p_pbl_virt = ext_pbl->p_pbl_virt;
-		p_pbl_phys = ext_pbl->p_pbl_phys;
-		p_chain->b_external_pbl = true;
-	}
-
-	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
-	if (!p_pbl_virt)
-		return -ENOMEM;
-
-	for (i = 0; i < page_cnt; i++) {
-		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-					    QED_CHAIN_PAGE_SIZE,
-					    &p_phys, GFP_KERNEL);
-		if (!p_virt)
-			return -ENOMEM;
-
-		if (i == 0) {
-			qed_chain_init_mem(p_chain, p_virt, p_phys);
-			qed_chain_reset(p_chain);
-		}
-
-		/* Fill the PBL table with the physical address of the page */
-		*(dma_addr_t *)p_pbl_virt = p_phys;
-
-		/* Keep the virtual address of the page */
-		p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
-		p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
-
-		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
-	}
-
-	return 0;
-}
-
-int qed_chain_alloc(struct qed_dev *cdev,
-		    enum qed_chain_use_mode intended_use,
-		    enum qed_chain_mode mode,
-		    enum qed_chain_cnt_type cnt_type,
-		    u32 num_elems,
-		    size_t elem_size,
-		    struct qed_chain *p_chain,
-		    struct qed_chain_ext_pbl *ext_pbl)
-{
-	u32 page_cnt;
-	int rc = 0;
-
-	if (mode == QED_CHAIN_MODE_SINGLE)
-		page_cnt = 1;
-	else
-		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
-
-	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
-	if (rc) {
-		DP_NOTICE(cdev,
-			  "Cannot allocate a chain with the given arguments:\n");
-		DP_NOTICE(cdev,
-			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
-			  intended_use, mode, cnt_type, num_elems, elem_size);
-		return rc;
-	}
-
-	qed_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
-			      mode, cnt_type);
-
-	switch (mode) {
-	case QED_CHAIN_MODE_NEXT_PTR:
-		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_SINGLE:
-		rc = qed_chain_alloc_single(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_PBL:
-		rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
-		break;
-	}
-	if (rc)
-		goto nomem;
-
-	return 0;
-
-nomem:
-	qed_chain_free(cdev, p_chain);
-	return rc;
-}
-
 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
 {
 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 395d4932c262..d3c1f3879be8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -254,35 +254,9 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
 		       dma_addr_t dest_addr,
 		       u32 size_in_dwords, struct qed_dmae_params *p_params);
 
-/**
- * @brief qed_chain_alloc - Allocate and initialize a chain
- *
- * @param p_hwfn
- * @param intended_use
- * @param mode
- * @param num_elems
- * @param elem_size
- * @param p_chain
- * @param ext_pbl - a possible external PBL
- *
- * @return int
- */
-int
-qed_chain_alloc(struct qed_dev *cdev,
-		enum qed_chain_use_mode intended_use,
-		enum qed_chain_mode mode,
-		enum qed_chain_cnt_type cnt_type,
-		u32 num_elems,
-		size_t elem_size,
-		struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);
-
-/**
- * @brief qed_chain_free - Free chain DMA memory
- *
- * @param p_hwfn
- * @param p_chain
- */
-void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+		    struct qed_chain_init_params *params);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
 
 /**
  * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 25d2c882d7ac..4eae4ee3538f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -684,9 +684,13 @@ static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
 static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
 					 struct qed_iscsi_conn **p_out_conn)
 {
-	u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0;
 	struct scsi_terminate_extra_params *p_q_cnts = NULL;
 	struct qed_iscsi_pf_params *p_params = NULL;
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+	};
 	struct tcp_upload_params *p_tcp = NULL;
 	struct qed_iscsi_conn *p_conn = NULL;
 	int rc = 0;
@@ -727,34 +731,25 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
 		goto nomem_upload_param;
 	p_conn->tcp_upload_params_virt_addr = p_tcp;
 
-	r2tq_num_elements = p_params->num_r2tq_pages_in_ring *
-			    QED_CHAIN_PAGE_SIZE / 0x80;
-	rc = qed_chain_alloc(p_hwfn->cdev,
-			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-			     QED_CHAIN_MODE_PBL,
-			     QED_CHAIN_CNT_TYPE_U16,
-			     r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
+	params.num_elems = p_params->num_r2tq_pages_in_ring *
+			   QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe);
+	params.elem_size = sizeof(struct iscsi_wqe);
+
+	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
 	if (rc)
 		goto nomem_r2tq;
 
-	uhq_num_elements = p_params->num_uhq_pages_in_ring *
+	params.num_elems = p_params->num_uhq_pages_in_ring *
 			   QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
-	rc = qed_chain_alloc(p_hwfn->cdev,
-			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-			     QED_CHAIN_MODE_PBL,
-			     QED_CHAIN_CNT_TYPE_U16,
-			     uhq_num_elements,
-			     sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
+	params.elem_size = sizeof(struct iscsi_uhqe);
+
+	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
 	if (rc)
 		goto nomem_uhq;
 
-	xhq_num_elements = uhq_num_elements;
-	rc = qed_chain_alloc(p_hwfn->cdev,
-			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-			     QED_CHAIN_MODE_PBL,
-			     QED_CHAIN_CNT_TYPE_U16,
-			     xhq_num_elements,
-			     sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
+	params.elem_size = sizeof(struct iscsi_xhqe);
+
+	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
 	if (rc)
 		goto nomem;
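Note that the r2tq conversion above trades the bare 0x80 for sizeof(struct iscsi_wqe). That is behavior-preserving only on the assumption (implied by the old hard-coded divisor) that the firmware WQE is 128 bytes:

	/* Worked example, assuming sizeof(struct iscsi_wqe) == 0x80: */
	num_elems = num_r2tq_pages_in_ring * QED_CHAIN_PAGE_SIZE / 0x80;
		  /* = num_r2tq_pages_in_ring * 32 elements per 4 KiB page */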
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 6f4aec339cd4..0452b728c527 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1125,6 +1125,12 @@ static int
 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 			      struct qed_ll2_info *p_ll2_info)
 {
+	struct qed_chain_init_params params = {
+		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+		.num_elems	= p_ll2_info->input.rx_num_desc,
+	};
+	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_ll2_rx_packet *p_descq;
 	u32 capacity;
 	int rc = 0;
@@ -1132,13 +1138,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 	if (!p_ll2_info->input.rx_num_desc)
 		goto out;
 
-	rc = qed_chain_alloc(p_hwfn->cdev,
-			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-			     QED_CHAIN_MODE_NEXT_PTR,
-			     QED_CHAIN_CNT_TYPE_U16,
-			     p_ll2_info->input.rx_num_desc,
-			     sizeof(struct core_rx_bd),
-			     &p_ll2_info->rx_queue.rxq_chain, NULL);
+	params.mode = QED_CHAIN_MODE_NEXT_PTR;
+	params.elem_size = sizeof(struct core_rx_bd);
+
+	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
 		goto out;
@@ -1154,13 +1157,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 	}
 	p_ll2_info->rx_queue.descq_array = p_descq;
 
-	rc = qed_chain_alloc(p_hwfn->cdev,
-			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-			     QED_CHAIN_MODE_PBL,
-			     QED_CHAIN_CNT_TYPE_U16,
-			     p_ll2_info->input.rx_num_desc,
-			     sizeof(struct core_rx_fast_path_cqe),
-			     &p_ll2_info->rx_queue.rcq_chain, NULL);
+	params.mode = QED_CHAIN_MODE_PBL;
+	params.elem_size = sizeof(struct core_rx_fast_path_cqe);
+
+	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
 		goto out;
@@ -1177,6 +1177,13 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 					 struct qed_ll2_info *p_ll2_info)
 {
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+		.num_elems	= p_ll2_info->input.tx_num_desc,
+		.elem_size	= sizeof(struct core_tx_bd),
+	};
 	struct qed_ll2_tx_packet *p_descq;
 	u32 desc_size;
 	u32 capacity;
@@ -1185,13 +1192,8 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 	if (!p_ll2_info->input.tx_num_desc)
 		goto out;
 
-	rc = qed_chain_alloc(p_hwfn->cdev,
-			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-			     QED_CHAIN_MODE_PBL,
-			     QED_CHAIN_CNT_TYPE_U16,
-			     p_ll2_info->input.tx_num_desc,
-			     sizeof(struct core_tx_bd),
-			     &p_ll2_info->tx_queue.txq_chain, NULL);
+	rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
+			     &params);
 	if (rc)
 		goto out;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 8142f5669b26..aa71adcf31ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -366,11 +366,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
+		       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
 	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
 	p_ramrod->event_ring_num_pages = page_cnt;
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
+		       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
 
 	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 92ab029789e5..0bc1a0aeb56e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -382,22 +382,26 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 
 int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 {
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+		.num_elems	= num_elem,
+		.elem_size	= sizeof(union event_ring_element),
+	};
 	struct qed_eq *p_eq;
+	int ret;
 
 	/* Allocate EQ struct */
 	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
 	if (!p_eq)
 		return -ENOMEM;
 
-	/* Allocate and initialize EQ chain*/
-	if (qed_chain_alloc(p_hwfn->cdev,
-			    QED_CHAIN_USE_TO_PRODUCE,
-			    QED_CHAIN_MODE_PBL,
-			    QED_CHAIN_CNT_TYPE_U16,
-			    num_elem,
-			    sizeof(union event_ring_element),
-			    &p_eq->chain, NULL))
+	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
+	if (ret) {
+		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
 		goto eq_allocate_fail;
+	}
 
 	/* register EQ completion on the SP SB */
 	qed_int_register_cb(p_hwfn, qed_eq_completion,
@@ -408,7 +412,8 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 
 eq_allocate_fail:
 	kfree(p_eq);
-	return -ENOMEM;
+
+	return ret;
 }
 
 void qed_eq_setup(struct qed_hwfn *p_hwfn)
@@ -529,33 +534,40 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
 
 int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 {
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_SINGLE,
+		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+		.elem_size	= sizeof(struct slow_path_element),
+	};
+	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_spq_entry *p_virt = NULL;
 	struct qed_spq *p_spq = NULL;
 	dma_addr_t p_phys = 0;
 	u32 capacity;
+	int ret;
 
 	/* SPQ struct */
 	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
 	if (!p_spq)
 		return -ENOMEM;
 
-	/* SPQ ring */
-	if (qed_chain_alloc(p_hwfn->cdev,
-			    QED_CHAIN_USE_TO_PRODUCE,
-			    QED_CHAIN_MODE_SINGLE,
-			    QED_CHAIN_CNT_TYPE_U16,
-			    0, /* N/A when the mode is SINGLE */
-			    sizeof(struct slow_path_element),
-			    &p_spq->chain, NULL))
-		goto spq_allocate_fail;
+	/* SPQ ring */
+	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
+	if (ret) {
+		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
+		goto spq_chain_alloc_fail;
+	}
 
 	/* allocate and fill the SPQ elements (incl. ramrod data list) */
 	capacity = qed_chain_get_capacity(&p_spq->chain);
-	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+	ret = -ENOMEM;
+
+	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
 				    capacity * sizeof(struct qed_spq_entry),
 				    &p_phys, GFP_KERNEL);
 	if (!p_virt)
-		goto spq_allocate_fail;
+		goto spq_alloc_fail;
 
 	p_spq->p_virt = p_virt;
 	p_spq->p_phys = p_phys;
@@ -563,10 +575,12 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 
 	return 0;
 
-spq_allocate_fail:
-	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+spq_alloc_fail:
+	qed_chain_free(cdev, &p_spq->chain);
+spq_chain_alloc_fail:
 	kfree(p_spq);
-	return -ENOMEM;
+
+	return ret;
 }
 
 void qed_spq_free(struct qed_hwfn *p_hwfn)
@@ -967,30 +981,40 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+#define QED_SPQ_CONSQ_ELEM_SIZE		0x80
+
 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
 {
+	struct qed_chain_init_params params = {
+		.mode		= QED_CHAIN_MODE_PBL,
+		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
+		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
+		.num_elems	= QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
+		.elem_size	= QED_SPQ_CONSQ_ELEM_SIZE,
+	};
 	struct qed_consq *p_consq;
+	int ret;
 
 	/* Allocate ConsQ struct */
 	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
 	if (!p_consq)
 		return -ENOMEM;
 
-	/* Allocate and initialize EQ chain*/
-	if (qed_chain_alloc(p_hwfn->cdev,
-			    QED_CHAIN_USE_TO_PRODUCE,
-			    QED_CHAIN_MODE_PBL,
-			    QED_CHAIN_CNT_TYPE_U16,
-			    QED_CHAIN_PAGE_SIZE / 0x80,
-			    0x80, &p_consq->chain, NULL))
-		goto consq_allocate_fail;
+	/* Allocate and initialize ConsQ chain */
+	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
+	if (ret) {
+		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
+		goto consq_alloc_fail;
+	}
 
 	p_hwfn->p_consq = p_consq;
+
 	return 0;
 
-consq_allocate_fail:
+consq_alloc_fail:
 	kfree(p_consq);
-	return -ENOMEM;
+
+	return ret;
 }
 
 void qed_consq_setup(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f1d7f73de902..803c1fcca8ad 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -176,16 +176,17 @@ struct qede_dev {
 	u32				dp_module;
 	u8				dp_level;
 
-	unsigned long flags;
-#define IS_VF(edev)	(test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags))
+	unsigned long			flags;
+#define IS_VF(edev)			test_bit(QEDE_FLAGS_IS_VF, \
+						 &(edev)->flags)
 
 	const struct qed_eth_ops	*ops;
 	struct qede_ptp			*ptp;
 	u64				ptp_skip_txts;
 
-	struct qed_dev_eth_info dev_info;
-#define QEDE_MAX_RSS_CNT(edev)	((edev)->dev_info.num_queues)
-#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues)
+	struct qed_dev_eth_info		dev_info;
+#define QEDE_MAX_RSS_CNT(edev)		((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev)		((edev)->dev_info.num_queues)
 #define QEDE_IS_BB(edev) \
 	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
 #define QEDE_IS_AH(edev) \
@@ -198,14 +199,16 @@ struct qede_dev {
 	u8				fp_num_rx;
 	u16				req_queues;
 	u16				num_queues;
-#define QEDE_QUEUE_CNT(edev)	((edev)->num_queues)
-#define QEDE_RSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_tx)
+	u16				total_xdp_queues;
+
+#define QEDE_QUEUE_CNT(edev)		((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev)		((edev)->num_queues - (edev)->fp_num_tx)
 #define QEDE_RX_QUEUE_IDX(edev, i)	(i)
-#define QEDE_TSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_rx)
+#define QEDE_TSS_COUNT(edev)		((edev)->num_queues - (edev)->fp_num_rx)
 
 	struct qed_int_info		int_info;
 
-	/* Smaller private varaiant of the RTNL lock */
+	/* Smaller private variant of the RTNL lock */
 	struct mutex			qede_lock;
 	u32				state; /* Protected by qede_lock */
 	u16				rx_buf_size;
@@ -226,22 +229,28 @@ struct qede_dev {
 			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 	struct qede_stats		stats;
-#define QEDE_RSS_INDIR_INITED	BIT(0)
-#define QEDE_RSS_KEY_INITED	BIT(1)
-#define QEDE_RSS_CAPS_INITED	BIT(2)
-	u32 rss_params_inited; /* bit-field to track initialized rss params */
-	u16 rss_ind_table[128];
-	u32 rss_key[10];
-	u8 rss_caps;
-	u16 q_num_rx_buffers; /* Must be a power of two */
-	u16 q_num_tx_buffers; /* Must be a power of two */
+
+	/* Bitfield to track initialized RSS params */
+	u32				rss_params_inited;
+#define QEDE_RSS_INDIR_INITED		BIT(0)
+#define QEDE_RSS_KEY_INITED		BIT(1)
+#define QEDE_RSS_CAPS_INITED		BIT(2)
+
+	u16				rss_ind_table[128];
+	u32				rss_key[10];
+	u8				rss_caps;
+
+	/* Both must be a power of two */
+	u16				q_num_rx_buffers;
+	u16				q_num_tx_buffers;
+
+	bool				gro_disable;
+
+	struct list_head		vlan_list;
+	u16				configured_vlans;
+	u16				non_configured_vlans;
+	bool				accept_any_vlan;
 
-	bool gro_disable;
-	struct list_head vlan_list;
-	u16 configured_vlans;
-	u16 non_configured_vlans;
-	bool accept_any_vlan;
 	struct delayed_work		sp_task;
 	unsigned long			sp_flags;
 	u16				vxlan_dst_port;
@@ -252,14 +261,14 @@ struct qede_dev {
 
 	struct qede_rdma_dev		rdma_info;
 
-	struct bpf_prog *xdp_prog;
+	struct bpf_prog			*xdp_prog;
 
-	unsigned long err_flags;
-#define QEDE_ERR_IS_HANDLED	31
-#define QEDE_ERR_ATTN_CLR_EN	0
-#define QEDE_ERR_GET_DBG_INFO	1
-#define QEDE_ERR_IS_RECOVERABLE	2
-#define QEDE_ERR_WARN		3
+	unsigned long			err_flags;
+#define QEDE_ERR_IS_HANDLED		31
+#define QEDE_ERR_ATTN_CLR_EN		0
+#define QEDE_ERR_GET_DBG_INFO		1
+#define QEDE_ERR_IS_RECOVERABLE		2
+#define QEDE_ERR_WARN			3
 
 	struct qede_dump_info		dump_info;
 };
@@ -372,29 +381,34 @@ struct sw_tx_bd {
 };
 
 struct sw_tx_xdp {
-	struct page *page;
-	dma_addr_t mapping;
+	struct page			*page;
+	struct xdp_frame		*xdpf;
+	dma_addr_t			mapping;
 };
 
 struct qede_tx_queue {
-	u8 is_xdp;
-	bool is_legacy;
-	u16 sw_tx_cons;
-	u16 sw_tx_prod;
-	u16 num_tx_buffers; /* Slowpath only */
+	u8				is_xdp;
+	bool				is_legacy;
+	u16				sw_tx_cons;
+	u16				sw_tx_prod;
+	u16				num_tx_buffers; /* Slowpath only */
 
-	u64 xmit_pkts;
-	u64 stopped_cnt;
-	u64 tx_mem_alloc_err;
+	u64				xmit_pkts;
+	u64				stopped_cnt;
+	u64				tx_mem_alloc_err;
 
-	__le16 *hw_cons_ptr;
+	__le16				*hw_cons_ptr;
 
 	/* Needed for the mapping of packets */
-	struct device *dev;
+	struct device			*dev;
 
-	void __iomem *doorbell_addr;
-	union db_prod tx_db;
-	int index; /* Slowpath only */
+	void __iomem			*doorbell_addr;
+	union db_prod			tx_db;
+
+	/* Spinlock for XDP queues in case of XDP_REDIRECT */
+	spinlock_t			xdp_tx_lock;
+
+	int				index; /* Slowpath only */
 #define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
 					 QEDE_MAX_TSS_CNT(edev))
 #define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
@@ -406,22 +420,22 @@ struct qede_tx_queue {
 #define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx)	\
 	(&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
 	[QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
-#define QEDE_FP_TC0_TXQ(fp)	(&((fp)->txq[0]))
+#define QEDE_FP_TC0_TXQ(fp)		(&((fp)->txq[0]))
 
 	/* Regular Tx requires skb + metadata for release purpose,
 	 * while XDP requires the pages and the mapped address.
 	 */
 	union {
-		struct sw_tx_bd *skbs;
-		struct sw_tx_xdp *xdp;
-	} sw_tx_ring;
+		struct sw_tx_bd		*skbs;
+		struct sw_tx_xdp	*xdp;
+	} sw_tx_ring;
 
-	struct qed_chain tx_pbl;
+	struct qed_chain		tx_pbl;
 
 	/* Slowpath; Should be kept in end [unless missing padding] */
-	void *handle;
-	u16 cos;
-	u16 ndev_txq_id;
+	void				*handle;
+	u16				cos;
+	u16				ndev_txq_id;
 };
 
 #define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -435,32 +449,37 @@ struct qede_tx_queue {
 #define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
 
 struct qede_fastpath {
-	struct qede_dev	*edev;
-#define QEDE_FASTPATH_TX	BIT(0)
-#define QEDE_FASTPATH_RX	BIT(1)
-#define QEDE_FASTPATH_XDP	BIT(2)
-#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
-	u8			type;
-	u8			id;
-	u8			xdp_xmit;
-	struct napi_struct	napi;
-	struct qed_sb_info	*sb_info;
-	struct qede_rx_queue	*rxq;
-	struct qede_tx_queue	*txq;
-	struct qede_tx_queue	*xdp_tx;
+	struct qede_dev			*edev;
 
-#define VEC_NAME_SIZE	(sizeof_field(struct net_device, name) + 8)
-	char	name[VEC_NAME_SIZE];
+	u8				type;
+#define QEDE_FASTPATH_TX		BIT(0)
+#define QEDE_FASTPATH_RX		BIT(1)
+#define QEDE_FASTPATH_XDP		BIT(2)
+#define QEDE_FASTPATH_COMBINED		(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+
+	u8				id;
+
+	u8				xdp_xmit;
+#define QEDE_XDP_TX			BIT(0)
+#define QEDE_XDP_REDIRECT		BIT(1)
+
+	struct napi_struct		napi;
+	struct qed_sb_info		*sb_info;
+	struct qede_rx_queue		*rxq;
+	struct qede_tx_queue		*txq;
+	struct qede_tx_queue		*xdp_tx;
+
+	char				name[IFNAMSIZ + 8];
 };
 
 /* Debug print definitions */
-#define DP_NAME(edev) ((edev)->ndev->name)
+#define DP_NAME(edev)			netdev_name((edev)->ndev)
 
-#define XMIT_PLAIN		0
-#define XMIT_L4_CSUM		BIT(0)
-#define XMIT_LSO		BIT(1)
-#define XMIT_ENC		BIT(2)
-#define XMIT_ENC_GSO_L4_CSUM	BIT(3)
+#define XMIT_PLAIN			0
+#define XMIT_L4_CSUM			BIT(0)
+#define XMIT_LSO			BIT(1)
+#define XMIT_ENC			BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM		BIT(3)
 
 #define QEDE_CSUM_ERROR			BIT(0)
 #define QEDE_CSUM_UNNECESSARY		BIT(1)
@@ -503,6 +522,8 @@ struct qede_reload_args {
 
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+		      struct xdp_frame **frames, u32 flags);
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
 		      struct net_device *sb_dev);
 netdev_features_t qede_features_check(struct sk_buff *skb,
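The xdp_tx_lock and total_xdp_queues additions above exist because .ndo_xdp_xmit() may run on any CPU, concurrently with the NAPI XDP_TX path that owns the same Tx queue. The transmit handler (qede_xdp_transmit() in qede_fp.c below) therefore hashes the calling CPU onto an XDP queue and serializes on the per-queue spinlock; condensed, the pattern is:

	/* Condensed from qede_xdp_transmit() below. */
	i = smp_processor_id() % edev->total_xdp_queues;
	xdp_tx = edev->fp_array[i].xdp_tx;

	spin_lock(&xdp_tx->xdp_tx_lock);
	/* ... map the frames and post them to xdp_tx->tx_pbl ... */
	spin_unlock(&xdp_tx->xdp_tx_lock);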
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1c4ece0713f8..a2494bf85007 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -302,51 +302,94 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
 	wmb();
 }
 
-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
-			 struct sw_rx_data *metadata, u16 padding, u16 length)
+static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
+			 u16 len, struct page *page, struct xdp_frame *xdpf)
 {
-	struct qede_tx_queue *txq = fp->xdp_tx;
-	struct eth_tx_1st_bd *first_bd;
-	u16 idx = txq->sw_tx_prod;
+	struct eth_tx_1st_bd *bd;
+	struct sw_tx_xdp *xdp;
 	u16 val;
 
-	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+	if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
+		     txq->num_tx_buffers)) {
 		txq->stopped_cnt++;
 		return -ENOMEM;
 	}
 
-	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+	bd = qed_chain_produce(&txq->tx_pbl);
+	bd->data.nbds = 1;
+	bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
 
-	memset(first_bd, 0, sizeof(*first_bd));
-	first_bd->data.bd_flags.bitfields =
-	    BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
-
-	val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+	val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
 	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 
-	first_bd->data.bitfields |= cpu_to_le16(val);
-	first_bd->data.nbds = 1;
+	bd->data.bitfields = cpu_to_le16(val);
 
 	/* We can safely ignore the offset, as it's 0 for XDP */
-	BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+	BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
 
-	/* Synchronize the buffer back to device, as program [probably]
-	 * has changed it.
-	 */
-	dma_sync_single_for_device(&edev->pdev->dev,
-				   metadata->mapping + padding,
-				   length, PCI_DMA_TODEVICE);
+	xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
+	xdp->mapping = dma;
+	xdp->page = page;
+	xdp->xdpf = xdpf;
 
-	txq->sw_tx_ring.xdp[idx].page = metadata->data;
-	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 
-	/* Mark the fastpath for future XDP doorbell */
-	fp->xdp_xmit = 1;
-
 	return 0;
 }
 
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+		      struct xdp_frame **frames, u32 flags)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	struct device *dmadev = &edev->pdev->dev;
+	struct qede_tx_queue *xdp_tx;
+	struct xdp_frame *xdpf;
+	dma_addr_t mapping;
+	int i, drops = 0;
+	u16 xdp_prod;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	if (unlikely(!netif_running(dev)))
+		return -ENETDOWN;
+
+	i = smp_processor_id() % edev->total_xdp_queues;
+	xdp_tx = edev->fp_array[i].xdp_tx;
+
+	spin_lock(&xdp_tx->xdp_tx_lock);
+
+	for (i = 0; i < n_frames; i++) {
+		xdpf = frames[i];
+
+		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
+					 DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dmadev, mapping))) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+
+			continue;
+		}
+
+		if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
+					   NULL, xdpf))) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	if (flags & XDP_XMIT_FLUSH) {
+		xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
+
+		xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+		qede_update_tx_producer(xdp_tx);
+	}
+
+	spin_unlock(&xdp_tx->xdp_tx_lock);
+
+	return n_frames - drops;
+}
+
 int qede_txq_has_work(struct qede_tx_queue *txq)
 {
 	u16 hw_bd_cons;
@@ -362,20 +405,31 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
 
 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-	u16 hw_bd_cons, idx;
+	struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
+	struct device *dev = &edev->pdev->dev;
+	struct xdp_frame *xdpf;
+	u16 hw_bd_cons;
 
 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
 	barrier();
 
 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+		xdp_info = xdp_arr + txq->sw_tx_cons;
+		xdpf = xdp_info->xdpf;
+
+		if (xdpf) {
+			dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
+					 DMA_TO_DEVICE);
+			xdp_return_frame(xdpf);
+
+			xdp_info->xdpf = NULL;
+		} else {
+			dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+				       DMA_BIDIRECTIONAL);
+			__free_page(xdp_info->page);
+		}
+
 		qed_chain_consume(&txq->tx_pbl);
-		idx = txq->sw_tx_cons;
-
-		dma_unmap_page(&edev->pdev->dev,
-			       txq->sw_tx_ring.xdp[idx].mapping,
-			       PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(txq->sw_tx_ring.xdp[idx].page);
-
 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 		txq->xmit_pkts++;
 	}
@@ -1064,32 +1118,59 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	switch (act) {
 	case XDP_TX:
 		/* We need the replacement buffer before transmit. */
-		if (qede_alloc_rx_buffer(rxq, true)) {
+		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
 			qede_recycle_rx_bd_ring(rxq, 1);
+
 			trace_xdp_exception(edev->ndev, prog, act);
-			return false;
+			break;
 		}
 
 		/* Now if there's a transmission problem, we'd still have to
 		 * throw current buffer, as replacement was already allocated.
 		 */
-		if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
-			dma_unmap_page(rxq->dev, bd->mapping,
-				       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
+					   *data_offset, *len, bd->data,
+					   NULL))) {
+			dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+				       rxq->data_direction);
 			__free_page(bd->data);
+
 			trace_xdp_exception(edev->ndev, prog, act);
+		} else {
+			dma_sync_single_for_device(rxq->dev,
+						   bd->mapping + *data_offset,
+						   *len, rxq->data_direction);
+			fp->xdp_xmit |= QEDE_XDP_TX;
 		}
 
 		/* Regardless, we've consumed an Rx BD */
 		qede_rx_bd_ring_consume(rxq);
-		return false;
+		break;
+	case XDP_REDIRECT:
+		/* We need the replacement buffer before transmit. */
+		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+			qede_recycle_rx_bd_ring(rxq, 1);
+			trace_xdp_exception(edev->ndev, prog, act);
+			break;
+		}
+
+		dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+			       rxq->data_direction);
+
+		if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
+			DP_NOTICE(edev, "Failed to redirect the packet\n");
+		else
+			fp->xdp_xmit |= QEDE_XDP_REDIRECT;
+
+		qede_rx_bd_ring_consume(rxq);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
-		/* Fall through */
+		fallthrough;
 	case XDP_ABORTED:
 		trace_xdp_exception(edev->ndev, prog, act);
-		/* Fall through */
+		fallthrough;
 	case XDP_DROP:
 		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
 	}
@@ -1353,6 +1434,9 @@ int qede_poll(struct napi_struct *napi, int budget)
 			       napi);
 	struct qede_dev *edev = fp->edev;
 	int rx_work_done = 0;
+	u16 xdp_prod;
+
+	fp->xdp_xmit = 0;
 
 	if (likely(fp->type & QEDE_FASTPATH_TX)) {
 		int cos;
@@ -1380,14 +1464,16 @@ int qede_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
-	if (fp->xdp_xmit) {
-		u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+	if (fp->xdp_xmit & QEDE_XDP_TX) {
+		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
 
-		fp->xdp_xmit = 0;
 		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
 		qede_update_tx_producer(fp->xdp_tx);
 	}
 
+	if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+		xdp_do_flush_map();
+
 	return rx_work_done;
 }
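With XDP_REDIRECT handled in qede_rx_xdp() and .ndo_xdp_xmit() implemented, a qede device can now both originate and sink redirected frames. As a usage illustration only (a standard libbpf-style XDP program, not part of this patch; the target ifindex is hypothetical):

	// SPDX-License-Identifier: GPL-2.0
	/* Redirect every received packet to another netdev; on the egress
	 * side, qede_xdp_transmit() is what finally posts these frames.
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_redirect_all(struct xdp_md *ctx)
	{
		return bpf_redirect(7 /* hypothetical target ifindex */, 0);
	}

	char _license[] SEC("license") = "GPL";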
.ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, - .ndo_features_check = qede_features_check, - .ndo_bpf = qede_xdp, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, + .ndo_features_check = qede_features_check, + .ndo_bpf = qede_xdp, #ifdef CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = qede_rx_flow_steer, + .ndo_rx_flow_steer = qede_rx_flow_steer, #endif - .ndo_setup_tc = qede_setup_tc_offload, + .ndo_xdp_xmit = qede_xdp_transmit, + .ndo_setup_tc = qede_setup_tc_offload, }; static const struct net_device_ops qede_netdev_vf_ops = { - .ndo_open = qede_open, - .ndo_stop = qede_close, - .ndo_start_xmit = qede_start_xmit, - .ndo_select_queue = qede_select_queue, - .ndo_set_rx_mode = qede_set_rx_mode, - .ndo_set_mac_address = qede_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = qede_change_mtu, - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, - .ndo_fix_features = qede_fix_features, - .ndo_set_features = qede_set_features, - .ndo_get_stats64 = qede_get_stats64, - .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, - .ndo_features_check = qede_features_check, + .ndo_open = qede_open, + .ndo_stop = qede_close, + .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, + .ndo_set_rx_mode = qede_set_rx_mode, + .ndo_set_mac_address = qede_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, + .ndo_set_features = qede_set_features, + .ndo_get_stats64 = qede_get_stats64, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, + .ndo_features_check = qede_features_check, }; static const struct net_device_ops qede_netdev_vf_xdp_ops = { - .ndo_open = qede_open, - .ndo_stop = qede_close, - .ndo_start_xmit = qede_start_xmit, - .ndo_select_queue = qede_select_queue, - .ndo_set_rx_mode = qede_set_rx_mode, - .ndo_set_mac_address = qede_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = qede_change_mtu, - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, - .ndo_fix_features = qede_fix_features, - .ndo_set_features = qede_set_features, - .ndo_get_stats64 = qede_get_stats64, - .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, - .ndo_features_check = qede_features_check, - .ndo_bpf = qede_xdp, + .ndo_open = qede_open, + .ndo_stop = qede_close, + .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, + .ndo_set_rx_mode = qede_set_rx_mode, + .ndo_set_mac_address = qede_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, + .ndo_set_features = qede_set_features, + .ndo_get_stats64 = qede_get_stats64, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, + .ndo_features_check = qede_features_check, + .ndo_bpf = qede_xdp, + .ndo_xdp_xmit = qede_xdp_transmit, }; /* ------------------------------------------------------------------------- @@ -1442,6 +1444,11 @@ static void qede_set_tpa_param(struct qede_rx_queue *rxq) /* This 
function allocates all memory needed per Rx queue */ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { + struct qed_chain_init_params params = { + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = RX_RING_SIZE, + }; + struct qed_dev *cdev = edev->cdev; int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; @@ -1477,24 +1484,20 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } /* Allocate FW Rx ring */ - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_NEXT_PTR, - QED_CHAIN_CNT_TYPE_U16, - RX_RING_SIZE, - sizeof(struct eth_rx_bd), - &rxq->rx_bd_ring, NULL); + params.mode = QED_CHAIN_MODE_NEXT_PTR; + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; + params.elem_size = sizeof(struct eth_rx_bd); + + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, ¶ms); if (rc) goto err; /* Allocate FW completion ring */ - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - RX_RING_SIZE, - sizeof(union eth_rx_cqe), - &rxq->rx_comp_ring, NULL); + params.mode = QED_CHAIN_MODE_PBL; + params.intended_use = QED_CHAIN_USE_TO_CONSUME; + params.elem_size = sizeof(union eth_rx_cqe); + + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, ¶ms); if (rc) goto err; @@ -1531,7 +1534,13 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) /* This function allocates all memory needed per Tx queue */ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { - union eth_tx_bd_types *p_virt; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = edev->q_num_tx_buffers, + .elem_size = sizeof(union eth_tx_bd_types), + }; int size, rc; txq->num_tx_buffers = edev->q_num_tx_buffers; @@ -1549,13 +1558,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) goto err; } - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - txq->num_tx_buffers, - sizeof(*p_virt), - &txq->tx_pbl, NULL); + rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, ¶ms); if (rc) goto err; @@ -1711,6 +1714,7 @@ static void qede_init_fp(struct qede_dev *edev) { int queue_id, rxq_index = 0, txq_index = 0; struct qede_fastpath *fp; + bool init_xdp = false; for_each_queue(queue_id) { fp = &edev->fp_array[queue_id]; @@ -1722,6 +1726,9 @@ static void qede_init_fp(struct qede_dev *edev) fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, rxq_index); fp->xdp_tx->is_xdp = 1; + + spin_lock_init(&fp->xdp_tx->xdp_tx_lock); + init_xdp = true; } if (fp->type & QEDE_FASTPATH_RX) { @@ -1737,6 +1744,13 @@ static void qede_init_fp(struct qede_dev *edev) /* Driver have no error path from here */ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, fp->rxq->rxq_id) < 0); + + if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, + NULL)) { + DP_NOTICE(edev, + "Failed to register XDP memory model\n"); + } } if (fp->type & QEDE_FASTPATH_TX) { @@ -1762,6 +1776,11 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } + + if (init_xdp) { + edev->total_xdp_queues = QEDE_RSS_COUNT(edev); + DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues); + } } static int 
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 7071dc92b4e2..4d58dc8943f0 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -11,6 +11,7 @@
 #include <asm/byteorder.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/qed/common_hsi.h>
 
@@ -26,9 +27,9 @@ enum qed_chain_mode {
 };
 
 enum qed_chain_use_mode {
-	QED_CHAIN_USE_TO_PRODUCE,	/* Chain starts empty */
-	QED_CHAIN_USE_TO_CONSUME,	/* Chain starts full */
-	QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
+	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
+	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
 };
 
 enum qed_chain_cnt_type {
@@ -40,197 +41,230 @@ enum qed_chain_cnt_type {
 };
 
 struct qed_chain_next {
-	struct regpair	next_phys;
-	void		*next_virt;
+	struct regpair				next_phys;
+	void					*next_virt;
 };
 
 struct qed_chain_pbl_u16 {
-	u16	prod_page_idx;
-	u16	cons_page_idx;
+	u16					prod_page_idx;
+	u16					cons_page_idx;
 };
 
 struct qed_chain_pbl_u32 {
-	u32	prod_page_idx;
-	u32	cons_page_idx;
-};
-
-struct qed_chain_ext_pbl {
-	dma_addr_t	p_pbl_phys;
-	void		*p_pbl_virt;
+	u32					prod_page_idx;
+	u32					cons_page_idx;
 };
 
 struct qed_chain_u16 {
 	/* Cyclic index of next element to produce/consme */
-	u16	prod_idx;
-	u16	cons_idx;
+	u16					prod_idx;
+	u16					cons_idx;
 };
 
 struct qed_chain_u32 {
 	/* Cyclic index of next element to produce/consme */
-	u32	prod_idx;
-	u32	cons_idx;
+	u32					prod_idx;
+	u32					cons_idx;
 };
 
 struct addr_tbl_entry {
-	void		*virt_addr;
-	dma_addr_t	dma_map;
+	void					*virt_addr;
+	dma_addr_t				dma_map;
 };
 
 struct qed_chain {
-	/* fastpath portion of the chain - required for commands such
+	/* Fastpath portion of the chain - required for commands such
 	 * as produce / consume.
 	 */
+
 	/* Point to next element to produce/consume */
-	void		*p_prod_elem;
-	void		*p_cons_elem;
+	void					*p_prod_elem;
+	void					*p_cons_elem;
 
 	/* Fastpath portions of the PBL [if exists] */
+
 	struct {
 		/* Table for keeping the virtual and physical addresses of the
 		 * chain pages, respectively to the physical addresses
 		 * in the pbl table.
 		 */
-		struct addr_tbl_entry	*pp_addr_tbl;
+		struct addr_tbl_entry		*pp_addr_tbl;
 
 		union {
-			struct qed_chain_pbl_u16	u16;
-			struct qed_chain_pbl_u32	u32;
-		} c;
-	} pbl;
+			struct qed_chain_pbl_u16 u16;
+			struct qed_chain_pbl_u32 u32;
+		} c;
+	} pbl;
 
 	union {
-		struct qed_chain_u16	chain16;
-		struct qed_chain_u32	chain32;
-	} u;
+		struct qed_chain_u16		chain16;
+		struct qed_chain_u32		chain32;
+	} u;
 
 	/* Capacity counts only usable elements */
-	u32	capacity;
-	u32	page_cnt;
+	u32					capacity;
+	u32					page_cnt;
 
-	enum qed_chain_mode	mode;
+	enum qed_chain_mode			mode;
 
 	/* Elements information for fast calculations */
-	u16	elem_per_page;
-	u16	elem_per_page_mask;
-	u16	elem_size;
-	u16	next_page_mask;
-	u16	usable_per_page;
-	u8	elem_unusable;
+	u16					elem_per_page;
+	u16					elem_per_page_mask;
+	u16					elem_size;
+	u16					next_page_mask;
+	u16					usable_per_page;
+	u8					elem_unusable;
 
-	u8	cnt_type;
+	enum qed_chain_cnt_type			cnt_type;
 
 	/* Slowpath of the chain - required for initialization and destruction,
 	 * but isn't involved in regular functionality.
 	 */
+
+	u32					page_size;
+
 	/* Base address of a pre-allocated buffer for pbl */
 	struct {
-		dma_addr_t	p_phys_table;
-		void		*p_virt_table;
-	} pbl_sp;
+		__le64				*table_virt;
+		dma_addr_t			table_phys;
+		size_t				table_size;
+	} pbl_sp;
 
 	/* Address of first page of the chain - the address is required
 	 * for fastpath operation [consume/produce] but only for the SINGLE
 	 * flavour which isn't considered fastpath [== SPQ].
 	 */
-	void		*p_virt_addr;
-	dma_addr_t	p_phys_addr;
+	void					*p_virt_addr;
+	dma_addr_t				p_phys_addr;
 
 	/* Total number of elements [for entire chain] */
-	u32	size;
+	u32					size;
 
-	u8	intended_use;
+	enum qed_chain_use_mode			intended_use;
 
-	bool	b_external_pbl;
+	bool					b_external_pbl;
 };
 
-#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
-#define QED_CHAIN_PAGE_SIZE		(0x1000)
-#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))
+struct qed_chain_init_params {
+	enum qed_chain_mode			mode;
+	enum qed_chain_use_mode			intended_use;
+	enum qed_chain_cnt_type			cnt_type;
 
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	 \
-	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?		 \
-	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
-		   (elem_size))) : 0)
+	u32					page_size;
+	u32					num_elems;
+	size_t					elem_size;
 
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
-	((u32)(ELEMS_PER_PAGE(elem_size) -     \
-	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+	void					*ext_pbl_virt;
+	dma_addr_t				ext_pbl_phys;
+};
 
-#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
-	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define QED_CHAIN_PAGE_SIZE			SZ_4K
 
-#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
-#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+#define ELEMS_PER_PAGE(elem_size, page_size)				     \
	((page_size) / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)			     \
+	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?				     \
+	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) :     \
+	 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode)		     \
+	((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) -		     \
+	       UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode)	     \
+	DIV_ROUND_UP((elem_cnt),					     \
+		     USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
+
+#define is_chain_u16(p)							     \
+	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p)							     \
+	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
 
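Now that page_size is part of the formula, the sizing macros take it explicitly. A worked example of the arithmetic, assuming the default 4 KiB page, an 8-byte element such as struct eth_rx_bd, and a hypothetical 8192-entry ring:

/* Illustrative only: the numbers in the comments follow the macros above. */
static inline void example_chain_sizing(void)
{
	/* 4096 / 8 = 512 elements fit in one page */
	u32 per_page = ELEMS_PER_PAGE(8, QED_CHAIN_PAGE_SIZE);

	/* sizeof(struct qed_chain_next) is 16 bytes, so a next-ptr
	 * page loses 1 + (16 - 1) / 8 = 2 element slots; 510 remain
	 */
	u32 usable = USABLE_ELEMS_PER_PAGE(8, QED_CHAIN_PAGE_SIZE,
					   QED_CHAIN_MODE_NEXT_PTR);

	/* DIV_ROUND_UP(8192, 510) = 17 pages; a PBL chain with no
	 * embedded next pointer needs DIV_ROUND_UP(8192, 512) = 16
	 */
	u32 pages = QED_CHAIN_PAGE_CNT(8192, 8, QED_CHAIN_PAGE_SIZE,
				       QED_CHAIN_MODE_NEXT_PTR);

	pr_info("per_page=%u usable=%u pages=%u\n", per_page, usable, pages);
}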
 /* Accessors */
-static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+
+static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
 {
-	return p_chain->u.chain16.prod_idx;
+	return chain->u.chain16.prod_idx;
 }
 
-static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
 {
-	return p_chain->u.chain16.cons_idx;
+	return chain->u.chain16.cons_idx;
 }
 
-static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
 {
-	return p_chain->u.chain32.cons_idx;
+	return chain->u.chain32.prod_idx;
 }
 
-static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
 {
-	u16 elem_per_page = p_chain->elem_per_page;
-	u32 prod = p_chain->u.chain16.prod_idx;
-	u32 cons = p_chain->u.chain16.cons_idx;
+	return chain->u.chain32.cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
+{
+	u32 prod = qed_chain_get_prod_idx(chain);
+	u32 cons = qed_chain_get_cons_idx(chain);
+	u16 elem_per_page = chain->elem_per_page;
 	u16 used;
 
 	if (prod < cons)
 		prod += (u32)U16_MAX + 1;
 
 	used = (u16)(prod - cons);
-	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-		used -= prod / elem_per_page - cons / elem_per_page;
+	if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+		used -= (u16)(prod / elem_per_page - cons / elem_per_page);
 
-	return (u16)(p_chain->capacity - used);
+	return used;
 }
 
-static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
 {
-	u16 elem_per_page = p_chain->elem_per_page;
-	u64 prod = p_chain->u.chain32.prod_idx;
-	u64 cons = p_chain->u.chain32.cons_idx;
+	return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
+}
+
+static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
+{
+	u64 prod = qed_chain_get_prod_idx_u32(chain);
+	u64 cons = qed_chain_get_cons_idx_u32(chain);
+	u16 elem_per_page = chain->elem_per_page;
 	u32 used;
 
 	if (prod < cons)
 		prod += (u64)U32_MAX + 1;
 
 	used = (u32)(prod - cons);
-	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+	if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
 		used -= (u32)(prod / elem_per_page - cons / elem_per_page);
 
-	return p_chain->capacity - used;
+	return used;
 }
 
-static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
 {
-	return p_chain->usable_per_page;
+	return chain->capacity - qed_chain_get_elem_used_u32(chain);
 }
 
-static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
 {
-	return p_chain->elem_unusable;
+	return chain->usable_per_page;
 }
 
-static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
 {
-	return p_chain->page_cnt;
+	return chain->elem_unusable;
 }
 
-static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
 {
-	return p_chain->pbl_sp.p_phys_table;
+	return chain->page_cnt;
+}
+
+static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
+{
+	return chain->pbl_sp.table_phys;
 }
 
 /**
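qed_chain_get_elem_used() relies on prod_idx and cons_idx being free-running cyclic counters: once the producer wraps past the counter maximum while the consumer has not, prod < cons numerically, and one full counter period is added back before subtracting. A small standalone illustration of the u16 case (the u32 variant is identical with U32_MAX and u64 intermediates):

/* Wrap-safe distance between two free-running u16 ring indices,
 * mirroring the arithmetic of qed_chain_get_elem_used() above
 * (without the NEXT_PTR correction for next-pointer slots).
 */
static inline u16 example_used_u16(u16 prod_idx, u16 cons_idx)
{
	u32 prod = prod_idx;
	u32 cons = cons_idx;

	if (prod < cons)	/* producer wrapped, consumer not yet */
		prod += (u32)U16_MAX + 1;

	return (u16)(prod - cons);
}

/* e.g. example_used_u16(5, 65530) == 11: five elements produced
 * after the wrap plus six still pending from before it.
 */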
@@ -484,118 +518,6 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
 	}
 }
 
-/**
- * @brief qed_chain_init - Initalizes a basic chain struct
- *
- * @param p_chain
- * @param p_virt_addr
- * @param p_phys_addr	physical address of allocated buffer's beginning
- * @param page_cnt	number of pages in the allocated buffer
- * @param elem_size	size of each element in the chain
- * @param intended_use
- * @param mode
- */
-static inline void qed_chain_init_params(struct qed_chain *p_chain,
-					 u32 page_cnt,
-					 u8 elem_size,
-					 enum qed_chain_use_mode intended_use,
-					 enum qed_chain_mode mode,
-					 enum qed_chain_cnt_type cnt_type)
-{
-	/* chain fixed parameters */
-	p_chain->p_virt_addr = NULL;
-	p_chain->p_phys_addr = 0;
-	p_chain->elem_size = elem_size;
-	p_chain->intended_use = (u8)intended_use;
-	p_chain->mode = mode;
-	p_chain->cnt_type = (u8)cnt_type;
-
-	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
-	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
-	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
-	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
-	p_chain->next_page_mask = (p_chain->usable_per_page &
-				   p_chain->elem_per_page_mask);
-
-	p_chain->page_cnt = page_cnt;
-	p_chain->capacity = p_chain->usable_per_page * page_cnt;
-	p_chain->size = p_chain->elem_per_page * page_cnt;
-
-	p_chain->pbl_sp.p_phys_table = 0;
-	p_chain->pbl_sp.p_virt_table = NULL;
-	p_chain->pbl.pp_addr_tbl = NULL;
-}
-
-/**
- * @brief qed_chain_init_mem -
- *
- * Initalizes a basic chain struct with its chain buffers
- *
- * @param p_chain
- * @param p_virt_addr	virtual address of allocated buffer's beginning
- * @param p_phys_addr	physical address of allocated buffer's beginning
- *
- */
-static inline void qed_chain_init_mem(struct qed_chain *p_chain,
-				      void *p_virt_addr, dma_addr_t p_phys_addr)
-{
-	p_chain->p_virt_addr = p_virt_addr;
-	p_chain->p_phys_addr = p_phys_addr;
-}
-
-/**
- * @brief qed_chain_init_pbl_mem -
- *
- * Initalizes a basic chain struct with its pbl buffers
- *
- * @param p_chain
- * @param p_virt_pbl	pointer to a pre allocated side table which will hold
- *			virtual page addresses.
- * @param p_phys_pbl	pointer to a pre-allocated side table which will hold
- *			physical page addresses.
- * @param pp_virt_addr_tbl
- *			pointer to a pre-allocated side table which will hold
- *			the virtual addresses of the chain pages.
- *
- */
-static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
-					  void *p_virt_pbl,
-					  dma_addr_t p_phys_pbl,
-					  struct addr_tbl_entry *pp_addr_tbl)
-{
-	p_chain->pbl_sp.p_phys_table = p_phys_pbl;
-	p_chain->pbl_sp.p_virt_table = p_virt_pbl;
-	p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
-}
-
-/**
- * @brief qed_chain_init_next_ptr_elem -
- *
- * Initalizes a next pointer element
- *
- * @param p_chain
- * @param p_virt_curr	virtual address of a chain page of which the next
- *			pointer element is initialized
- * @param p_virt_next	virtual address of the next chain page
- * @param p_phys_next	physical address of the next chain page
- *
- */
-static inline void
-qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
-			     void *p_virt_curr,
-			     void *p_virt_next, dma_addr_t p_phys_next)
-{
-	struct qed_chain_next *p_next;
-	u32 size;
-
-	size = p_chain->elem_size * p_chain->usable_per_page;
-	p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
-
-	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
-
-	p_next->next_virt = p_virt_next;
-}
-
 /**
  * @brief qed_chain_get_last_elem -
  *
@@ -703,7 +625,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
 
 	for (i = 0; i < page_cnt; i++)
 		memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
-		       QED_CHAIN_PAGE_SIZE);
+		       p_chain->page_size);
 }
 
 #endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index a5c6854343e6..cd6a5c7e56eb 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -948,13 +948,8 @@ struct qed_common_ops {
 				    u8 dp_level);
 
 	int		(*chain_alloc)(struct qed_dev *cdev,
-				       enum qed_chain_use_mode intended_use,
-				       enum qed_chain_mode mode,
-				       enum qed_chain_cnt_type cnt_type,
-				       u32 num_elems,
-				       size_t elem_size,
-				       struct qed_chain *p_chain,
-				       struct qed_chain_ext_pbl *ext_pbl);
+				       struct qed_chain *chain,
+				       struct qed_chain_init_params *params);
 
 	void		(*chain_free)(struct qed_dev *cdev,
 				      struct qed_chain *p_chain);
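With this, chain_alloc() shrinks to three arguments: everything that used to be positional, including the external-PBL pair that previously traveled via the now-removed struct qed_chain_ext_pbl, arrives through the params structure. A sketch of how a caller holding a pre-allocated PBL might use it (function name and the chosen intended_use are illustrative, not taken from a real call site):

/* Sketch: build a PBL-mode chain on top of an already-allocated
 * page table, replacing the removed qed_chain_ext_pbl argument.
 */
static int example_alloc_with_ext_pbl(struct qed_dev *cdev,
				      const struct qed_common_ops *ops,
				      struct qed_chain *chain,
				      void *pbl_virt, dma_addr_t pbl_phys,
				      u32 num_elems, size_t elem_size)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
		.num_elems	= num_elems,
		.elem_size	= elem_size,
		.ext_pbl_virt	= pbl_virt,
		.ext_pbl_phys	= pbl_phys,
	};

	return ops->chain_alloc(cdev, chain, &params);
}

Leaving page_size zeroed presumably selects the QED_CHAIN_PAGE_SIZE default in the allocator, which keeps existing callers unchanged while letting new ones request larger pages.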