bnxt_en: Enhance bnxt_alloc_ring()/bnxt_free_ring().

To support level 2 context page memory structures, enhance the
bnxt_ring_mem_info structure with a "depth" field to specify the page
level and add a flag to specify using full pages for L1 and L2 page
tables.  This is needed to support RDMA functionality on 57500 chips
since RDMA requires more context memory.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Michael Chan, 2018-12-20 03:38:49 -05:00; committed by David S. Miller
commit 4f49b2b8d4, parent 760b6d3341
2 changed files with 16 additions and 6 deletions
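A note on why "depth" is needed: a page table is an array of 64-bit entries, one per mapped page, so a single table of page_size bytes can only map page_size / 8 pages, and the larger context areas RDMA needs call for a second level of tables. Below is a minimal standalone sketch of that arithmetic, assuming 4K pages; ctx_page_level, CTX_PAGE_SIZE and PTES_PER_PAGE are hypothetical names, and the driver derives its real limits from its own page-size constants.

#include <stdio.h>

#define CTX_PAGE_SIZE	4096UL
#define PTES_PER_PAGE	(CTX_PAGE_SIZE / 8)	/* 512 8-byte PTEs per 4K table */

/* How many levels of page tables does 'bytes' of context memory need? */
static int ctx_page_level(unsigned long bytes)
{
	unsigned long pages = (bytes + CTX_PAGE_SIZE - 1) / CTX_PAGE_SIZE;

	if (pages <= 1)
		return 0;		/* a single page needs no table */
	if (pages <= PTES_PER_PAGE)
		return 1;		/* one table maps up to 2 MB here */
	return 2;			/* L1 table -> L2 tables -> pages */
}

int main(void)
{
	printf("2 MB  -> level %d\n", ctx_page_level(2UL << 20));	/* 1 */
	printf("64 MB -> level %d\n", ctx_page_level(64UL << 20));	/* 2 */
	return 0;
}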

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -2375,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 		rmem->pg_arr[i] = NULL;
 	}
 	if (rmem->pg_tbl) {
-		dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+		size_t pg_tbl_size = rmem->nr_pages * 8;
+
+		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+			pg_tbl_size = rmem->page_size;
+		dma_free_coherent(&pdev->dev, pg_tbl_size,
 				  rmem->pg_tbl, rmem->pg_tbl_map);
 		rmem->pg_tbl = NULL;
 	}
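The sizing rule in the hunk above (and again in the allocation hunk below) can be condensed into a few lines of standalone C; struct rmem_like, RMEM_USE_FULL_PAGE and pg_tbl_bytes are hypothetical stand-ins for struct bnxt_ring_mem_info, BNXT_RMEM_USE_FULL_PAGE_FLAG and the open-coded computation, so treat this as a sketch rather than driver code.

#include <assert.h>
#include <stddef.h>

struct rmem_like {			/* stand-in for struct bnxt_ring_mem_info */
	int nr_pages;
	int page_size;
	unsigned int flags;
#define RMEM_USE_FULL_PAGE	0x4	/* mirrors BNXT_RMEM_USE_FULL_PAGE_FLAG */
};

static size_t pg_tbl_bytes(const struct rmem_like *rmem)
{
	size_t size = rmem->nr_pages * 8;	/* one 64-bit PTE per mapped page */

	if (rmem->flags & RMEM_USE_FULL_PAGE)
		size = rmem->page_size;		/* L1/L2 tables occupy whole pages */
	return size;
}

int main(void)
{
	struct rmem_like ring = { .nr_pages = 16, .page_size = 4096, .flags = 0 };
	struct rmem_like ctx  = { .nr_pages = 16, .page_size = 4096,
				  .flags = RMEM_USE_FULL_PAGE };

	assert(pg_tbl_bytes(&ring) == 128);	/* just enough PTEs for a ring */
	assert(pg_tbl_bytes(&ctx) == 4096);	/* rounded up to a full page */
	return 0;
}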
@@ -2393,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 
 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
 		valid_bit = PTU_PTE_VALID;
-	if (rmem->nr_pages > 1) {
-		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
-						  rmem->nr_pages * 8,
+	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+		size_t pg_tbl_size = rmem->nr_pages * 8;
+
+		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+			pg_tbl_size = rmem->page_size;
+		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
 						  &rmem->pg_tbl_map,
 						  GFP_KERNEL);
 		if (!rmem->pg_tbl)
@@ -2412,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 		if (!rmem->pg_arr[i])
 			return -ENOMEM;
 
-		if (rmem->nr_pages > 1) {
+		if (rmem->nr_pages > 1 || rmem->depth > 0) {
 			if (i == rmem->nr_pages - 2 &&
 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
 				extra_bits |= PTU_PTE_NEXT_TO_LAST;

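After the table is allocated, the pre-existing loop in bnxt_alloc_ring() (only its entry condition changes in the last hunk) writes one 64-bit PTE per data page: every entry carries the valid bit when BNXT_RMEM_VALID_PTE_FLAG is set, and the last and second-to-last pages are additionally tagged for ring memory (BNXT_RMEM_RING_PTE_FLAG). Below is a standalone sketch of that fill pattern with stand-in PTE_* values; the driver also converts each entry with cpu_to_le64(), which is omitted here.

#include <stdint.h>

#define PTE_VALID		0x1ULL	/* stand-in for PTU_PTE_VALID */
#define PTE_LAST		0x2ULL	/* stand-in for PTU_PTE_LAST */
#define PTE_NEXT_TO_LAST	0x4ULL	/* stand-in for PTU_PTE_NEXT_TO_LAST */

/* Ring-memory case: mark the last two pages so the hardware sees the ring end. */
static void fill_ring_ptes(uint64_t *tbl, const uint64_t *dma, int n)
{
	for (int i = 0; i < n; i++) {
		uint64_t extra = PTE_VALID;

		if (i == n - 1)
			extra |= PTE_LAST;
		else if (i == n - 2)
			extra |= PTE_NEXT_TO_LAST;
		tbl[i] = dma[i] | extra;	/* DMA address of page i + marker bits */
	}
}

int main(void)
{
	uint64_t dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	uint64_t tbl[4];

	fill_ring_ptes(tbl, dma, 4);
	return tbl[3] == (0x4000 | PTE_VALID | PTE_LAST) ? 0 : 1;
}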
drivers/net/ethernet/broadcom/bnxt/bnxt.h

@@ -617,9 +617,12 @@ struct bnxt_sw_rx_agg_bd {
 struct bnxt_ring_mem_info {
 	int			nr_pages;
 	int			page_size;
-	u32			flags;
+	u16			flags;
 #define BNXT_RMEM_VALID_PTE_FLAG	1
 #define BNXT_RMEM_RING_PTE_FLAG	2
+#define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
+
+	u16			depth;
 
 	void			**pg_arr;
 	dma_addr_t		*dma_arr;
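Finally, a hedged guess at how a caller might combine the new fields when carving out a two-level context area; the real users of depth and BNXT_RMEM_USE_FULL_PAGE_FLAG arrive in follow-up patches, so ring_mem_like and setup_two_level_ctx below are purely illustrative stand-ins.

#include <stdint.h>

struct ring_mem_like {			/* stand-in for struct bnxt_ring_mem_info */
	int nr_pages;
	int page_size;
	uint16_t flags;
#define RMEM_VALID_PTE		0x1	/* mirrors BNXT_RMEM_VALID_PTE_FLAG */
#define RMEM_USE_FULL_PAGE	0x4	/* mirrors BNXT_RMEM_USE_FULL_PAGE_FLAG */
	uint16_t depth;
};

/* Hypothetical caller-side setup for a two-level (L1 + L2) context area. */
static void setup_two_level_ctx(struct ring_mem_like *rmem, int nr_pages)
{
	rmem->nr_pages = nr_pages;		/* data pages at the bottom level */
	rmem->page_size = 4096;
	rmem->depth = 2;			/* L1 table -> L2 tables -> pages */
	/* Context PTEs carry the valid bit; L1/L2 tables use full pages. */
	rmem->flags = RMEM_VALID_PTE | RMEM_USE_FULL_PAGE;
}

int main(void)
{
	struct ring_mem_like rmem;

	setup_two_level_ctx(&rmem, 4096);	/* e.g. 16 MB of context memory */
	return rmem.depth == 2 ? 0 : 1;
}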