IB/iser: Maintain connection fmr_pool under a single registration descriptor

This will allow us to unify the memory registration code path across
the various methods, which vary by device capabilities. It will also
make it easier and less intrusive to remove fmr_pools from the code
when we eventually want to.
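
For orientation, the consolidated per-connection layout after this patch looks
roughly like the sketch below, reassembled from the hunks further down (field
alignment and kernel-doc spacing are approximate): the anonymous unions let one
descriptor carry either FMR or fast-registration state, and the pool keeps a
plain list of such descriptors.

/* Sketch only; see the actual hunks below for the authoritative diff. */
struct iser_reg_resources {
	union {						/* registration handle */
		struct ib_mr *mr;			/* fastreg */
		struct ib_fmr_pool *fmr_pool;		/* FMR */
	};
	union {						/* page list */
		struct ib_fast_reg_page_list *frpl;	/* fastreg */
		struct iser_page_vec *page_vec;		/* FMR */
	};
	u8 mr_valid:1;
};

struct iser_fr_pool {			/* one per connection */
	struct list_head list;		/* descriptors; a single one for FMR */
	spinlock_t lock;		/* protects the list */
	int size;
};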

The reason we use a single descriptor is to avoid taking a
redundant spinlock when working with FMRs.
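
A minimal user-space model of that rationale (illustrative only; the names
below are made up and the real driver uses the kernel list and spinlock APIs
shown in the diff): the fastreg path pops and re-adds descriptors under the
pool lock, while the FMR path owns exactly one descriptor that never leaves
the list, so it can read the list head without locking and rely on the FMR
pool's own internal serialization.

/* Illustrative user-space model, not the driver code. */
#include <pthread.h>
#include <stdio.h>

struct model_desc {
	struct model_desc *next;
	const char *name;
};

struct model_fr_pool {
	pthread_spinlock_t lock;	/* protects the descriptor list */
	struct model_desc *head;
};

/* Fastreg path: many descriptors come and go, so the lock is needed. */
static struct model_desc *model_desc_get(struct model_fr_pool *p)
{
	struct model_desc *d;

	pthread_spin_lock(&p->lock);
	d = p->head;
	p->head = d->next;
	pthread_spin_unlock(&p->lock);
	return d;
}

static void model_desc_put(struct model_fr_pool *p, struct model_desc *d)
{
	pthread_spin_lock(&p->lock);
	d->next = p->head;
	p->head = d;
	pthread_spin_unlock(&p->lock);
}

/* FMR path: a single descriptor that is never removed from the list,
 * so peeking at the head needs no lock; registrations are serialized
 * by the FMR pool itself.
 */
static struct model_desc *model_fmr_desc(struct model_fr_pool *p)
{
	return p->head;
}

int main(void)
{
	struct model_desc only = { .next = NULL, .name = "fmr-desc" };
	struct model_fr_pool pool = { .head = &only };
	struct model_desc *d;

	pthread_spin_init(&pool.lock, PTHREAD_PROCESS_PRIVATE);

	/* fastreg-style use: locked get/put */
	d = model_desc_get(&pool);
	model_desc_put(&pool, d);

	/* FMR-style use: lockless peek at the single descriptor */
	printf("FMR path uses %s without taking the lock\n",
	       model_fmr_desc(&pool)->name);

	pthread_spin_destroy(&pool.lock);
	return 0;
}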

We also change the signature of iser_reg_page_vec to make it match
iser_fast_reg_mr (and the future indirect registration method).
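
With both routines taking the same four arguments, registration can later be
dispatched through a single function pointer regardless of the device's
capabilities. A rough sketch of that shape follows; only iser_reg_page_vec's
new prototype comes from this patch, while the parameter names used for
iser_fast_reg_mr, the typedef, and the chooser function are illustrative
assumptions, not part of the driver.

/* Sketch only. */
struct iscsi_iser_task;
struct iser_data_buf;
struct iser_reg_resources;
struct iser_mem_reg;

typedef int (*iser_reg_fn)(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *mem,
			   struct iser_reg_resources *rsc,
			   struct iser_mem_reg *mem_reg);

/* FMR-based registration (signature introduced by this patch) */
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *mem_reg);

/* Fast registration work requests */
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
		     struct iser_data_buf *mem,
		     struct iser_reg_resources *rsc,
		     struct iser_mem_reg *mem_reg);

/* Hypothetical chooser, keyed off device capabilities. */
static inline iser_reg_fn iser_choose_reg_fn(int have_fastreg)
{
	return have_fastreg ? iser_fast_reg_mr : iser_reg_page_vec;
}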

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Adir Lev <adirl@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author:    Adir Lev <adirl@mellanox.com>
Date:      2015-08-06 18:32:59 +03:00
Committer: Doug Ledford <dledford@redhat.com>
Parent:    385ad87d4b
Commit:    2b3bf95810

3 changed files with 68 additions and 54 deletions

--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h

@@ -380,12 +380,20 @@ struct iser_device {
  * struct iser_reg_resources - Fast registration recources
  *
  * @mr: memory region
- * @frpl: fast reg page list
+ * @fmr_pool: pool of fmrs
+ * @frpl: fast reg page list used by frwrs
+ * @page_vec: fast reg page list used by fmr pool
  * @mr_valid: is mr valid indicator
  */
 struct iser_reg_resources {
-	struct ib_mr *mr;
-	struct ib_fast_reg_page_list *frpl;
+	union {
+		struct ib_mr *mr;
+		struct ib_fmr_pool *fmr_pool;
+	};
+	union {
+		struct ib_fast_reg_page_list *frpl;
+		struct iser_page_vec *page_vec;
+	};
 	u8 mr_valid:1;
 };
 
@@ -420,28 +428,14 @@ struct iser_fr_desc {
 /**
  * struct iser_fr_pool: connection fast registration pool
  *
- * @lock: protects fmr/fastreg pool
- * @union.fmr:
- *     @pool: FMR pool for fast registrations
- *     @page_vec: fast reg page list to hold mapped commands pages
- *                used for registration
- * @union.fastreg:
- *     @pool: Fast registration descriptors pool for fast
- *            registrations
- *     @pool_size: Size of pool
+ * @list: list of fastreg descriptors
+ * @lock: protects fmr/fastreg pool
+ * @size: size of the pool
  */
 struct iser_fr_pool {
-	spinlock_t lock;
-	union {
-		struct {
-			struct ib_fmr_pool *pool;
-			struct iser_page_vec *page_vec;
-		} fmr;
-		struct {
-			struct list_head pool;
-			int pool_size;
-		} fastreg;
-	};
+	struct list_head list;
+	spinlock_t lock;
+	int size;
 };
 
 /**

--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c

@@ -189,7 +189,7 @@ iser_reg_desc_get(struct ib_conn *ib_conn)
 	unsigned long flags;
 
 	spin_lock_irqsave(&fr_pool->lock, flags);
-	desc = list_first_entry(&fr_pool->fastreg.pool,
+	desc = list_first_entry(&fr_pool->list,
 				struct iser_fr_desc, list);
 	list_del(&desc->list);
 	spin_unlock_irqrestore(&fr_pool->lock, flags);
@@ -205,7 +205,7 @@ iser_reg_desc_put(struct ib_conn *ib_conn,
 	unsigned long flags;
 
 	spin_lock_irqsave(&fr_pool->lock, flags);
-	list_add(&desc->list, &fr_pool->fastreg.pool);
+	list_add(&desc->list, &fr_pool->list);
 	spin_unlock_irqrestore(&fr_pool->lock, flags);
 }
@@ -478,12 +478,13 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 static
 int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
 		      struct iser_data_buf *mem,
-		      struct iser_page_vec *page_vec,
+		      struct iser_reg_resources *rsc,
 		      struct iser_mem_reg *mem_reg)
 {
 	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
 	struct iser_device *device = ib_conn->device;
+	struct iser_page_vec *page_vec = rsc->page_vec;
+	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
 	struct ib_pool_fmr *fmr;
 	int ret, plen;
@@ -499,7 +500,7 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
 		return -EINVAL;
 	}
 
-	fmr = ib_fmr_pool_map_phys(fr_pool->fmr.pool,
+	fmr = ib_fmr_pool_map_phys(fmr_pool,
 				   page_vec->pages,
 				   page_vec->length,
 				   page_vec->pages[0]);
@@ -587,20 +588,23 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 	if (mem->dma_nents == 1) {
 		return iser_reg_dma(device, mem, mem_reg);
 	} else { /* use FMR for multiple dma entries */
-		err = iser_reg_page_vec(iser_task, mem,
-					fr_pool->fmr.page_vec, mem_reg);
+		struct iser_fr_desc *desc;
+
+		desc = list_first_entry(&fr_pool->list,
+					struct iser_fr_desc, list);
+		err = iser_reg_page_vec(iser_task, mem, &desc->rsc, mem_reg);
 		if (err && err != -EAGAIN) {
 			iser_data_buf_dump(mem, ibdev);
 			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
 				 mem->dma_nents,
 				 ntoh24(iser_task->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
-				 fr_pool->fmr.page_vec->data_size,
-				 fr_pool->fmr.page_vec->length,
-				 fr_pool->fmr.page_vec->offset);
-			for (i = 0; i < fr_pool->fmr.page_vec->length; i++)
+				 desc->rsc.page_vec->data_size,
+				 desc->rsc.page_vec->length,
+				 desc->rsc.page_vec->offset);
+			for (i = 0; i < desc->rsc.page_vec->length; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
-					 (unsigned long long)fr_pool->fmr.page_vec->pages[i]);
+					 (unsigned long long)desc->rsc.page_vec->pages[i]);
 		}
 		if (err)
 			return err;

--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c

@@ -204,17 +204,25 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
 	struct iser_device *device = ib_conn->device;
 	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
 	struct iser_page_vec *page_vec;
+	struct iser_fr_desc *desc;
 	struct ib_fmr_pool *fmr_pool;
 	struct ib_fmr_pool_param params;
-	int ret = -ENOMEM;
+	int ret;
 
+	INIT_LIST_HEAD(&fr_pool->list);
 	spin_lock_init(&fr_pool->lock);
 
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
 	page_vec = kmalloc(sizeof(*page_vec) +
 			   (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
 			   GFP_KERNEL);
-	if (!page_vec)
-		return ret;
+	if (!page_vec) {
+		ret = -ENOMEM;
+		goto err_frpl;
+	}
 
 	page_vec->pages = (u64 *)(page_vec + 1);
@@ -236,16 +244,20 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
 	if (IS_ERR(fmr_pool)) {
 		ret = PTR_ERR(fmr_pool);
 		iser_err("FMR allocation failed, err %d\n", ret);
-		goto err;
+		goto err_fmr;
 	}
 
-	fr_pool->fmr.page_vec = page_vec;
-	fr_pool->fmr.pool = fmr_pool;
+	desc->rsc.page_vec = page_vec;
+	desc->rsc.fmr_pool = fmr_pool;
+	list_add(&desc->list, &fr_pool->list);
 
 	return 0;
 
-err:
+err_fmr:
 	kfree(page_vec);
+err_frpl:
+	kfree(desc);
+
 	return ret;
 }
@@ -255,14 +267,18 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
 void iser_free_fmr_pool(struct ib_conn *ib_conn)
 {
 	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+	struct iser_fr_desc *desc;
+
+	desc = list_first_entry(&fr_pool->list,
+				struct iser_fr_desc, list);
+	list_del(&desc->list);
 
 	iser_info("freeing conn %p fmr pool %p\n",
-		  ib_conn, fr_pool->fmr.pool);
+		  ib_conn, desc->rsc.fmr_pool);
 
-	ib_destroy_fmr_pool(fr_pool->fmr.pool);
-	fr_pool->fmr.pool = NULL;
+	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
 
-	kfree(fr_pool->fmr.page_vec);
-	fr_pool->fmr.page_vec = NULL;
+	kfree(desc->rsc.page_vec);
+	kfree(desc);
 }
 
 static int
@@ -392,9 +408,9 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
 	struct iser_fr_desc *desc;
 	int i, ret;
 
-	INIT_LIST_HEAD(&fr_pool->fastreg.pool);
+	INIT_LIST_HEAD(&fr_pool->list);
 	spin_lock_init(&fr_pool->lock);
-	fr_pool->fastreg.pool_size = 0;
+	fr_pool->size = 0;
 	for (i = 0; i < cmds_max; i++) {
 		desc = iser_create_fastreg_desc(device->ib_device, device->pd,
 						ib_conn->pi_support);
@@ -403,8 +419,8 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
 			goto err;
 		}
 
-		list_add_tail(&desc->list, &fr_pool->fastreg.pool);
-		fr_pool->fastreg.pool_size++;
+		list_add_tail(&desc->list, &fr_pool->list);
+		fr_pool->size++;
 	}
 
 	return 0;
@@ -423,12 +439,12 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
 	struct iser_fr_desc *desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&fr_pool->fastreg.pool))
+	if (list_empty(&fr_pool->list))
 		return;
 
 	iser_info("freeing conn %p fr pool\n", ib_conn);
 
-	list_for_each_entry_safe(desc, tmp, &fr_pool->fastreg.pool, list) {
+	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
 		list_del(&desc->list);
 		iser_free_reg_res(&desc->rsc);
 		if (desc->pi_ctx)
@@ -437,9 +453,9 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
 		++i;
 	}
 
-	if (i < fr_pool->fastreg.pool_size)
+	if (i < fr_pool->size)
 		iser_warn("pool still has %d regions registered\n",
-			  fr_pool->fastreg.pool_size - i);
+			  fr_pool->size - i);
 }
 
 /**