IB/iser: Use a dedicated descriptor for login

We'll need it later with the new CQ abstraction. Also switch the
login buffers to void pointers.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 0f512b34c6 (parent 1dc7b1f10d)
Author: Sagi Grimberg, 2015-11-04 10:50:31 +02:00 (committed by Christoph Hellwig)
3 changed files with 84 additions and 78 deletions
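The payoff of giving the login exchange its own descriptor is that a receive completion can be routed by identity instead of by comparing DMA addresses or buffer pointers scattered across iser_conn. The standalone sketch below (plain userspace C with simplified stand-in types, not the kernel structures) mimics what iser_post_recvl() and iser_rcv_completion() do after this patch: the posted receive's wr_id points at the descriptor, and the completion path recognizes the login response with a single pointer comparison.

/*
 * Standalone sketch, not kernel code: the types below are simplified
 * stand-ins for the iSER structures, only to illustrate the dispatch
 * pattern this patch enables.
 */
#include <stdint.h>
#include <stdio.h>

struct login_desc {                    /* stand-in for struct iser_login_desc */
	void *req;                     /* login request buffer  */
	void *rsp;                     /* login response buffer */
};

struct rx_desc {                       /* stand-in for struct iser_rx_desc */
	char payload[128];
};

struct conn {                          /* stand-in for struct iser_conn */
	struct login_desc login_desc;
	struct rx_desc rx_descs[4];
};

struct completion {                    /* a completed receive; wr_id is the
					* opaque cookie set when posting it */
	uintptr_t wr_id;
};

static void rcv_completion(struct conn *c, struct completion *wc)
{
	void *desc = (void *)wc->wr_id;

	/* Differentiate the login response from regular rx descriptors by
	 * identity, as the patched iser_rcv_completion() now does. */
	if (desc == &c->login_desc)
		printf("login response completed, data in %p\n",
		       c->login_desc.rsp);
	else
		printf("rx descriptor %td completed\n",
		       (struct rx_desc *)desc - c->rx_descs);
}

int main(void)
{
	static struct conn c;
	char rsp[64];

	c.login_desc.rsp = rsp;

	/* Like iser_post_recvl(): wr_id points at the login descriptor. */
	struct completion login_wc = { .wr_id = (uintptr_t)&c.login_desc };
	/* Like a normal post_recv: wr_id points at an rx descriptor. */
	struct completion data_wc = { .wr_id = (uintptr_t)&c.rx_descs[2] };

	rcv_completion(&c, &login_wc);
	rcv_completion(&c, &data_wc);
	return 0;
}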


@@ -326,6 +326,25 @@ struct iser_rx_desc {
 	char		     pad[ISER_RX_PAD_SIZE];
 } __attribute__((packed));
 
+/**
+ * struct iser_login_desc - iSER login descriptor
+ *
+ * @req:           pointer to login request buffer
+ * @resp:          pointer to login response buffer
+ * @req_dma:       DMA address of login request buffer
+ * @rsp_dma:       DMA address of login response buffer
+ * @sge:           IB sge for login post recv
+ */
+struct iser_login_desc {
+	void                         *req;
+	void                         *rsp;
+	u64                          req_dma;
+	u64                          rsp_dma;
+	struct ib_sge                sge;
+} __attribute__((packed));
+
 struct iser_conn;
 struct ib_conn;
 struct iscsi_iser_task;
@@ -514,11 +533,7 @@ struct ib_conn {
  * @up_completion:    connection establishment completed
  *                    (state is ISER_CONN_UP)
  * @conn_list:        entry in ig conn list
- * @login_buf:        login data buffer (stores login parameters)
- * @login_req_buf:    login request buffer
- * @login_req_dma:    login request buffer dma address
- * @login_resp_buf:   login response buffer
- * @login_resp_dma:   login response buffer dma address
+ * @login_desc:       login descriptor
  * @rx_desc_head:     head of rx_descs cyclic buffer
  * @rx_descs:         rx buffers array (cyclic buffer)
  * @num_rx_descs:     number of rx descriptors
@@ -541,10 +556,7 @@ struct iser_conn {
 	struct completion	     ib_completion;
 	struct completion	     up_completion;
 	struct list_head	     conn_list;
-
-	char			     *login_buf;
-	char			     *login_req_buf, *login_resp_buf;
-	u64			     login_req_dma, login_resp_dma;
+	struct iser_login_desc       login_desc;
 	unsigned int		     rx_desc_head;
 	struct iser_rx_desc	     *rx_descs;
 	u32                          num_rx_descs;


@@ -174,73 +174,63 @@ static void iser_create_send_desc(struct iser_conn *iser_conn,
 static void iser_free_login_buf(struct iser_conn *iser_conn)
 {
 	struct iser_device *device = iser_conn->ib_conn.device;
+	struct iser_login_desc *desc = &iser_conn->login_desc;
 
-	if (!iser_conn->login_buf)
+	if (!desc->req)
 		return;
 
-	if (iser_conn->login_req_dma)
-		ib_dma_unmap_single(device->ib_device,
-				    iser_conn->login_req_dma,
-				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+	ib_dma_unmap_single(device->ib_device, desc->req_dma,
+			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
 
-	if (iser_conn->login_resp_dma)
-		ib_dma_unmap_single(device->ib_device,
-				    iser_conn->login_resp_dma,
-				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
+			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
 
-	kfree(iser_conn->login_buf);
+	kfree(desc->req);
+	kfree(desc->rsp);
 
 	/* make sure we never redo any unmapping */
-	iser_conn->login_req_dma = 0;
-	iser_conn->login_resp_dma = 0;
-	iser_conn->login_buf = NULL;
+	desc->req = NULL;
+	desc->rsp = NULL;
 }
 
 static int iser_alloc_login_buf(struct iser_conn *iser_conn)
 {
 	struct iser_device *device = iser_conn->ib_conn.device;
-	int req_err, resp_err;
-
-	BUG_ON(device == NULL);
-
-	iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
-				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-	if (!iser_conn->login_buf)
-		goto out_err;
-
-	iser_conn->login_req_buf = iser_conn->login_buf;
-	iser_conn->login_resp_buf = iser_conn->login_buf +
-						ISCSI_DEF_MAX_RECV_SEG_LEN;
-
-	iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
-						     iser_conn->login_req_buf,
-						     ISCSI_DEF_MAX_RECV_SEG_LEN,
-						     DMA_TO_DEVICE);
-
-	iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
-						      iser_conn->login_resp_buf,
-						      ISER_RX_LOGIN_SIZE,
-						      DMA_FROM_DEVICE);
-
-	req_err = ib_dma_mapping_error(device->ib_device,
-				       iser_conn->login_req_dma);
-	resp_err = ib_dma_mapping_error(device->ib_device,
-					iser_conn->login_resp_dma);
-
-	if (req_err || resp_err) {
-		if (req_err)
-			iser_conn->login_req_dma = 0;
-		if (resp_err)
-			iser_conn->login_resp_dma = 0;
-		goto free_login_buf;
-	}
+	struct iser_login_desc *desc = &iser_conn->login_desc;
+
+	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+	if (!desc->req)
+		return -ENOMEM;
+
+	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
+					  ISCSI_DEF_MAX_RECV_SEG_LEN,
+					  DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(device->ib_device,
+				 desc->req_dma))
+		goto free_req;
+
+	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+	if (!desc->rsp)
+		goto unmap_req;
+
+	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
+					  ISER_RX_LOGIN_SIZE,
+					  DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(device->ib_device,
+				 desc->rsp_dma))
+		goto free_rsp;
+
 	return 0;
 
-free_login_buf:
-	iser_free_login_buf(iser_conn);
-
-out_err:
-	iser_err("unable to alloc or map login buf\n");
+free_rsp:
+	kfree(desc->rsp);
+unmap_req:
+	ib_dma_unmap_single(device->ib_device, desc->req_dma,
+			    ISCSI_DEF_MAX_RECV_SEG_LEN,
+			    DMA_TO_DEVICE);
+free_req:
+	kfree(desc->req);
+
 	return -ENOMEM;
 }
@@ -520,25 +510,25 @@ int iser_send_control(struct iscsi_conn *conn,
 	data_seg_len = ntoh24(task->hdr->dlength);
 
 	if (data_seg_len > 0) {
+		struct iser_login_desc *desc = &iser_conn->login_desc;
 		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
+
 		if (task != conn->login_task) {
 			iser_err("data present on non login task!!!\n");
 			goto send_control_error;
 		}
 
-		ib_dma_sync_single_for_cpu(device->ib_device,
-			iser_conn->login_req_dma, task->data_count,
-			DMA_TO_DEVICE);
+		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
+					   task->data_count, DMA_TO_DEVICE);
 
-		memcpy(iser_conn->login_req_buf, task->data, task->data_count);
+		memcpy(desc->req, task->data, task->data_count);
 
-		ib_dma_sync_single_for_device(device->ib_device,
-			iser_conn->login_req_dma, task->data_count,
-			DMA_TO_DEVICE);
+		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
+					      task->data_count, DMA_TO_DEVICE);
 
-		tx_dsg->addr = iser_conn->login_req_dma;
+		tx_dsg->addr = desc->req_dma;
 		tx_dsg->length = task->data_count;
 		tx_dsg->lkey = device->pd->local_dma_lkey;
 		mdesc->num_sge = 2;
 	}
@@ -572,27 +562,31 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
 						   ib_conn);
 	struct iscsi_hdr *hdr;
+	char *data;
 	u64 rx_dma;
 	int rx_buflen, outstanding, count, err;
 
 	/* differentiate between login to all other PDUs */
-	if ((char *)rx_desc == iser_conn->login_resp_buf) {
-		rx_dma = iser_conn->login_resp_dma;
+	if (rx_desc == (void *)&iser_conn->login_desc) {
+		rx_dma = iser_conn->login_desc.rsp_dma;
 		rx_buflen = ISER_RX_LOGIN_SIZE;
+		hdr = iser_conn->login_desc.rsp + sizeof(struct iser_hdr);
+		data = iser_conn->login_desc.rsp + ISER_HEADERS_LEN;
 	} else {
 		rx_dma = rx_desc->dma_addr;
 		rx_buflen = ISER_RX_PAYLOAD_SIZE;
+		hdr = &rx_desc->iscsi_header;
+		data = rx_desc->data;
 	}
 
 	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
 				   rx_buflen, DMA_FROM_DEVICE);
 
-	hdr = &rx_desc->iscsi_header;
 	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
 		 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
 
-	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
+	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data,
 			rx_xfer_len - ISER_HEADERS_LEN);
 
 	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
@@ -604,7 +598,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 	 * for the posted rx bufs refcount to become zero handles everything */
 	ib_conn->post_recv_buf_count--;
 
-	if (rx_dma == iser_conn->login_resp_dma)
+	if (rx_desc == (void *)&iser_conn->login_desc)
 		return;
 
 	outstanding = ib_conn->post_recv_buf_count;


@@ -1047,17 +1047,17 @@ int iser_post_recvl(struct iser_conn *iser_conn)
 {
 	struct ib_recv_wr rx_wr, *rx_wr_failed;
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
-	struct ib_sge sge;
+	struct iser_login_desc *desc = &iser_conn->login_desc;
 	int ib_ret;
 
-	sge.addr = iser_conn->login_resp_dma;
-	sge.length = ISER_RX_LOGIN_SIZE;
-	sge.lkey = ib_conn->device->pd->local_dma_lkey;
+	desc->sge.addr = desc->rsp_dma;
+	desc->sge.length = ISER_RX_LOGIN_SIZE;
+	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;
 
-	rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
-	rx_wr.sg_list = &sge;
+	rx_wr.wr_id = (uintptr_t)desc;
+	rx_wr.sg_list = &desc->sge;
 	rx_wr.num_sge = 1;
 	rx_wr.next = NULL;
 
 	ib_conn->post_recv_buf_count++;
 	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
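
The last hunk is also what the commit message's "new CQ abstraction" remark points at: once the login response has a descriptor of its own, that descriptor can later carry its own completion entry and handler, so the CQ layer can dispatch straight to it instead of special-casing wr_id values. A rough standalone sketch of that dispatch pattern follows, with simplified stand-in types rather than the RDMA core's ib_cqe/ib_wc definitions.

/*
 * Rough standalone sketch of the follow-up direction: each work request
 * carries a completion entry with a done() callback, and the handler
 * recovers its descriptor via container_of(). Types here are simplified
 * stand-ins, not the kernel's ib_cqe/ib_wc definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe;

struct wc {                            /* stand-in for a work completion */
	struct cqe *wr_cqe;
};

struct cqe {                           /* stand-in for the per-WR completion entry */
	void (*done)(struct wc *wc);
};

struct login_desc {                    /* iser_login_desc would embed its own cqe */
	void *rsp;
	struct cqe cqe;
};

static void login_rsp_done(struct wc *wc)
{
	struct login_desc *desc =
		container_of(wc->wr_cqe, struct login_desc, cqe);

	printf("login response landed in %p\n", desc->rsp);
}

int main(void)
{
	char rsp[64];
	struct login_desc desc = { .rsp = rsp, .cqe.done = login_rsp_done };

	/* The CQ layer would do this for each polled completion. */
	struct wc wc = { .wr_cqe = &desc.cqe };
	wc.wr_cqe->done(&wc);
	return 0;
}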