qed: Add support for freeing two ll2 buffers for corner cases

When posting a packet on the ll2 tx ring, we can provide a cookie that
will be returned upon tx completion. This cookie is the ll2 iwarp buffer,
which is then reposted to the rx ring. Part of the unaligned mpa flow is
determining when a buffer can be reposted. Each buffer needs to be sent
only once as a cookie on the tx ring. In the packed fpdu case, only the
last packet is sent with the buffer as its cookie, meaning we need to
handle the case that a cookie can be NULL on tx complete. In addition,
when an fpdu splits over two buffers but there are no more fpdus on the
second buffer, two buffers need to be provided as a cookie. To avoid
changing the ll2 interface to provide two cookies, we introduce a
piggy_buf pointer, relevant for iWARP only, that holds a pointer to a
second buffer that needs to be released during tx completion.

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Michal Kalderon 2017-10-09 12:37:52 +03:00 committed by David S. Miller
parent 469981b17a
commit d531038eeb
2 changed files with 26 additions and 0 deletions
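The change in a nutshell: the ll2 tx cookie stays a single pointer, but an iWARP buffer can now carry a second buffer in its piggy_buf field, and the tx-completion path releases both (and tolerates a NULL cookie, which packed fpdus produce for all but the last packet). Below is a minimal, self-contained userspace sketch of that pattern; the struct and function names (ll2_buff, post_rx, tx_complete) are illustrative stand-ins rather than the driver's APIs, and the real logic is in the hunks that follow.

/* Minimal userspace model of the piggy-buffer cookie pattern.
 * Names here (ll2_buff, post_rx, tx_complete) are illustrative only;
 * the driver equivalents appear in the diff below.
 */
#include <stdio.h>
#include <stdlib.h>

struct ll2_buff {
	struct ll2_buff *piggy_buf;	/* optional second buffer to release */
	void *data;
};

/* Stand-in for reposting a buffer to the rx ring
 * (qed_iwarp_ll2_post_rx in the driver).
 */
static void post_rx(struct ll2_buff *buf)
{
	printf("reposting buffer %p to the rx ring\n", buf->data);
}

/* Mirrors the tx-completion logic this patch adds: a NULL cookie is
 * legal, and a chained piggy buffer is released before the cookie
 * buffer itself.
 */
static void tx_complete(void *cookie)
{
	struct ll2_buff *buffer = cookie;
	struct ll2_buff *piggy;

	if (!buffer)	/* packed fpdu: this packet carried no buffer */
		return;

	piggy = buffer->piggy_buf;
	if (piggy) {
		buffer->piggy_buf = NULL;
		post_rx(piggy);
	}
	post_rx(buffer);
}

int main(void)
{
	struct ll2_buff first = { .data = malloc(64) };
	struct ll2_buff second = { .data = malloc(64) };

	/* An fpdu ends exactly at the end of the second buffer and no
	 * further fpdus start in it, so chain it to the first buffer:
	 * one tx completion must release both.
	 */
	first.piggy_buf = &second;

	tx_complete(NULL);	/* packed case: cookie may be NULL */
	tx_complete(&first);	/* releases 'second', then 'first' */

	free(second.data);
	free(first.data);
	return 0;
}

Chaining the second buffer through piggy_buf keeps the ll2 tx interface at one cookie per packet, which is the design choice the commit message describes.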


@@ -1846,6 +1846,12 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
 	/* vlan overload with enum iwarp_ll2_tx_queues */
 	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
 
+	/* special case of unaligned packet and not packed, need to send
+	 * both buffers as cookie to release.
+	 */
+	if (tcp_payload_size == fpdu->incomplete_bytes)
+		fpdu->mpa_buf->piggy_buf = buf;
+
 	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
 
 	/* Set first fragment to header */
@@ -2195,9 +2201,19 @@ static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
 				       bool b_last_fragment, bool b_last_packet)
 {
 	struct qed_iwarp_ll2_buff *buffer = cookie;
+	struct qed_iwarp_ll2_buff *piggy;
 	struct qed_hwfn *p_hwfn = cxt;
 
+	if (!buffer)		/* can happen in packed mpa unaligned... */
+		return;
+
 	/* this was originally an rx packet, post it back */
+	piggy = buffer->piggy_buf;
+	if (piggy) {
+		buffer->piggy_buf = NULL;
+		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
+	}
+
 	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
 
 	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
@@ -2216,6 +2232,15 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
 	if (!buffer)
 		return;
 
+	if (buffer->piggy_buf) {
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  buffer->piggy_buf->buff_size,
+				  buffer->piggy_buf->data,
+				  buffer->piggy_buf->data_phys_addr);
+
+		kfree(buffer->piggy_buf);
+	}
+
 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
 			  buffer->data, buffer->data_phys_addr);


@@ -55,6 +55,7 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
 #define QED_IWARP_HANDLE_INVAL		(0xff)
 
 struct qed_iwarp_ll2_buff {
+	struct qed_iwarp_ll2_buff *piggy_buf;
 	void *data;
 	dma_addr_t data_phys_addr;
 	u32 buff_size;