cxgbit: Abort the TCP connection in case of data out timeout
If DDP is programmed for a WRITE cmd and the Data-Out timer expires, abort the TCP connection before freeing the cmd, to avoid any possibility of DDP after the cmd has been freed.

Signed-off-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent b849b45675
commit 1ae01724ae
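At a glance, the change has two halves: a new synchronous cxgbit_abort_conn() helper on the connection-management side, and a check in cxgbit_release_cmd() that calls it when a DDP-mapped WRITE is being freed short of its expected length. The following is a condensed sketch of the release-path half, with the surrounding context of the real hunk (shown last below) elided, so it is illustrative rather than the literal driver code:

	/* Data-Out timed out: the initiator never sent the whole WRITE,
	 * so the adapter may still have DDP state pointing at this cmd.
	 * Tear down the TCP connection before the cmd's pages go away.
	 */
	if (unlikely(cmd->write_data_done != cmd->se_cmd.data_length))
		cxgbit_abort_conn(csk);

	/* Only now release the page pods backing the DDP mapping. */
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);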
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
 	CSK_LOGIN_PDU_DONE,
 	CSK_LOGIN_DONE,
 	CSK_DDP_ENABLE,
+	CSK_ABORT_RPL_WAIT,
 };
 
 struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
 int cxgbit_setup_conn_digest(struct cxgbit_sock *);
 int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
 void cxgbit_free_conn(struct iscsi_conn *);
 extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
 int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
 	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	__kfree_skb(skb);
+
+	if (csk->com.state != CSK_STATE_ESTABLISHED)
+		goto no_abort;
+
+	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+	csk->com.state = CSK_STATE_ABORTING;
+
+	cxgbit_send_abort_req(csk);
+
+	return;
+
+no_abort:
+	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+	cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+	cxgbit_get_csk(csk);
+	cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+	spin_lock_bh(&csk->lock);
+	if (csk->lock_owner) {
+		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+		__skb_queue_tail(&csk->backlogq, skb);
+	} else {
+		__cxgbit_abort_conn(csk, skb);
+	}
+	spin_unlock_bh(&csk->lock);
+
+	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+			      csk->tid, 600, __func__);
+}
+
 void cxgbit_free_conn(struct iscsi_conn *conn)
 {
 	struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
 
 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
+	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
 	pr_debug("%s: csk %p; tid %u; state %d\n",
 		 __func__, csk, csk->tid, csk->com.state);
 
 	switch (csk->com.state) {
 	case CSK_STATE_ABORTING:
 		csk->com.state = CSK_STATE_DEAD;
+		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+			cxgbit_wake_up(&csk->com.wr_wait, __func__,
+				       rpl->status);
 		cxgbit_put_csk(csk);
 		break;
 	default:
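A note on how the new helper is made safe to call from the command release path: cxgbit_abort_conn() takes an extra reference on the csk and, if another context currently owns the socket lock (csk->lock_owner), it does not issue the abort directly; instead it queues the zero-length skb on csk->backlogq with __cxgbit_abort_conn() as its backlog handler, so the lock owner runs the abort when it drains the backlog. In either case the caller then sleeps in cxgbit_wait_for_reply() until cxgbit_abort_rpl_rss() sees the ABORT_RPL for this tid and, via the new CSK_ABORT_RPL_WAIT flag, completes the wr_wait. Only after that does cxgbit_release_cmd() (next hunk) go on to release the DDP page pods, which closes the window described in the commit message.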
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 			struct cxgbit_device *cdev = csk->com.cdev;
 			struct cxgbi_ppm *ppm = cdev2ppm(cdev);
 
+			/* Abort the TCP conn if DDP is not complete to
+			 * avoid any possibility of DDP after freeing
+			 * the cmd.
+			 */
+			if (unlikely(cmd->write_data_done !=
+				     cmd->se_cmd.data_length))
+				cxgbit_abort_conn(csk);
+
 			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
 
 			dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,