Merge branch 'sctp-transmit-errs'

Xin Long says:

====================
sctp: fix the transmit err process

This patchset is to improve the transmit err process and also fix some
issues.

After this patchset, once the chunks are enqueued successfully, even
if the chunks fail to send out, whether because of nodst or nomem,
no err is returned back to users any more. Instead, they are taken
care of by retransmit.

v1->v2:
  - add more details to the changelog in patch 1/6
  - add Fixes: tag in patch 2/6, 3/6
  - also revert 69b5777f2e in patch 3/6
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-09-18 22:02:40 -04:00
commit 829ff34868
6 changed files with 84 additions and 103 deletions
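
A minimal sketch of the behaviour described in the commit message above
(plain userspace C with invented struct fields and a stub transmit(), not the
kernel API): once a chunk has been accepted into the outqueue, a later
transmit failure is only recorded on the socket (sk_err) and left to the
retransmit machinery, instead of being returned to the caller.

	#include <stdio.h>

	struct sock { int sk_err; };
	struct outq { struct sock *sk; int queued; };

	/* stand-in for the lower-layer send; pretend it fails with -ENOMEM (-12) */
	static int transmit(struct outq *q) { (void)q; return -12; }

	/* after the patchset the enqueue/flush path returns void: a send failure
	 * is noted in sk->sk_err and the chunk simply stays queued */
	static void outq_tail_and_flush(struct outq *q)
	{
		int error;

		q->queued++;			/* chunk now belongs to the outqueue */
		error = transmit(q);
		if (error < 0)
			q->sk->sk_err = -error;	/* reported via the socket, not returned */
	}

	int main(void)
	{
		struct sock sk = { 0 };
		struct outq q = { &sk, 0 };

		outq_tail_and_flush(&q);
		printf("queued=%d sk_err=%d\n", q.queued, sk.sk_err);
		return 0;
	}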

include/net/sctp/structs.h

@@ -537,6 +537,7 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
struct iov_iter *);
void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -1076,7 +1077,7 @@ struct sctp_outq {
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
@@ -1084,7 +1085,7 @@ void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
sctp_retransmit_reason_t);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
/* Uncork and flush an outqueue. */

net/sctp/chunk.c

@@ -70,6 +70,19 @@ static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
return msg;
}
void sctp_datamsg_free(struct sctp_datamsg *msg)
{
struct sctp_chunk *chunk;
/* This doesn't have to be a _safe variant because
* sctp_chunk_free() only drops the refs.
*/
list_for_each_entry(chunk, &msg->chunks, frag_list)
sctp_chunk_free(chunk);
sctp_datamsg_put(msg);
}
/* Final destruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{

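The new sctp_datamsg_free() above differs from sctp_datamsg_put() in that it
also drops the reference held by each chunk still on msg->chunks before
releasing the message itself. A toy model of that difference (plain C with
made-up counters, not the kernel's refcounting):

	#include <stdio.h>

	struct msg { int msg_refs; int chunk_refs; int nr_chunks; };

	static void datamsg_put(struct msg *m)
	{
		m->msg_refs--;			/* drop only the message reference */
	}

	static void datamsg_free(struct msg *m)
	{
		m->chunk_refs -= m->nr_chunks;	/* sctp_chunk_free() on every chunk */
		datamsg_put(m);
	}

	int main(void)
	{
		struct msg queued = { 1, 3, 3 };	/* chunks handed off to the outqueue */
		struct msg failed = { 1, 3, 3 };	/* enqueue failed, nobody else owns them */

		datamsg_put(&queued);
		datamsg_free(&failed);
		printf("queued: chunk_refs=%d, failed: chunk_refs=%d\n",
		       queued.chunk_refs, failed.chunk_refs);
		return 0;
	}

The socket.c hunk at the end of this commit is the intended caller: it uses
sctp_datamsg_put() on success and sctp_datamsg_free() when sctp_primitive_SEND()
fails.
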
net/sctp/output.c

@@ -180,7 +180,6 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
int one_packet, gfp_t gfp)
{
sctp_xmit_t retval;
int error = 0;
pr_debug("%s: packet:%p size:%Zu chunk:%p size:%d\n", __func__,
packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
@@ -188,6 +187,8 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
case SCTP_XMIT_PMTU_FULL:
if (!packet->has_cookie_echo) {
int error = 0;
error = sctp_packet_transmit(packet, gfp);
if (error < 0)
chunk->skb->sk->sk_err = -error;
@@ -441,14 +442,14 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* time. Application may notice this error.
*/
pr_err_once("Trying to GSO but underlying device doesn't support it.");
goto nomem;
goto err;
}
} else {
pkt_size = packet->size;
}
head = alloc_skb(pkt_size + MAX_HEADER, gfp);
if (!head)
goto nomem;
goto err;
if (gso) {
NAPI_GRO_CB(head)->last = head;
skb_shinfo(head)->gso_type = sk->sk_gso_type;
@@ -469,8 +470,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
}
}
dst = dst_clone(tp->dst);
if (!dst)
goto no_route;
if (!dst) {
if (asoc)
IP_INC_STATS(sock_net(asoc->base.sk),
IPSTATS_MIB_OUTNOROUTES);
goto nodst;
}
skb_dst_set(head, dst);
/* Build the SCTP header. */
@@ -621,8 +626,10 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
if (!gso)
break;
if (skb_gro_receive(&head, nskb))
if (skb_gro_receive(&head, nskb)) {
kfree_skb(nskb);
goto nomem;
}
nskb = NULL;
if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
sk->sk_gso_max_segs))
@@ -716,18 +723,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
}
head->ignore_df = packet->ipfragok;
tp->af_specific->sctp_xmit(head, tp);
goto out;
out:
sctp_packet_reset(packet);
return err;
no_route:
kfree_skb(head);
if (nskb != head)
kfree_skb(nskb);
if (asoc)
IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
nomem:
if (packet->auth && list_empty(&packet->auth->list))
sctp_chunk_free(packet->auth);
nodst:
/* FIXME: Returning the 'err' will affect all the associations
* associated with a socket, although only one of the paths of the
* association is unreachable.
@@ -736,22 +738,18 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* required.
*/
/* err = -EHOSTUNREACH; */
err:
/* Control chunks are unreliable so just drop them. DATA chunks
* will get resent or dropped later.
*/
kfree_skb(head);
err:
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
goto out;
nomem:
if (packet->auth && list_empty(&packet->auth->list))
sctp_chunk_free(packet->auth);
err = -ENOMEM;
goto err;
out:
sctp_packet_reset(packet);
return err;
}
/********************************************************************

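One detail in the hunks above is the skb_gro_receive() failure path: when the
merge into head fails, nskb has not been linked into head, so it has to be
freed explicitly before jumping to the error label or it would leak. A tiny
standalone model of that ownership rule (the buf type and merge() helper are
invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	struct buf { struct buf *next; };

	/* stand-in for skb_gro_receive(): nonzero means the merge failed and
	 * the caller still owns nskb */
	static int merge(struct buf **head, struct buf *nskb, int fail)
	{
		if (fail)
			return -1;
		nskb->next = *head;	/* on success, head takes ownership */
		*head = nskb;
		return 0;
	}

	int main(void)
	{
		struct buf *head = NULL;
		struct buf *nskb = malloc(sizeof(*nskb));

		if (!nskb)
			return 1;
		if (merge(&head, nskb, /*fail=*/1)) {
			free(nskb);	/* the fix: do not leak the unmerged buffer */
			nskb = NULL;
		}
		printf("head=%p nskb=%p\n", (void *)head, (void *)nskb);
		return 0;
	}
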
net/sctp/outqueue.c

@@ -68,7 +68,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
@@ -285,10 +285,9 @@ void sctp_outq_free(struct sctp_outq *q)
}
/* Put a new chunk in an sctp_outq. */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
{
struct net *net = sock_net(q->asoc->base.sk);
int error = 0;
pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
chunk && chunk->chunk_hdr ?
@@ -299,54 +298,26 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
* immediately.
*/
if (sctp_chunk_is_data(chunk)) {
/* Is it OK to queue data chunks? */
/* From 9. Termination of Association
*
* When either endpoint performs a shutdown, the
* association on each peer will stop accepting new
* data from its user and only deliver data in queue
* at the time of sending or receiving the SHUTDOWN
* chunk.
*/
switch (q->asoc->state) {
case SCTP_STATE_CLOSED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_SENT:
case SCTP_STATE_SHUTDOWN_RECEIVED:
case SCTP_STATE_SHUTDOWN_ACK_SENT:
/* Cannot send after transport endpoint shutdown */
error = -ESHUTDOWN;
break;
pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
__func__, q, chunk, chunk && chunk->chunk_hdr ?
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
"illegal chunk");
default:
pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
__func__, q, chunk, chunk && chunk->chunk_hdr ?
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
"illegal chunk");
sctp_chunk_hold(chunk);
sctp_outq_tail_data(q, chunk);
if (chunk->asoc->prsctp_enable &&
SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
chunk->asoc->sent_cnt_removable++;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
else
SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
break;
}
sctp_outq_tail_data(q, chunk);
if (chunk->asoc->prsctp_enable &&
SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
chunk->asoc->sent_cnt_removable++;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
else
SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
} else {
list_add_tail(&chunk->list, &q->control_chunk_list);
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
}
if (error < 0)
return error;
if (!q->cork)
error = sctp_outq_flush(q, 0, gfp);
return error;
sctp_outq_flush(q, 0, gfp);
}
/* Insert a chunk into the sorted list based on the TSNs. The retransmit list
@@ -559,7 +530,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_retransmit_reason_t reason)
{
struct net *net = sock_net(q->asoc->base.sk);
int error = 0;
switch (reason) {
case SCTP_RTXR_T3_RTX:
@@ -603,10 +573,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
* will be flushed at the end.
*/
if (reason != SCTP_RTXR_FAST_RTX)
error = sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
if (error)
q->asoc->base.sk->sk_err = -error;
sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
}
/*
@@ -778,12 +745,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
}
/* Cork the outqueue so queued chunks are really queued. */
int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
{
if (q->cork)
q->cork = 0;
return sctp_outq_flush(q, 0, gfp);
sctp_outq_flush(q, 0, gfp);
}
@@ -796,7 +763,7 @@ int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
* locking concerns must be made. Today we use the sock lock to protect
* this function.
*/
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
{
struct sctp_packet *packet;
struct sctp_packet singleton;
@@ -919,8 +886,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
sctp_packet_config(&singleton, vtag, 0);
sctp_packet_append_chunk(&singleton, chunk);
error = sctp_packet_transmit(&singleton, gfp);
if (error < 0)
return error;
if (error < 0) {
asoc->base.sk->sk_err = -error;
return;
}
break;
case SCTP_CID_ABORT:
@@ -1018,6 +987,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
retran:
error = sctp_outq_flush_rtx(q, packet,
rtx_timeout, &start_timer);
if (error < 0)
asoc->base.sk->sk_err = -error;
if (start_timer) {
sctp_transport_reset_t3_rtx(transport);
@@ -1192,14 +1163,15 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
struct sctp_transport,
send_ready);
packet = &t->packet;
if (!sctp_packet_empty(packet))
if (!sctp_packet_empty(packet)) {
error = sctp_packet_transmit(packet, gfp);
if (error < 0)
asoc->base.sk->sk_err = -error;
}
/* Clear the burst limited state, if any */
sctp_transport_burst_reset(t);
}
return error;
}
/* Update unack_data based on the incoming SACK chunk */

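With sctp_outq_tail() and sctp_outq_uncork() returning void, what callers
interact with is essentially the corking contract visible above: while q->cork
is set, tail only queues, and uncork clears the flag and flushes; transmit
errors inside the flush are reported through asoc->base.sk->sk_err rather than
a return value. A minimal model of that contract (simplified types, not the
kernel structures):

	#include <stdio.h>

	struct outq { int cork; int queued; int sent; };

	static void outq_flush(struct outq *q)
	{
		q->sent  += q->queued;
		q->queued = 0;
	}

	static void outq_tail(struct outq *q)
	{
		q->queued++;
		if (!q->cork)		/* not corked: flush right away */
			outq_flush(q);
	}

	static void outq_uncork(struct outq *q)
	{
		if (q->cork)
			q->cork = 0;
		outq_flush(q);		/* flush unconditionally, as in the hunk above */
	}

	int main(void)
	{
		struct outq q = { .cork = 1, .queued = 0, .sent = 0 };

		outq_tail(&q);		/* queued only */
		outq_tail(&q);
		outq_uncork(&q);	/* both chunks go out in a single flush */
		printf("sent=%d queued=%d\n", q.sent, q.queued);
		return 0;
	}
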
net/sctp/sm_sideeffect.c

@@ -1020,19 +1020,13 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
* This way the whole message is queued up and bundling if
* encouraged for small fragments.
*/
static int sctp_cmd_send_msg(struct sctp_association *asoc,
struct sctp_datamsg *msg, gfp_t gfp)
static void sctp_cmd_send_msg(struct sctp_association *asoc,
struct sctp_datamsg *msg, gfp_t gfp)
{
struct sctp_chunk *chunk;
int error = 0;
list_for_each_entry(chunk, &msg->chunks, frag_list) {
error = sctp_outq_tail(&asoc->outqueue, chunk, gfp);
if (error)
break;
}
return error;
list_for_each_entry(chunk, &msg->chunks, frag_list)
sctp_outq_tail(&asoc->outqueue, chunk, gfp);
}
@@ -1427,8 +1421,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
local_cork = 1;
}
/* Send a chunk to our peer. */
error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk,
gfp);
sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
break;
case SCTP_CMD_SEND_PKT:
@@ -1682,7 +1675,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_FORCE_PRIM_RETRAN:
t = asoc->peer.retran_path;
asoc->peer.retran_path = asoc->peer.primary_path;
error = sctp_outq_uncork(&asoc->outqueue, gfp);
sctp_outq_uncork(&asoc->outqueue, gfp);
local_cork = 0;
asoc->peer.retran_path = t;
break;
@@ -1709,7 +1702,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_outq_cork(&asoc->outqueue);
local_cork = 1;
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
@@ -1739,9 +1732,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
*/
if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
if (chunk->end_of_packet || chunk->singleton)
error = sctp_outq_uncork(&asoc->outqueue, gfp);
sctp_outq_uncork(&asoc->outqueue, gfp);
} else if (local_cork)
error = sctp_outq_uncork(&asoc->outqueue, gfp);
sctp_outq_uncork(&asoc->outqueue, gfp);
if (sp->data_ready_signalled)
sp->data_ready_signalled = 0;

net/sctp/socket.c

@@ -1958,6 +1958,8 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
sctp_chunk_hold(chunk);
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
@@ -1970,13 +1972,15 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
* breaks.
*/
err = sctp_primitive_SEND(net, asoc, datamsg);
sctp_datamsg_put(datamsg);
/* Did the lower layer accept the chunk? */
if (err)
if (err) {
sctp_datamsg_free(datamsg);
goto out_free;
}
pr_debug("%s: we sent primitively\n", __func__);
sctp_datamsg_put(datamsg);
err = msg_len;
if (unlikely(wait_connect)) {