[NET] SCTP: Fix whitespace errors.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by YOSHIFUJI Hideaki, 2007-02-09 23:25:18 +09:00; committed by David S. Miller
parent 10297b9931
commit d808ad9ab8
23 changed files with 365 additions and 365 deletions
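The change is purely mechanical whitespace cleanup across the SCTP sources; the code regions quoted in the hunks below are otherwise untouched. As a rough illustration only (nothing here is part of the commit, and the file name wscheck.c is invented), the two error classes such a cleanup usually targets are trailing whitespace at the end of a line and a space sitting in front of a tab in the indentation, the same patterns "git diff --check" reports. A minimal standalone checker for them might look like:

/*
 * wscheck.c -- illustrative sketch, not part of this commit.
 * Flags trailing whitespace and space-before-tab in the indentation.
 */
#include <stdio.h>
#include <string.h>

static void check_line(const char *file, unsigned long lineno, const char *line)
{
    size_t len = strcspn(line, "\n");   /* length without the newline */

    /* Trailing whitespace: the last character of the line is a blank. */
    if (len && (line[len - 1] == ' ' || line[len - 1] == '\t'))
        printf("%s:%lu: trailing whitespace\n", file, lineno);

    /* A space followed by a tab inside the leading indentation. */
    for (size_t i = 0; i + 1 < len; i++) {
        if (line[i] != ' ' && line[i] != '\t')
            break;
        if (line[i] == ' ' && line[i + 1] == '\t') {
            printf("%s:%lu: space before tab in indent\n", file, lineno);
            break;
        }
    }
}

int main(int argc, char **argv)
{
    char buf[4096];

    for (int i = 1; i < argc; i++) {
        FILE *fp = fopen(argv[i], "r");
        unsigned long lineno = 0;

        if (!fp) {
            perror(argv[i]);
            continue;
        }
        while (fgets(buf, sizeof(buf), fp))
            check_line(argv[i], ++lineno, buf);
        fclose(fp);
    }
    return 0;
}

Built with something like "cc -std=c99 -o wscheck wscheck.c" and run as "./wscheck net/sctp/*.c", it prints the offending file:line locations, the same class of complaint git's whitespace checks produce.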


@ -158,14 +158,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
* recommended value of 5 times 'RTO.Max'.
*/
asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
= 5 * asoc->rto_max;
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
sp->autoclose * HZ;
/* Initilizes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
init_timer(&asoc->timers[i]);
@ -1334,8 +1334,8 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
asoc->ep->base.bind_addr.port, gfp);
}
/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
const union sctp_addr *laddr)
{
int found;
@ -1343,7 +1343,7 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
sctp_read_lock(&asoc->base.addr_lock);
if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
sctp_sk(asoc->base.sk))) {
found = 1;
goto out;
}


@ -62,7 +62,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *);
/* Copy 'src' to 'dest' taking 'scope' into account. Omit addresses
* in 'src' which have a broader scope than 'scope'.
*/
int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
sctp_scope_t scope, gfp_t gfp,
int flags)
@ -296,7 +296,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
********************************************************************/
/* Does this contain a specified address? Allow wildcarding. */
int sctp_bind_addr_match(struct sctp_bind_addr *bp,
const union sctp_addr *addr,
struct sctp_sock *opt)
{
@ -306,7 +306,7 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
if (opt->pf->cmp_addr(&laddr->a, addr, opt))
return 1;
}
return 0;
@ -329,12 +329,12 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
addr_buf = (union sctp_addr *)addrs;
for (i = 0; i < addrcnt; i++) {
addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af)
return NULL;
if (opt->pf->cmp_addr(&laddr->a, addr, opt))
@ -350,7 +350,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
}
/* Copy out addresses from the global local address list. */
static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
union sctp_addr *addr,
sctp_scope_t scope, gfp_t gfp,
int flags)


@ -1,40 +1,40 @@
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* SCTP Checksum functions
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Dinakaran Joseph
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@ -135,10 +135,10 @@ static const __u32 crc_c[256] = {
0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
};
__u32 sctp_start_cksum(__u8 *buffer, __u16 length)
{
__u32 crc32 = ~(__u32) 0;
__u32 i;
/* Optimize this routine to be SCTP specific, knowing how
@ -147,7 +147,7 @@ __u32 sctp_start_cksum(__u8 *buffer, __u16 length)
/* Calculate CRC up to the checksum. */
for (i = 0; i < (sizeof(struct sctphdr) - sizeof(__u32)); i++)
CRC32C(crc32, buffer[i]);
/* Skip checksum field of the header. */
for (i = 0; i < sizeof(__u32); i++)
@ -175,13 +175,13 @@ __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
__u32 i;
__u32 *_to = (__u32 *)to;
__u32 *_from = (__u32 *)from;
for (i = 0; i < (length/4); i++) {
_to[i] = _from[i];
CRC32C(crc32, from[i*4]);
CRC32C(crc32, from[i*4+1]);
CRC32C(crc32, from[i*4+2]);
CRC32C(crc32, from[i*4+3]);
}
return crc32;


@ -3,48 +3,48 @@
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* This file is part of the implementation of the add-IP extension,
* based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001,
* for the SCTP kernel reference Implementation.
*
* This file converts numerical ID value to alphabetical names for SCTP
* terms such as chunk type, parameter time, event type, etc.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Xingang Guo <xingang.guo@intel.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@ -81,7 +81,7 @@ const char *sctp_cname(const sctp_subtype_t cid)
return "illegal chunk id";
if (cid.chunk <= SCTP_CID_BASE_MAX)
return sctp_cid_tbl[cid.chunk];
switch (cid.chunk) {
case SCTP_CID_ASCONF:
return "ASCONF";
@ -154,7 +154,7 @@ const char *sctp_pname(const sctp_subtype_t id)
static const char *sctp_other_tbl[] = {
"NO_PENDING_TSN",
"ICMP_PROTO_UNREACH",
"ICMP_PROTO_UNREACH",
};
/* Lookup "other" debug name. */


@ -369,7 +369,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
chunk->transport->last_time_heard = jiffies;
error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
ep, asoc, chunk, GFP_ATOMIC);
if (error && chunk)
chunk->pdiscard = 1;


@ -226,7 +226,7 @@ int sctp_rcv(struct sk_buff *skb)
nf_reset(skb);
if (sk_filter(sk, skb))
goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
@ -293,11 +293,11 @@ int sctp_rcv(struct sk_buff *skb)
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
struct sctp_ep_common *rcvr = NULL;
int backloged = 0;
rcvr = chunk->rcvr;
/* If the rcvr is dead then the association or endpoint
* has been deleted and we can safely drop the chunk
@ -347,7 +347,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
else
BUG();
return 0;
}
static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
@ -399,7 +399,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
* Normally, if PMTU discovery is disabled, an ICMP Fragmentation
* Needed will never be sent, but if a message was sent before
* PMTU discovery was disabled that was larger than the PMTU, it
* would not be fragmented, so it must be re-transmitted fragmented.
*/
sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
@ -416,8 +416,8 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
*
*/
void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_association *asoc,
struct sctp_transport *t)
{
SCTP_DEBUG_PRINTK("%s\n", __FUNCTION__);


@ -2,43 +2,43 @@
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2002 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* These functions are the methods for accessing the SCTP inqueue.
*
* An SCTP inqueue is a queue into which you push SCTP packets
* (which might be bundles or fragments of chunks) and out of which you
* pop SCTP whole chunks.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@ -152,8 +152,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
chunk->data_accepted = 0;
}
chunk->chunk_hdr = ch;
chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
/* In the unlikely case of an IP reassembly, the skb could be
* non-linear. If so, update chunk_end so that it doesn't go past
* the skb->tail.
@ -169,11 +169,11 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > chunk->skb->tail) {
/* RFC 2960, Section 6.10 Bundling
*
* Partial chunks MUST NOT be placed in an SCTP packet.
* If the receiver detects a partial chunk, it MUST drop
* the chunk.
*
* Since the end of the chunk is past the end of our buffer
* (which contains the whole packet, we can freely discard


@ -236,7 +236,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
ipv6_addr_copy(&fl.fl6_dst, &daddr->v6.sin6_addr);
if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
fl.oif = daddr->v6.sin6_scope_id;
SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ",
__FUNCTION__, NIP6(fl.fl6_dst));


@ -1,39 +1,39 @@
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
*
* This file is part of the SCTP kernel reference Implementation
*
* Support for memory object debugging. This allows one to monitor the
* object allocations/deallocations for types instrumented for this
* via the proc fs.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@ -121,7 +121,7 @@ static int sctp_dbg_objcnt_read(char *buffer, char **start, off_t offset,
if (len > length)
len = length;
return len;
}
/* Initialize the objcount in the proc filesystem. */
@ -131,7 +131,7 @@ void sctp_dbg_objcnt_init(void)
ent = create_proc_read_entry("sctp_dbg_objcnt", 0, proc_net_sctp,
sctp_dbg_objcnt_read, NULL);
if (!ent)
printk(KERN_WARNING
"sctp_dbg_objcnt: Unable to create /proc entry.\n");
}


@ -85,8 +85,8 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
chunk = sctp_get_ecne_prepend(packet->transport->asoc);
/* If there a is a prepend chunk stick it on the list before
* any other chunks get appended.
*/
if (chunk)
sctp_packet_append_chunk(packet, chunk);
}
@ -110,8 +110,8 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
packet->destination_port = dport;
INIT_LIST_HEAD(&packet->chunk_list);
if (asoc) {
struct sctp_sock *sp = sctp_sk(asoc->base.sk);
overhead = sp->pf->af->net_header_len;
} else {
overhead = sizeof(struct ipv6hdr);
}
@ -442,7 +442,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* acknowledged or have failed.
*/
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
/* Perform final transformation on checksum. */
@ -528,7 +528,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
goto out;
nomem:


@ -376,7 +376,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
}
}
if (!done)
list_add_tail(new, head);
}
/* Mark all the eligible packets on a transport for retransmission. */
@ -578,7 +578,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
break;
case SCTP_XMIT_RWND_FULL:
/* Send this packet. */
if ((error = sctp_packet_transmit(pkt)) == 0)
*start_timer = 1;
@ -590,7 +590,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
break;
case SCTP_XMIT_NAGLE_DELAY:
/* Send this packet. */
if ((error = sctp_packet_transmit(pkt)) == 0)
*start_timer = 1;
@ -605,7 +605,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
*/
list_add_tail(lchunk, &transport->transmitted);
/* Mark the chunk as ineligible for fast retransmit
* after it is retransmitted.
*/
if (chunk->fast_retransmit > 0)
@ -703,11 +703,11 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
* inactive.
*
* 3.3.6 Heartbeat Acknowledgement:
* ...
* A HEARTBEAT ACK is always sent to the source IP
* address of the IP datagram containing the
* HEARTBEAT chunk to which this ack is responding.
* ...
*/
if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK)
@ -914,7 +914,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
BUG();
}
/* BUG: We assume that the sctp_packet_transmit()
* call below will succeed all the time and add the
* chunk to the transmitted list and restart the
* timers.
@ -1266,7 +1266,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* first instance of the packet or a later
* instance).
*/
if (!tchunk->tsn_gap_acked &&
!tchunk->resent &&
tchunk->rtt_in_progress) {
tchunk->rtt_in_progress = 0;
@ -1275,7 +1275,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
rtt);
}
}
if (TSN_lte(tsn, sack_ctsn)) {
/* RFC 2960 6.3.2 Retransmission Timer Rules
*
* R3) Whenever a SACK is received
@ -1590,7 +1590,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
"ssthresh: %d, flight_size: %d, pba: %d\n",
__FUNCTION__, transport, transport->cwnd,
transport->ssthresh, transport->flight_size,
transport->partial_bytes_acked);
}
}
@ -1603,7 +1603,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
__u16 gap;
__u32 ctsn = ntohl(sack->cum_tsn_ack);
if (TSN_lte(tsn, ctsn))
goto pass;
/* 3.3.4 Selective Acknowledgement (SACK) (3):
@ -1657,7 +1657,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
* received SACK.
*
* If (Advanced.Peer.Ack.Point < SackCumAck), then update
* Advanced.Peer.Ack.Point to be equal to SackCumAck.
*/
@ -1671,7 +1671,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
*
* Assuming that a SACK arrived with the Cumulative TSN ACK 102
* and the Advanced.Peer.Ack.Point is updated to this value:
*
* out-queue at the end of ==> out-queue after Adv.Ack.Point
* normal SACK processing local advancement
* ... ...
@ -1692,7 +1692,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
/* Remove any chunks in the abandoned queue that are acked by
* the ctsn.
*/
if (TSN_lte(tsn, ctsn)) {
list_del_init(lchunk);
if (!chunk->tsn_gap_acked) {
@ -1743,7 +1743,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
*/
if (asoc->adv_peer_ack_point > ctsn)
ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
nskips, &ftsn_skip_arr[0]);
if (ftsn_chunk) {
list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);


@ -70,7 +70,7 @@ int sctp_primitive_ ## name(struct sctp_association *asoc, \
\
error = sctp_do_sm(event_type, subtype, state, ep, asoc, \
arg, GFP_KERNEL); \
return error; \
}
/* 10.1 ULP-to-SCTP
@ -207,7 +207,7 @@ DECLARE_PRIMITIVE(REQUESTHEARTBEAT);
/* ADDIP
* 3.1.1 Address Configuration Change Chunk (ASCONF)
*
* This chunk is used to communicate to the remote endpoint one of the
* configuration change requests that MUST be acknowledged. The
* information carried in the ASCONF Chunk uses the form of a


@ -77,7 +77,7 @@ static struct snmp_mib sctp_snmp_list[] = {
/* Return the current value of a particular entry in the mib by adding its
* per cpu counters.
*/
static unsigned long
fold_field(void *mib[], int nr)
{
@ -102,7 +102,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0; sctp_snmp_list[i].name != NULL; i++)
seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
fold_field((void **)sctp_statistics,
sctp_snmp_list[i].entry));
return 0;


@ -102,11 +102,11 @@ static __init int sctp_proc_init(void)
}
if (sctp_snmp_proc_init())
goto out_nomem;
if (sctp_eps_proc_init())
goto out_nomem;
if (sctp_assocs_proc_init())
goto out_nomem;
return 0;
@ -114,7 +114,7 @@ static __init int sctp_proc_init(void)
return -ENOMEM;
}
/* Clean up the proc fs entry for the SCTP protocol.
* Note: Do not make this __exit as it is used in the init error
* path.
*/
@ -286,7 +286,7 @@ static int sctp_v4_to_addr_param(const union sctp_addr *addr,
param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
param->v4.param_hdr.length = htons(length);
param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;
return length;
}
@ -344,9 +344,9 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr))
return 0;
/* Is this a broadcast address? */
if (skb && ((struct rtable *)skb->dst)->rt_flags & RTCF_BROADCAST)
return 0;
return 1;
}
@ -494,7 +494,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
out:
if (dst)
SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_src));
else
SCTP_DEBUG_PRINTK("NO ROUTE\n");
@ -517,14 +517,14 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc,
if (rt) {
saddr->v4.sin_family = AF_INET;
saddr->v4.sin_port = htons(asoc->base.bind_addr.port);
saddr->v4.sin_addr.s_addr = rt->rt_src;
}
}
/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
return ((struct rtable *)skb->dst)->rt_iif;
}
/* Was this packet marked by Explicit Congestion Notification? */
@ -569,7 +569,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
newinet->dport = htons(asoc->peer.port);
newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
newinet->pmtudisc = inet->pmtudisc;
newinet->id = asoc->next_tsn ^ jiffies;
newinet->uc_ttl = -1;
newinet->mc_loop = 1;


@ -118,7 +118,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
int padlen;
__u16 len;
/* Cause code constants are now defined in network order. */
/* Cause code constants are now defined in network order. */
err.cause = cause_code;
len = sizeof(sctp_errhdr_t) + paylen;
padlen = len % 4;
@ -295,11 +295,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
*/
chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;
/* Tell peer that we'll do ECN only if peer advertised such cap. */
/* Tell peer that we'll do ECN only if peer advertised such cap. */
if (asoc->peer.ecn_capable)
chunksize += sizeof(ecap_param);
/* Tell peer that we'll do PR-SCTP only if peer advertised. */
/* Tell peer that we'll do PR-SCTP only if peer advertised. */
if (asoc->peer.prsctp_capable)
chunksize += sizeof(prsctp_param);
@ -728,7 +728,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
if (retval && chunk)
retval->transport = chunk->transport;
return retval;
return retval;
}
/* Create an ABORT. Note that we set the T bit if we have no
@ -844,7 +844,7 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
return retval;
}
/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
struct sctp_chunk *sctp_make_abort_violation(
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
@ -1265,8 +1265,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
/* Header size is static data prior to the actual cookie, including
* any padding.
*/
headersize = sizeof(sctp_paramhdr_t) +
(sizeof(struct sctp_signed_cookie) -
sizeof(struct sctp_cookie));
bodysize = sizeof(struct sctp_cookie)
+ ntohs(init_chunk->chunk_hdr->length) + addrs_len;
@ -1315,7 +1315,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
memcpy((__u8 *)&cookie->c.peer_init[0] +
ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
if (sctp_sk(ep->base.sk)->hmac) {
if (sctp_sk(ep->base.sk)->hmac) {
struct hash_desc desc;
/* Sign the message. */
@ -1324,8 +1324,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
sg.length = bodysize;
keylen = SCTP_SECRET_SIZE;
key = (char *)ep->secret_key[ep->current_key];
desc.tfm = sctp_sk(ep->base.sk)->hmac;
desc.flags = 0;
if (crypto_hash_setkey(desc.tfm, key, keylen) ||
crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
@ -1365,7 +1365,7 @@ struct sctp_association *sctp_unpack_cookie(
* any padding.
*/
headersize = sizeof(sctp_chunkhdr_t) +
(sizeof(struct sctp_signed_cookie) -
(sizeof(struct sctp_signed_cookie) -
sizeof(struct sctp_cookie));
bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
fixed_size = headersize + sizeof(struct sctp_cookie);
@ -1593,7 +1593,7 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
struct sctp_chunk **errp)
{
char error[] = "The following parameter had invalid length:";
size_t payload_len = WORD_ROUND(sizeof(error)) +
size_t payload_len = WORD_ROUND(sizeof(error)) +
sizeof(sctp_paramhdr_t);
@ -1752,7 +1752,7 @@ static int sctp_verify_param(const struct sctp_association *asoc,
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (sctp_prsctp_enable)
break;
/* Fall Through */
/* Fall Through */
default:
SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
ntohs(param.p->type), cid);
@ -1861,7 +1861,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
sctp_walk_params(param, peer_init, init_hdr.params) {
if (!sctp_process_param(asoc, param, peer_addr, gfp))
goto clean_up;
goto clean_up;
}
/* Walk list of transports, removing transports in the UNKNOWN state. */
@ -1937,7 +1937,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
*/
/* Allocate storage for the negotiated streams if it is not a temporary
* association.
* association.
*/
if (!asoc->temp) {
int assoc_id;
@ -2109,7 +2109,7 @@ static int sctp_process_param(struct sctp_association *asoc,
asoc->peer.prsctp_capable = 1;
break;
}
/* Fall Through */
/* Fall Through */
default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
@ -2168,7 +2168,7 @@ __u32 sctp_generate_tsn(const struct sctp_endpoint *ep)
* | ASCONF Parameter #N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Address Parameter and other parameter will not be wrapped in this function
* Address Parameter and other parameter will not be wrapped in this function
*/
static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
union sctp_addr *addr,
@ -2290,7 +2290,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
* | Address Parameter |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Create an ASCONF chunk with Set Primary IP address parameter.
* Create an ASCONF chunk with Set Primary IP address parameter.
*/
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr)
@ -2339,7 +2339,7 @@ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
* | ASCONF Parameter Response#N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Create an ASCONF_ACK chunk with enough space for the parameter responses.
* Create an ASCONF_ACK chunk with enough space for the parameter responses.
*/
static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc,
__u32 serial, int vparam_len)
@ -2381,7 +2381,7 @@ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id,
ntohs(asconf_param->param_hdr.length);
}
/* Add Success Indication or Error Cause Indication parameter. */
/* Add Success Indication or Error Cause Indication parameter. */
ack_param.param_hdr.type = response_type;
ack_param.param_hdr.length = htons(sizeof(ack_param) +
err_param_len +
@ -2424,11 +2424,11 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
* request and does not have the local resources to add this
* new address to the association, it MUST return an Error
* Cause TLV set to the new error code 'Operation Refused
* Due to Resource Shortage'.
*/
peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
if (!peer)
@ -2440,10 +2440,10 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
break;
case SCTP_PARAM_DEL_IP:
/* ADDIP 4.3 D7) If a request is received to delete the
* last remaining IP address of a peer endpoint, the receiver
* MUST send an Error Cause TLV with the error cause set to the
* new error code 'Request to Delete Last Remaining IP Address'.
*/
pos = asoc->peer.transport_addr_list.next;
if (pos->next == &asoc->peer.transport_addr_list)
return SCTP_ERROR_DEL_LAST_IP;
@ -2475,7 +2475,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
return SCTP_ERROR_NO_ERROR;
}
/* Process an incoming ASCONF chunk with the next expected serial no. and
/* Process an incoming ASCONF chunk with the next expected serial no. and
* return an ASCONF_ACK chunk to be sent in response.
*/
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
@ -2495,19 +2495,19 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
hdr = (sctp_addiphdr_t *)asconf->skb->data;
serial = ntohl(hdr->serial);
/* Skip the addiphdr and store a pointer to address parameter. */
/* Skip the addiphdr and store a pointer to address parameter. */
length = sizeof(sctp_addiphdr_t);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
chunk_len -= length;
/* Skip the address parameter and store a pointer to the first
* asconf paramter.
*/
*/
length = ntohs(addr_param->v4.param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
chunk_len -= length;
/* create an ASCONF_ACK chunk.
/* create an ASCONF_ACK chunk.
* Based on the definitions of parameters, we know that the size of
* ASCONF_ACK parameters are less than or equal to the twice of ASCONF
* paramters.
@ -2538,7 +2538,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
* an IP address sends an 'Out of Resource' in its response, it
* MUST also fail any subsequent add or delete requests bundled
* in the ASCONF.
* in the ASCONF.
*/
if (SCTP_ERROR_RSRC_LOW == err_code)
goto done;
@ -2549,12 +2549,12 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
length);
chunk_len -= length;
}
done:
asoc->peer.addip_serial++;
/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
* after freeing the reference to old asconf ack if any.
* after freeing the reference to old asconf ack if any.
*/
if (asconf_ack) {
if (asoc->addip_last_asconf_ack)
@ -2622,7 +2622,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
* for the given asconf parameter. If there is no response for this parameter,
* return the error code based on the third argument 'no_err'.
* return the error code based on the third argument 'no_err'.
* ADDIP 4.1
* A7) If an error response is received for a TLV parameter, all TLVs with no
* response before the failed TLV are considered successful if not reported.
@ -2646,7 +2646,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
* the first asconf_ack parameter.
*/
*/
length = sizeof(sctp_addiphdr_t);
asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data +
length);
@ -2697,14 +2697,14 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
/* Skip the chunkhdr and addiphdr from the last asconf sent and store
* a pointer to address parameter.
*/
*/
length = sizeof(sctp_addip_chunk_t);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
asconf_len -= length;
/* Skip the address parameter in the last asconf sent and store a
* pointer to the first asconf paramter.
*/
*/
length = ntohs(addr_param->v4.param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
asconf_len -= length;
@ -2741,7 +2741,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
case SCTP_ERROR_INV_PARAM:
/* Disable sending this type of asconf parameter in
* future.
*/
*/
asoc->peer.addip_disabled_mask |=
asconf_param->param_hdr.type;
break;
@ -2755,7 +2755,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
/* Skip the processed asconf parameter and move to the next
* one.
*/
*/
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
length);
@ -2784,14 +2784,14 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
return retval;
}
/* Make a FWD TSN chunk. */
/* Make a FWD TSN chunk. */
struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_fwdtsn_skip *skiplist)
{
struct sctp_chunk *retval = NULL;
struct sctp_fwdtsn_chunk *ftsn_chunk;
struct sctp_fwdtsn_hdr ftsn_hdr;
struct sctp_fwdtsn_hdr ftsn_hdr;
struct sctp_fwdtsn_skip skip;
size_t hint;
int i;


@ -61,7 +61,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
struct sctp_endpoint *ep,
struct sctp_association *asoc,
void *event_arg,
sctp_disposition_t status,
sctp_disposition_t status,
sctp_cmd_seq_t *commands,
gfp_t gfp);
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
@ -78,7 +78,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
********************************************************************/
/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
__u32 lowest_tsn)
{
/* Save the TSN away for comparison when we receive CWR */
@ -160,7 +160,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
struct sctp_transport *trans = asoc->peer.last_data_from;
int error = 0;
if (force ||
if (force ||
(!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
(trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
asoc->peer.sack_needed = 1;
@ -178,7 +178,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
* [This is actually not mentioned in Section 6, but we
* implement it here anyway. --piggy]
*/
if (max_tsn_seen != ctsn)
if (max_tsn_seen != ctsn)
asoc->peer.sack_needed = 1;
/* From 6.2 Acknowledgement on Reception of DATA Chunks:
@ -199,10 +199,10 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
* for the association.
*/
if (trans)
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
trans->sackdelay;
else
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
asoc->sackdelay;
/* Restart the SACK timer. */
@ -338,8 +338,8 @@ static void sctp_generate_t4_rto_event(unsigned long data)
static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
struct sctp_association *asoc = (struct sctp_association *)data;
sctp_generate_timeout_event(asoc,
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
} /* sctp_generate_t5_shutdown_guard_event() */
@ -380,7 +380,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
asoc->state, asoc->ep, asoc,
transport, GFP_ATOMIC);
if (error)
if (error)
asoc->base.sk->sk_err = -error;
out_unlock:
@ -570,7 +570,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc)
struct sctp_association *asoc)
{
struct sctp_transport *t;
struct list_head *pos;
@ -675,7 +675,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
* the transport for a shutdown chunk.
*/
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
@ -688,7 +688,7 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
}
/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
sctp_state_t state)
{
@ -727,7 +727,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
sctp_state(asoc, SHUTDOWN_RECEIVED)) {
/* Wake up any processes waiting in the asoc's wait queue in
* sctp_wait_for_connect() or sctp_wait_for_sndbuf().
*/
*/
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
@ -749,9 +749,9 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
struct sock *sk = asoc->base.sk;
/* If it is a non-temporary association belonging to a TCP-style
* listening socket that is not closed, do not free it so that accept()
* listening socket that is not closed, do not free it so that accept()
* can pick it up later.
*/
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
(!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
return;
@ -764,7 +764,7 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
* ADDIP Section 4.1 ASCONF Chunk Procedures
* A4) Start a T-4 RTO timer, using the RTO value of the selected
* destination address (we use active path instead of primary path just
* because primary path may be inactive.
* because primary path may be inactive.
*/
static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
@ -777,7 +777,7 @@ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
chunk->transport = t;
}
/* Process an incoming Operation Error Chunk. */
/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
struct sctp_chunk *chunk)
@ -816,7 +816,7 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
}
/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk)
{
struct sctp_fwdtsn_skip *skip;
@ -828,9 +828,9 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
return;
}
/* Helper function to remove the association non-primary peer
/* Helper function to remove the association non-primary peer
* transports.
*/
*/
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
struct sctp_transport *t;
@ -840,7 +840,7 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport, transports);
if (!sctp_cmp_addr_exact(&t->ipaddr,
&asoc->peer.primary_addr)) {
&asoc->peer.primary_addr)) {
sctp_assoc_del_peer(asoc, &t->ipaddr);
}
}
@ -915,7 +915,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
DEBUG_POST;
error = sctp_side_effects(event_type, subtype, state,
ep, asoc, event_arg, status,
ep, asoc, event_arg, status,
&commands, gfp);
DEBUG_POST_SFX;
@ -968,7 +968,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
error = -ENOMEM;
break;
case SCTP_DISPOSITION_DELETE_TCB:
case SCTP_DISPOSITION_DELETE_TCB:
/* This should now be a command. */
break;
@ -1021,7 +1021,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
struct sctp_endpoint *ep,
struct sctp_association *asoc,
void *event_arg,
sctp_disposition_t status,
sctp_disposition_t status,
sctp_cmd_seq_t *commands,
gfp_t gfp)
{
@ -1057,7 +1057,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_NEW_ASOC:
/* Register a new association. */
if (local_cork) {
sctp_outq_uncork(&asoc->outqueue);
sctp_outq_uncork(&asoc->outqueue);
local_cork = 0;
}
asoc = cmd->obj.ptr;
@ -1074,7 +1074,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_outq_teardown(&asoc->outqueue);
break;
case SCTP_CMD_DELETE_TCB:
case SCTP_CMD_DELETE_TCB:
if (local_cork) {
sctp_outq_uncork(&asoc->outqueue);
local_cork = 0;
@ -1104,7 +1104,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_PROCESS_FWDTSN:
sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr);
break;
break;
case SCTP_CMD_GEN_SACK:
/* Generate a Selective ACK.
@ -1162,12 +1162,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
SCTP_CHUNK(cmd->obj.ptr));
/* FIXME - Eventually come up with a cleaner way to
* enabling COOKIE-ECHO + DATA bundling during
* multihoming stale cookie scenarios, the following
* command plays with asoc->peer.retran_path to
* avoid the problem of sending the COOKIE-ECHO and
* DATA in different paths, which could result
* in the association being ABORTed if the DATA chunk
* is processed first by the server. Checking the
* init error counter simply causes this command
* to be executed only during failed attempts of
@ -1177,7 +1177,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
asoc->peer.primary_path) &&
(asoc->init_err_counter > 0)) {
sctp_add_cmd_sf(commands,
SCTP_CMD_FORCE_PRIM_RETRAN,
SCTP_CMD_FORCE_PRIM_RETRAN,
SCTP_NULL());
}


@ -189,7 +189,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
0, 0, 0, GFP_ATOMIC);
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
SCTP_ULPEVENT(ev));
/* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
* will verify that it is in SHUTDOWN-ACK-SENT state, if it is
@ -228,7 +228,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
* Verification Tag field to Tag_A, and also provide its own
* Verification Tag (Tag_Z) in the Initiate Tag field.
*
* Verification Tag: Must be 0.
* Verification Tag: Must be 0.
*
* Inputs
* (endpoint, asoc, chunk)
@ -256,7 +256,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
@ -282,7 +282,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
@ -326,7 +326,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
}
}
/* Grab the INIT header. */
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
/* Tag the variable length parameters. */
@ -594,7 +594,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr =
chunk->subh.cookie_hdr =
(struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
@ -665,7 +665,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
if (!ev)
goto nomem_ev;
/* Sockets API Draft Section 5.3.1.6
/* Sockets API Draft Section 5.3.1.6
* When a peer sends a Adaptation Layer Indication parameter , SCTP
* delivers this notification to inform the application that of the
* peers requested adaptation layer.
@ -891,7 +891,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
return SCTP_DISPOSITION_CONSUME;
}
/*
@ -1280,7 +1280,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
@ -1548,7 +1548,7 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
/* Per the above section, we'll discard the chunk if we have an
* endpoint. If this is an OOTB INIT-ACK, treat it as such.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
else
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
@ -1760,9 +1760,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
/* Clarification from Implementor's Guide:
* D) When both local and remote tags match the endpoint should
* enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
* It should stop any cookie timer that may be running and send
* a COOKIE ACK.
*/
/* Don't accidentally move back into established state. */
@ -1786,7 +1786,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
SCTP_COMM_UP, 0,
asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
GFP_ATOMIC);
GFP_ATOMIC);
if (!ev)
goto nomem;
@ -1870,7 +1870,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
@ -1936,7 +1936,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
default: /* Discard packet for all others. */
retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
break;
};
};
/* Delete the tempory new association. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
@ -2083,7 +2083,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
*/
sctp_walk_errors(err, chunk->chunk_hdr) {
if (SCTP_ERROR_STALE_COOKIE == err->cause)
return sctp_sf_do_5_2_6_stale(ep, asoc, type,
return sctp_sf_do_5_2_6_stale(ep, asoc, type,
arg, commands);
}
@ -2185,10 +2185,10 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
/* If we've sent any data bundled with COOKIE-ECHO we will need to
* resend
*/
sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
SCTP_TRANSPORT(asoc->peer.primary_path));
/* Cast away the const modifier, as we want to just
@ -2274,7 +2274,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@ -2439,7 +2439,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
if (!ev) {
disposition = SCTP_DISPOSITION_NOMEM;
goto out;
goto out;
}
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
@ -2553,7 +2553,7 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
cwr = (sctp_cwrhdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t));
@ -2661,7 +2661,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
@ -2743,7 +2743,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_DISCARD;
consume:
return SCTP_DISPOSITION_CONSUME;
}
/*
@ -2930,7 +2930,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
/* Make an ABORT. The T bit will be set if the asoc
* is NULL.
*/
abort = sctp_make_abort(asoc, chunk, 0);
abort = sctp_make_abort(asoc, chunk, 0);
if (!abort) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
@ -2994,7 +2994,7 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
}
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
SCTP_CHUNK(chunk));
SCTP_CHUNK(chunk));
}
return SCTP_DISPOSITION_CONSUME;
@ -3128,7 +3128,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
*/
if (SCTP_CID_ABORT == ch->type)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb->tail);
@ -3175,8 +3175,8 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
if (packet) {
/* Make an SHUTDOWN_COMPLETE.
* The T bit will be set if the asoc is NULL.
*/
shut = sctp_make_shutdown_complete(asoc, chunk);
if (!shut) {
sctp_ootb_pkt_free(packet);
@ -3261,10 +3261,10 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
/* ADDIP 4.2 C1) Compare the value of the serial number to the value
* the endpoint stored in a new association variable
* 'Peer-Serial-Number'.
* 'Peer-Serial-Number'.
*/
if (serial == asoc->peer.addip_serial + 1) {
/* ADDIP 4.2 C2) If the value found in the serial number is
/* ADDIP 4.2 C2) If the value found in the serial number is
* equal to the ('Peer-Serial-Number' + 1), the endpoint MUST
* do V1-V5.
*/
@ -3285,9 +3285,9 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
else
return SCTP_DISPOSITION_DISCARD;
} else {
/* ADDIP 4.2 C4) Otherwise, the ASCONF Chunk is discarded since
* it must be either a stale packet or from an attacker.
*/
return SCTP_DISPOSITION_DISCARD;
}
@ -3296,7 +3296,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
* being responded to.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
return SCTP_DISPOSITION_CONSUME;
}
@ -3307,7 +3307,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
*/
sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *asconf_ack = arg;
@ -3359,7 +3359,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@ -3387,7 +3387,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@ -3451,17 +3451,17 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
@ -3511,9 +3511,9 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Go a head and force a SACK, since we are shutting down. */
gen_shutdown:
/* Implementor's Guide.
@ -3527,7 +3527,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
return SCTP_DISPOSITION_CONSUME;
}
/*
@ -3706,7 +3706,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
* if its length is set to be smaller than the size of sctp_sack_chunk_t.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
* error code.
*
* Section: Not specified
* Verification Tag: Nothing to do
@ -3747,7 +3747,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@ -3756,7 +3756,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
return SCTP_DISPOSITION_ABORT;
nomem:
@ -4437,7 +4437,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
@ -4515,7 +4515,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
@ -4874,7 +4874,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
/* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
* choose an alternate destination address (please refer to RFC2960
* [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
* chunk, it MUST be the same (including its serial number) as the last
* chunk, it MUST be the same (including its serial number) as the last
* ASCONF sent.
*/
sctp_chunk_hold(asoc->addip_last_asconf);
@ -4953,7 +4953,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
/* sctpimpguide-05 Section 2.12.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
*/
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME;
@ -5127,7 +5127,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
vtag = ntohl(init->init_hdr.init_tag);
break;
}
default:
default:
vtag = ntohl(chunk->sctp_hdr->vtag);
break;
}
@ -5176,7 +5176,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
/* Override the OOTB vtag from the cookie. */
cookie = chunk->subh.cookie_hdr;
packet->vtag = cookie->c.peer_vtag;
/* Set the skb to the belonging sock for accounting. */
err_chunk->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, err_chunk);
@ -5310,7 +5310,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
/* Spill over rwnd a little bit. Note: While allowed, this spill over
/* Spill over rwnd a little bit. Note: While allowed, this spill over
* seems a bit troublesome in that frag_point varies based on
* PMTU. In cases such as loopback, this might be a rather
* large spill over.


@ -954,7 +954,7 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
};
static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
sctp_state_t state)
{
if (state > SCTP_STATE_MAX)


@ -381,12 +381,12 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
*
* R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
* R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
* at any one time. If a sender, after sending an ASCONF chunk, decides
* it needs to transfer another ASCONF Chunk, it MUST wait until the
* it needs to transfer another ASCONF Chunk, it MUST wait until the
* ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
* subsequent ASCONF. Note this restriction binds each side, so at any
* time two ASCONF may be in-transit on any given association (one sent
* subsequent ASCONF. Note this restriction binds each side, so at any
* time two ASCONF may be in-transit on any given association (one sent
* from each endpoint).
*/
static int sctp_send_asconf(struct sctp_association *asoc,
@ -396,10 +396,10 @@ static int sctp_send_asconf(struct sctp_association *asoc,
/* If there is an outstanding ASCONF chunk, queue it for later
* transmission.
*/
*/
if (asoc->addip_last_asconf) {
list_add_tail(&chunk->list, &asoc->addip_chunk_list);
goto out;
goto out;
}
/* Hold the chunk until an ASCONF_ACK is received. */
@ -449,7 +449,7 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
goto err_bindx_add;
}
retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
af->sockaddr_len);
addr_buf += af->sockaddr_len;
@ -470,13 +470,13 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
* associations that are part of the endpoint indicating that a list of local
* addresses are added to the endpoint.
*
* If any of the addresses is already in the bind address list of the
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_send_asconf_add_ip(struct sock *sk,
static int sctp_send_asconf_add_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
@ -517,8 +517,8 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
continue;
/* Check if any address in the packed array of addresses is
* in the bind address list of the association. If so,
* do not send the asconf chunk to its peer, but continue with
* in the bind address list of the association. If so,
* do not send the asconf chunk to its peer, but continue with
* other associations.
*/
addr_buf = addrs;
@ -664,7 +664,7 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
* the associations that are part of the endpoint indicating that a list of
* local addresses are removed from the endpoint.
*
* If any of the addresses is already in the bind address list of the
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
@ -710,7 +710,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
continue;
/* Check if any address in the packed array of addresses is
* not present in the bind address list of the association.
* not present in the bind address list of the association.
* If so, do not send the asconf chunk to its peer, but
* continue with other associations.
*/
@ -898,7 +898,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
return -EFAULT;
}
/* Walk through the addrs buffer and count the number of addresses. */
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
sa_addr = (struct sockaddr *)addr_buf;
@ -906,7 +906,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
kfree(kaddrs);
return -EINVAL;
@ -935,7 +935,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
default:
err = -EINVAL;
break;
};
};
out:
kfree(kaddrs);
@ -1035,10 +1035,10 @@ static int __sctp_connect(struct sock* sk,
}
} else {
/*
* If an unprivileged user inherits a 1-many
* style socket with open associations on a
* privileged port, it MAY be permitted to
* accept new associations, but it SHOULD NOT
* If an unprivileged user inherits a 1-many
* style socket with open associations on a
* privileged port, it MAY be permitted to
* accept new associations, but it SHOULD NOT
* be permitted to open new associations.
*/
if (ep->base.bind_addr.port < PROT_SOCK &&
@ -1094,8 +1094,8 @@ static int __sctp_connect(struct sock* sk,
out_free:
SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
" kaddrs: %p err: %d\n",
asoc, kaddrs, err);
" kaddrs: %p err: %d\n",
asoc, kaddrs, err);
if (asoc)
sctp_association_free(asoc);
return err;
@ -1435,7 +1435,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* length messages when SCTP_EOF|SCTP_ABORT is not set.
* If SCTP_ABORT is set, the message length could be non zero with
* the msg_iov set to the user abort reason.
*/
*/
if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
(!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
err = -EINVAL;
@ -1599,7 +1599,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
= sinit->sinit_max_attempts;
}
if (sinit->sinit_max_init_timeo) {
asoc->max_init_timeo =
asoc->max_init_timeo =
msecs_to_jiffies(sinit->sinit_max_init_timeo);
}
}
@ -2298,7 +2298,7 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
/* Get association, if assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
*/
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
@ -2307,22 +2307,22 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
if (asoc) {
asoc->sackdelay =
msecs_to_jiffies(params.assoc_value);
asoc->param_flags =
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
} else {
sp->sackdelay = params.assoc_value;
sp->param_flags =
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
}
} else {
if (asoc) {
asoc->param_flags =
asoc->param_flags =
(asoc->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_DISABLE;
} else {
sp->param_flags =
sp->param_flags =
(sp->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_DISABLE;
}
@ -2338,17 +2338,17 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
if (params.assoc_value) {
trans->sackdelay =
msecs_to_jiffies(params.assoc_value);
trans->param_flags =
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_ENABLE;
} else {
trans->param_flags =
trans->param_flags =
(trans->param_flags & ~SPP_SACKDELAY) |
SPP_SACKDELAY_DISABLE;
}
}
}
return 0;
}
@ -2374,13 +2374,13 @@ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int opt
return -EFAULT;
if (sinit.sinit_num_ostreams)
sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
if (sinit.sinit_max_instreams)
sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
if (sinit.sinit_max_attempts)
sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
if (sinit.sinit_max_init_timeo)
sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
return 0;
}
@ -2511,7 +2511,7 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int opt
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
if (rtoinfo.srto_max != 0)
asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
@ -2665,7 +2665,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
/* Update the frag_point of the existing associations. */
list_for_each(pos, &(sp->ep->asocs)) {
asoc = list_entry(pos, struct sctp_association, asocs);
asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
}
return 0;
@ -2703,7 +2703,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
if (!asoc)
if (!asoc)
return -EINVAL;
if (!asoc->peer.asconf_capable)
@ -3015,7 +3015,7 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
out:
sctp_release_sock(sk);
*err = error;
*err = error;
return newsk;
}
@ -3087,7 +3087,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
/* Initialize default event subscriptions. By default, all the
* options are off.
* options are off.
*/
memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
@ -3099,8 +3099,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->pathmtu = 0; // allow default discovery
sp->sackdelay = sctp_sack_timeout;
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
/* If enabled no SCTP message fragmentation will be performed.
* Configure through SCTP_DISABLE_FRAGMENTS socket option.
@ -3680,7 +3680,7 @@ static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len,
/* Get association, if assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
* the id was invalid.
*/
*/
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
@ -3756,7 +3756,7 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
return cnt;
}
/*
/*
* Old API for getting list of peer addresses. Does not work for 32-bit
* programs running on a 64-bit kernel
*/
@ -3833,7 +3833,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
return -EINVAL;
to = optval + offsetof(struct sctp_getaddrs,addrs);
space_left = len - sizeof(struct sctp_getaddrs) -
space_left = len - sizeof(struct sctp_getaddrs) -
offsetof(struct sctp_getaddrs,addrs);
list_for_each(pos, &asoc->peer.transport_addr_list) {
@ -3907,7 +3907,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
addr = list_entry(pos,
struct sctp_sockaddr_entry,
list);
if ((PF_INET == sk->sk_family) &&
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
cnt++;
@ -3941,7 +3941,7 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
list_for_each_safe(pos, next, &sctp_local_addr_list) {
addr = list_entry(pos, struct sctp_sockaddr_entry, list);
if ((PF_INET == sk->sk_family) &&
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
memcpy(&temp, &addr->a, sizeof(temp));
@ -3970,7 +3970,7 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
list_for_each_safe(pos, next, &sctp_local_addr_list) {
addr = list_entry(pos, struct sctp_sockaddr_entry, list);
if ((PF_INET == sk->sk_family) &&
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
memcpy(&temp, &addr->a, sizeof(temp));
@ -4051,7 +4051,7 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
err = cnt;
goto unlock;
}
goto copy_getaddrs;
goto copy_getaddrs;
}
}
@ -4139,7 +4139,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
err = cnt;
goto unlock;
}
goto copy_getaddrs;
goto copy_getaddrs;
}
}
@ -4196,7 +4196,7 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
if (!asoc->peer.primary_path)
return -ENOTCONN;
memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
asoc->peer.primary_path->af_specific->sockaddr_len);
@ -4864,7 +4864,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
return 0;
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
}
@ -4872,7 +4872,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
/* Return if we are already listening. */
if (sctp_sstate(sk, LISTENING))
return 0;
/*
* If a bind() or sctp_bindx() is not called prior to a listen()
* call that allows new associations to be accepted, the system
@ -4907,7 +4907,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
return 0;
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
}
@ -5010,7 +5010,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
return (!list_empty(&sp->ep->asocs)) ?
(POLLIN | POLLRDNORM) : 0;
(POLLIN | POLLRDNORM) : 0;
mask = 0;
@ -5430,7 +5430,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
DEFINE_WAIT(wait);
SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n",
asoc, (long)(*timeo_p), msg_len);
asoc, (long)(*timeo_p), msg_len);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);


@ -130,9 +130,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
gfp_t gfp)
{
struct sctp_transport *transport;
struct sctp_transport *transport;
transport = t_new(struct sctp_transport, gfp);
transport = t_new(struct sctp_transport, gfp);
if (!transport)
goto fail;
@ -185,7 +185,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
if (transport->asoc)
sctp_association_put(transport->asoc);
sctp_packet_free(&transport->packet);
sctp_packet_free(&transport->packet);
dst_release(transport->dst);
kfree(transport);
@ -268,7 +268,7 @@ void sctp_transport_route(struct sctp_transport *transport,
/* Initialize sk->sk_rcv_saddr, if the transport is the
* association's active path for getsockname().
*/
*/
if (asoc && (transport == asoc->peer.active_path))
opt->pf->af->to_sk_saddr(&transport->saddr,
asoc->base.sk);
@ -459,8 +459,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* destination address(es) to which the missing DATA chunks
* were last sent, according to the formula described in
* Section 7.2.3.
*
* RFC 2960 7.2.3, sctpimpguide Upon detection of packet
*
* RFC 2960 7.2.3, sctpimpguide Upon detection of packet
* losses from SACK (see Section 7.2.4), An endpoint
* should do the following:
* ssthresh = max(cwnd/2, 4*MTU)
@ -488,7 +488,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
if ((jiffies - transport->last_time_ecne_reduced) >
transport->rtt) {
transport->ssthresh = max(transport->cwnd/2,
4*transport->asoc->pathmtu);
4*transport->asoc->pathmtu);
transport->cwnd = transport->ssthresh;
transport->last_time_ecne_reduced = jiffies;
}


@ -277,7 +277,7 @@ static void sctp_tsnmap_update(struct sctp_tsnmap *map)
/* Now tsn_map must have been all '1's,
* so we swap the map and check the overflow table
*/
__u8 *tmp = map->tsn_map;
__u8 *tmp = map->tsn_map;
memset(tmp, 0, map->len);
map->tsn_map = map->overflow_map;
map->overflow_map = tmp;


@ -749,7 +749,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
*/
pd->pdapi_length = sizeof(struct sctp_pdapi_event);
/* pdapi_indication: 32 bits (unsigned integer)
/* pdapi_indication: 32 bits (unsigned integer)
*
* This field holds the indication being sent to the application.
*/
@ -790,13 +790,13 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
return;
/* Sockets API Extensions for SCTP
* Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
*
* sinfo_stream: 16 bits (unsigned integer)
*
* For recvmsg() the SCTP stack places the message's stream number in
* this value.
*/
* Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
*
* sinfo_stream: 16 bits (unsigned integer)
*
* For recvmsg() the SCTP stack places the message's stream number in
* this value.
*/
sinfo.sinfo_stream = event->stream;
/* sinfo_ssn: 16 bits (unsigned integer)
*
@ -828,7 +828,7 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
sinfo.sinfo_flags = event->flags;
/* sinfo_tsn: 32 bit (unsigned integer)
*
* For the receiving side, this field holds a TSN that was
* For the receiving side, this field holds a TSN that was
* assigned to one of the SCTP Data Chunks.
*/
sinfo.sinfo_tsn = event->tsn;
@ -879,7 +879,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
* fragment of the real event. However, we still need to do rwnd
* accounting.
* In general, the skb passed from IP can have only 1 level of
* fragments. But we allow multiple levels of fragments.
* fragments. But we allow multiple levels of fragments.
*/
for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
@ -888,7 +888,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
/* Do accounting for bytes just read by user and release the references to
* the association.
*/
*/
static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
{
struct sk_buff *skb, *frag;


@ -191,7 +191,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
queue = &sk->sk_receive_queue;
} else if (ulpq->pd_mode) {
if (event->msg_flags & MSG_NOTIFICATION)
queue = &sctp_sk(sk)->pd_lobby;
queue = &sctp_sk(sk)->pd_lobby;
else {
clear_pd = event->msg_flags & MSG_EOR;
queue = &sk->sk_receive_queue;
@ -298,32 +298,32 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
*/
if (last)
last->next = pos;
else {
if (skb_cloned(f_frag)) {
/* This is a cloned skb, we can't just modify
* the frag_list. We need a new skb to do that.
* Instead of calling skb_unshare(), we'll do it
* ourselves since we need to delay the free.
*/
new = skb_copy(f_frag, GFP_ATOMIC);
if (!new)
return NULL; /* try again later */
else {
if (skb_cloned(f_frag)) {
/* This is a cloned skb, we can't just modify
* the frag_list. We need a new skb to do that.
* Instead of calling skb_unshare(), we'll do it
* ourselves since we need to delay the free.
*/
new = skb_copy(f_frag, GFP_ATOMIC);
if (!new)
return NULL; /* try again later */
sctp_skb_set_owner_r(new, f_frag->sk);
sctp_skb_set_owner_r(new, f_frag->sk);
skb_shinfo(new)->frag_list = pos;
} else
skb_shinfo(f_frag)->frag_list = pos;
}
skb_shinfo(new)->frag_list = pos;
} else
skb_shinfo(f_frag)->frag_list = pos;
}
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, queue);
/* if we did unshare, then free the old skb and re-assign */
if (new) {
kfree_skb(f_frag);
f_frag = new;
}
/* if we did unshare, then free the old skb and re-assign */
if (new) {
kfree_skb(f_frag);
f_frag = new;
}
while (pos) {
@ -335,7 +335,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
/* Remove the fragment from the reassembly queue. */
__skb_unlink(pos, queue);
/* Break if we have reached the last fragment. */
if (pos == l_frag)
break;
@ -624,7 +624,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
sid = event->stream;
ssn = event->ssn;
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
cssn = cevent->ssn;
@ -718,11 +718,11 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
if (cssn != sctp_ssn_peek(in, csid))
break;
/* Found it, so mark in the ssnmap. */
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, csid);
__skb_unlink(pos, &ulpq->lobby);
if (!event) {
if (!event) {
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
__skb_queue_tail(&temp, sctp_event2skb(event));
@ -755,7 +755,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
sctp_ssn_skip(in, sid, ssn);
/* Go find any other chunks that were waiting for
* ordering and deliver them if needed.
* ordering and deliver them if needed.
*/
sctp_ulpq_reap_ordered(ulpq);
return;
@ -849,7 +849,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (chunk) {
needed = ntohs(chunk->chunk_hdr->length);
needed -= sizeof(sctp_data_chunk_t);
} else
} else
needed = SCTP_DEFAULT_MAXWINDOW;
freed = 0;
@ -866,7 +866,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
tsn = ntohl(chunk->subh.data_hdr->tsn);
sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
sctp_ulpq_tail_data(ulpq, chunk, gfp);
sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
}