tcp: unify tcp flag macros

unify tcp flag macros: TCPHDR_FIN, TCPHDR_SYN, TCPHDR_RST, TCPHDR_PSH,
TCPHDR_ACK, TCPHDR_URG, TCPHDR_ECE and TCPHDR_CWR. TCPCB_FLAG_* are replaced
with the corresponding TCPHDR_*.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
----
 include/net/tcp.h                      |   24 ++++++-------
 net/ipv4/tcp.c                         |    8 ++--
 net/ipv4/tcp_input.c                   |    2 -
 net/ipv4/tcp_output.c                  |   59 ++++++++++++++++-----------------
 net/netfilter/nf_conntrack_proto_tcp.c |   32 ++++++-----------
 net/netfilter/xt_TCPMSS.c              |    4 --
 6 files changed, 58 insertions(+), 71 deletions(-)
Signed-off-by: David S. Miller <davem@davemloft.net>
Changli Gao, 2010-06-12 14:01:43 +00:00; committed by David S. Miller
commit a3433f35a5 (parent f350a0a873)
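
As a quick aside before the diff: the point of the rename is that one set of constants now works both on the flags byte of a real TCP header (via tcp_flag_byte()) and on the flags field cached in tcp_skb_cb. The stand-alone sketch below demonstrates this; the macro values and tcp_flag_byte() are copied from the new block in include/net/tcp.h, while the main() harness and the sample header bytes are invented purely for illustration.

#include <stdio.h>
#include <stdint.h>

/* Values copied from the new definitions in include/net/tcp.h. */
#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* Byte 13 of the TCP header holds the flag bits. */
#define tcp_flag_byte(th) (((uint8_t *)(th))[13])

int main(void)
{
	/* A fabricated 20-byte TCP header with SYN|ACK set in byte 13. */
	uint8_t th[20] = { 0 };
	th[13] = TCPHDR_SYN | TCPHDR_ACK;

	/* The same constants index the flags byte of a real header and the
	 * flags field kept in tcp_skb_cb, which is what lets the patch drop
	 * the duplicate TCPCB_FLAG_* and TH_* copies. */
	if (tcp_flag_byte(th) & TCPHDR_SYN)
		printf("SYN set\n");
	if (!(tcp_flag_byte(th) & TCPHDR_FIN))
		printf("FIN clear\n");
	return 0;
}

Inside the kernel the same test reads TCP_SKB_CB(skb)->flags & TCPHDR_SYN, as the hunks below show.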

diff --git a/include/net/tcp.h b/include/net/tcp.h

@@ -602,6 +602,17 @@ extern u32 __tcp_select_window(struct sock *sk);
  */
 #define tcp_time_stamp		((__u32)(jiffies))
 
+#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+
 /* This is what the send packet queuing engine uses to pass
  * TCP per-packet control information to the transmission
  * code.  We also store the host-order sequence numbers in
@@ -620,19 +631,6 @@ struct tcp_skb_cb {
 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 	__u32		when;		/* used to compute rtt's	*/
 	__u8		flags;		/* TCP header flags.		*/
-
-	/* NOTE: These must match up to the flags byte in a
-	 *       real TCP header.
-	 */
-#define TCPCB_FLAG_FIN		0x01
-#define TCPCB_FLAG_SYN		0x02
-#define TCPCB_FLAG_RST		0x04
-#define TCPCB_FLAG_PSH		0x08
-#define TCPCB_FLAG_ACK		0x10
-#define TCPCB_FLAG_URG		0x20
-#define TCPCB_FLAG_ECE		0x40
-#define TCPCB_FLAG_CWR		0x80
-
 	__u8		sacked;		/* State flags for SACK/FACK.	*/
 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c

@@ -511,7 +511,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 	tp->pushed_seq = tp->write_seq;
 }
 
@@ -527,7 +527,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 
 	skb->csum    = 0;
 	tcb->seq     = tcb->end_seq = tp->write_seq;
-	tcb->flags   = TCPCB_FLAG_ACK;
+	tcb->flags   = TCPHDR_ACK;
 	tcb->sacked  = 0;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
@@ -815,7 +815,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 		skb_shinfo(skb)->gso_segs = 0;
 
 		if (!copied)
-			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 
 		copied += copy;
 		poffset += copy;
@@ -1061,7 +1061,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 			}
 
 			if (!copied)
-				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 
 			tp->write_seq += copy;
 			TCP_SKB_CB(skb)->end_seq += copy;

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c

@@ -3286,7 +3286,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			 * connection startup slow start one packet too
 			 * quickly.  This is severely frowned upon behavior.
 			 */
-			if (!(scb->flags & TCPCB_FLAG_SYN)) {
+			if (!(scb->flags & TCPHDR_SYN)) {
 				flag |= FLAG_DATA_ACKED;
 			} else {
 				flag |= FLAG_SYN_ACKED;

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c

@@ -294,9 +294,9 @@ static u16 tcp_select_window(struct sock *sk)
 /* Packet ECN state for a SYN-ACK */
 static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
 	if (!(tp->ecn_flags & TCP_ECN_OK))
-		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
 }
 
 /* Packet ECN state for a SYN.  */
@@ -306,7 +306,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 
 	tp->ecn_flags = 0;
 	if (sysctl_tcp_ecn == 1) {
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
 		tp->ecn_flags = TCP_ECN_OK;
 	}
 }
@@ -361,7 +361,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	skb_shinfo(skb)->gso_type = 0;
 
 	TCP_SKB_CB(skb)->seq = seq;
-	if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
+	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 		seq++;
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
@@ -820,7 +820,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	tcb = TCP_SKB_CB(skb);
 	memset(&opts, 0, sizeof(opts));
 
-	if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
+	if (unlikely(tcb->flags & TCPHDR_SYN))
 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 	else
 		tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -843,7 +843,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 					tcb->flags);
 
-	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
+	if (unlikely(tcb->flags & TCPHDR_SYN)) {
 		/* RFC1323: The window in SYN & SYN/ACK segments
 		 * is never scaled.
 		 */
@@ -866,7 +866,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	}
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
-	if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
+	if (likely((tcb->flags & TCPHDR_SYN) == 0))
 		TCP_ECN_send(sk, skb, tcp_header_size);
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -880,7 +880,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
 	icsk->icsk_af_ops->send_check(sk, skb);
 
-	if (likely(tcb->flags & TCPCB_FLAG_ACK))
+	if (likely(tcb->flags & TCPHDR_ACK))
 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 
 	if (skb->len != tcp_header_size)
@@ -1023,7 +1023,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 
 	/* PSH and FIN should only be set in the second packet. */
 	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
 	TCP_SKB_CB(buff)->flags = flags;
 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
 
@@ -1328,8 +1328,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
 	u32 in_flight, cwnd;
 
 	/* Don't be strict about the congestion window for the final FIN.  */
-	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-	    tcp_skb_pcount(skb) == 1)
+	if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
 		return 1;
 
 	in_flight = tcp_packets_in_flight(tp);
@@ -1398,7 +1397,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
 	 * Nagle can be ignored during F-RTO too (see RFC4138).
 	 */
 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
-	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
 		return 1;
 
 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1487,7 +1486,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
 	/* PSH and FIN should only be set in the second packet. */
 	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
 	TCP_SKB_CB(buff)->flags = flags;
 
 	/* This packet was never sent out yet, so no SACK bits. */
@@ -1518,7 +1517,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
 
-	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
 		goto send_now;
 
 	if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1644,7 +1643,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
-	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
+	TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
 	TCP_SKB_CB(nskb)->sacked = 0;
 	nskb->csum = 0;
 	nskb->ip_summed = skb->ip_summed;
@@ -1669,7 +1668,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			sk_wmem_free_skb(sk, skb);
 		} else {
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
-						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+						   ~(TCPHDR_FIN|TCPHDR_PSH);
 			if (!skb_shinfo(skb)->nr_frags) {
 				skb_pull(skb, copy);
 				if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2020,7 +2019,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 
 	if (!sysctl_tcp_retrans_collapse)
 		return;
-	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+	if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
 		return;
 
 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2112,7 +2111,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 * since it is cheap to do so and saves bytes on the network.
 	 */
 	if (skb->len > 0 &&
-	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
 		if (!pskb_trim(skb, 0)) {
 			/* Reuse, even though it does some unnecessary work */
@@ -2301,7 +2300,7 @@ void tcp_send_fin(struct sock *sk)
 	mss_now = tcp_current_mss(sk);
 
 	if (tcp_send_head(sk) != NULL) {
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
 		TCP_SKB_CB(skb)->end_seq++;
 		tp->write_seq++;
 	} else {
@@ -2318,7 +2317,7 @@ void tcp_send_fin(struct sock *sk)
 		skb_reserve(skb, MAX_TCP_HEADER);
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		tcp_init_nondata_skb(skb, tp->write_seq,
-				     TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+				     TCPHDR_ACK | TCPHDR_FIN);
 		tcp_queue_skb(sk, skb);
 	}
 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
@@ -2343,7 +2342,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
-			     TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+			     TCPHDR_ACK | TCPHDR_RST);
 	/* Send it off. */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2363,11 +2362,11 @@ int tcp_send_synack(struct sock *sk)
 	struct sk_buff *skb;
 
 	skb = tcp_write_queue_head(sk);
-	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
+	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
 		return -EFAULT;
 	}
-	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
+	if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
 		if (skb_cloned(skb)) {
 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 			if (nskb == NULL)
@@ -2381,7 +2380,7 @@ int tcp_send_synack(struct sock *sk)
 			skb = nskb;
 		}
 
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
 		TCP_ECN_send_synack(tcp_sk(sk), skb);
 	}
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2460,7 +2459,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	 * not even correctly set)
 	 */
 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
-			     TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+			     TCPHDR_SYN | TCPHDR_ACK);
 
 	if (OPTION_COOKIE_EXTENSION & opts.options) {
 		if (s_data_desired) {
@@ -2592,7 +2591,7 @@ int tcp_connect(struct sock *sk)
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	tp->snd_nxt = tp->write_seq;
-	tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
+	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
 	TCP_ECN_send_syn(sk, buff);
 
 	/* Send it off. */
@@ -2698,7 +2697,7 @@ void tcp_send_ack(struct sock *sk)
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(buff, MAX_TCP_HEADER);
-	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
+	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
 	/* Send it off, this clears delayed acks for us. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2732,7 +2731,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	 * end to send an ack.  Don't queue or clone SKB, just
 	 * send it.
 	 */
-	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
+	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
@@ -2762,13 +2761,13 @@ int tcp_write_wakeup(struct sock *sk)
 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
 		    skb->len > mss) {
 			seg_size = min(seg_size, mss);
-			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+			TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 			if (tcp_fragment(sk, skb, seg_size, mss))
 				return -1;
 		} else if (!tcp_skb_pcount(skb))
 			tcp_set_skb_tso_segs(sk, skb, mss);
 
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err)

diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c

@@ -736,27 +736,19 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	return res;
 }
 
-#define	TH_FIN	0x01
-#define	TH_SYN	0x02
-#define	TH_RST	0x04
-#define	TH_PUSH	0x08
-#define	TH_ACK	0x10
-#define	TH_URG	0x20
-#define	TH_ECE	0x40
-#define	TH_CWR	0x80
-
 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
-static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
+static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
+				 TCPHDR_URG) + 1] =
 {
-	[TH_SYN]			= 1,
-	[TH_SYN|TH_URG]			= 1,
-	[TH_SYN|TH_ACK]			= 1,
-	[TH_RST]			= 1,
-	[TH_RST|TH_ACK]			= 1,
-	[TH_FIN|TH_ACK]			= 1,
-	[TH_FIN|TH_ACK|TH_URG]		= 1,
-	[TH_ACK]			= 1,
-	[TH_ACK|TH_URG]			= 1,
+	[TCPHDR_SYN]				= 1,
+	[TCPHDR_SYN|TCPHDR_URG]			= 1,
+	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
+	[TCPHDR_RST]				= 1,
+	[TCPHDR_RST|TCPHDR_ACK]			= 1,
+	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
+	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
+	[TCPHDR_ACK]				= 1,
+	[TCPHDR_ACK|TCPHDR_URG]			= 1,
 };
 
 /* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */
@@ -803,7 +795,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
 	}
 
 	/* Check TCP flags. */
-	tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH));
+	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
 	if (!tcp_valid_flags[tcpflags]) {
 		if (LOG_INVALID(net, IPPROTO_TCP))
 			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
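
For readers unfamiliar with the conntrack table above: it is a plain array indexed by the flag byte after PSH, ECE and CWR have been masked off, so every FIN/SYN/RST/ACK/URG combination maps to 0 (bogus) or 1 (valid). Below is a stand-alone sketch of that lookup; the table contents mirror the hunk, while the sample flag bytes and the main() loop are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* Same shape as the conntrack table: PSH, ECE and CWR are masked off
 * before the lookup, so only FIN/SYN/RST/ACK/URG combinations index it. */
static const uint8_t tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
				      TCPHDR_URG) + 1] = {
	[TCPHDR_SYN]				= 1,
	[TCPHDR_SYN|TCPHDR_URG]			= 1,
	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
	[TCPHDR_RST]				= 1,
	[TCPHDR_RST|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
	[TCPHDR_ACK]				= 1,
	[TCPHDR_ACK|TCPHDR_URG]			= 1,
};

int main(void)
{
	/* Fabricated flag bytes: SYN|ACK|ECE (valid) and SYN|FIN (bogus). */
	uint8_t samples[] = { TCPHDR_SYN|TCPHDR_ACK|TCPHDR_ECE, TCPHDR_SYN|TCPHDR_FIN };

	for (unsigned i = 0; i < sizeof(samples); i++) {
		uint8_t masked = samples[i] & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH);
		printf("0x%02x -> %s\n", samples[i],
		       tcp_valid_flags[masked] ? "valid" : "bogus");
	}
	return 0;
}

In the kernel the masking is exactly the tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH) expression in tcp_error() above.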

diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c

@@ -220,15 +220,13 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 }
 #endif
 
-#define TH_SYN 0x02
-
 /* Must specify -p tcp --syn */
 static inline bool find_syn_match(const struct xt_entry_match *m)
 {
 	const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
 
 	if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
-	    tcpinfo->flg_cmp & TH_SYN &&
+	    tcpinfo->flg_cmp & TCPHDR_SYN &&
 	    !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
 		return true;