Merge branch 'listener-sock-const'

Eric Dumazet says:

====================
dccp/tcp: constify listener sock

Another patch bomb to prepare lockless TCP/DCCP LISTEN handling.

SYNACK retransmits are built and sent without the listener socket
being locked. Soon, initial SYNACK packets will have the same property.

This series makes sure we did not do something wrong with this model,
by adding a const qualifier to all the paths taken from SYNACK building
and transmit, for IPv4/IPv6 and TCP/DCCP.

The only potential problem was the rewrite of ECN bits for connections
with DCTCP as the congestion module, but this was a very minor one.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4d54d86546 (David S. Miller, 2015-09-25 13:00:40 -07:00)

24 changed files with 149 additions and 106 deletions
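
The point of the constification is easy to see in miniature. Below is an
illustrative sketch (plain C with toy names, not kernel code): once the
listener is passed around as a const pointer, a stray write to shared socket
state in the unlocked SYN/SYNACK path becomes a compile-time error instead
of a silent data race.

	/* toy stand-in for the kernel's struct sock */
	struct toy_sock {
		int sk_sndbuf;
	};

	/* runs without the listener lock, possibly on many CPUs at once */
	static int build_toy_synack(const struct toy_sock *sk_listener)
	{
		int sndbuf = sk_listener->sk_sndbuf; /* reads are fine */

		/* sk_listener->sk_sndbuf = 4096;  <-- now rejected by the compiler */
		return sndbuf;
	}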

diff --git a/include/net/dst.h b/include/net/dst.h

@@ -489,7 +489,8 @@ struct flowi;
 #ifndef CONFIG_XFRM
 static inline struct dst_entry *xfrm_lookup(struct net *net,
                                             struct dst_entry *dst_orig,
-                                            const struct flowi *fl, struct sock *sk,
+                                            const struct flowi *fl,
+                                            const struct sock *sk,
                                             int flags)
 {
         return dst_orig;
@@ -498,7 +499,7 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
 static inline struct dst_entry *xfrm_lookup_route(struct net *net,
                                                   struct dst_entry *dst_orig,
                                                   const struct flowi *fl,
-                                                  struct sock *sk,
+                                                  const struct sock *sk,
                                                   int flags)
 {
         return dst_orig;
@@ -511,11 +512,11 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 #else
 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
-                              const struct flowi *fl, struct sock *sk,
+                              const struct flowi *fl, const struct sock *sk,
                               int flags);

 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
-                                    const struct flowi *fl, struct sock *sk,
+                                    const struct flowi *fl, const struct sock *sk,
                                     int flags);

 /* skb attached with this dst needs transformation if dst->xfrm is valid */

diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h

@@ -25,7 +25,7 @@ struct sockaddr;
 int inet6_csk_bind_conflict(const struct sock *sk,
                             const struct inet_bind_bucket *tb, bool relax);

-struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
                                       const struct request_sock *req);

 struct request_sock *inet6_csk_search_req(struct sock *sk,

diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h

@@ -266,7 +266,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
                            const struct inet_bind_bucket *tb, bool relax);
 int inet_csk_get_port(struct sock *sk, unsigned short snum);

-struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
+struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
                                      const struct request_sock *req);
 struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
                                             const struct request_sock *req);

diff --git a/include/net/ip.h b/include/net/ip.h

@@ -100,7 +100,7 @@ int igmp_mc_init(void);
  *	Functions provided by ip.c
  */

-int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
                           __be32 saddr, __be32 daddr,
                           struct ip_options_rcu *opt);
 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
@@ -282,10 +282,12 @@ int ip_decrease_ttl(struct iphdr *iph)
 }

 static inline
-int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
+int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
 {
-        return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
-               (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
+        u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+        return pmtudisc == IP_PMTUDISC_DO ||
+               (pmtudisc == IP_PMTUDISC_WANT &&
                 !(dst_metric_locked(dst, RTAX_MTU)));
 }
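
The ip_dont_fragment() hunk above also shows the other recurring pattern in
this series: a lockless reader snapshots a racy socket field exactly once.
A hedged userspace analogue follows (the atomic variable stands in for
inet_sk(sk)->pmtudisc; the constant values mirror linux/in.h):

	#include <stdatomic.h>

	enum { PMTUDISC_WANT = 1, PMTUDISC_DO = 2 };

	static int dont_fragment(_Atomic unsigned char *pmtudisc, int mtu_locked)
	{
		/* one load, so both comparisons test the same value even
		 * if another CPU rewrites the field between them
		 */
		unsigned char v = atomic_load_explicit(pmtudisc,
						       memory_order_relaxed);

		return v == PMTUDISC_DO || (v == PMTUDISC_WANT && !mtu_locked);
	}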

diff --git a/include/net/ipv6.h b/include/net/ipv6.h

@@ -812,7 +812,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 /*
  *	upper-layer output functions
  */
-int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
              struct ipv6_txoptions *opt, int tclass);

 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
@@ -849,7 +849,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
                    struct flowi6 *fl6);
-struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
                                       const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                          const struct in6_addr *final_dst);

diff --git a/include/net/request_sock.h b/include/net/request_sock.h

@@ -32,7 +32,7 @@ struct request_sock_ops {
         int             obj_size;
         struct kmem_cache *slab;
         char            *slab_name;
-        int             (*rtx_syn_ack)(struct sock *sk,
+        int             (*rtx_syn_ack)(const struct sock *sk,
                                        struct request_sock *req);
         void            (*send_ack)(struct sock *sk, struct sk_buff *skb,
                                     struct request_sock *req);
@@ -42,7 +42,7 @@ struct request_sock_ops {
         void            (*syn_ack_timeout)(const struct request_sock *req);
 };

-int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

 /* struct request_sock - mini sock to represent a connection request
  */

diff --git a/include/net/route.h b/include/net/route.h

@@ -114,7 +114,7 @@ void rt_cache_flush(struct net *net);
 void rt_flush_dev(struct net_device *dev);
 struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
 struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
-                                    struct sock *sk);
+                                    const struct sock *sk);
 struct dst_entry *ipv4_blackhole_route(struct net *net,
                                        struct dst_entry *dst_orig);

diff --git a/include/net/tcp.h b/include/net/tcp.h

@@ -461,7 +461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
-struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                 struct request_sock *req,
                                 struct tcp_fastopen_cookie *foc);
 int tcp_disconnect(struct sock *sk, int flags);
@@ -1207,7 +1207,8 @@ static inline int tcp_full_space(const struct sock *sk)
 }

 extern void tcp_openreq_init_rwin(struct request_sock *req,
-                                  struct sock *sk, struct dst_entry *dst);
+                                  const struct sock *sk_listener,
+                                  const struct dst_entry *dst);

 void tcp_enter_memory_pressure(struct sock *sk);
@@ -1371,16 +1372,16 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
                    int family);
-struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                          const struct sock *addr_sk);

 #ifdef CONFIG_TCP_MD5SIG
-struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
                                          const union tcp_md5_addr *addr,
                                          int family);
 #define tcp_twsk_md5_key(twsk)  ((twsk)->tw_md5_key)
 #else
-static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
                                          const union tcp_md5_addr *addr,
                                          int family)
 {
@@ -1675,7 +1676,7 @@ int tcp4_proc_init(void);
 void tcp4_proc_exit(void);
 #endif

-int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
+int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
                      const struct tcp_request_sock_ops *af_ops,
                      struct sock *sk, struct sk_buff *skb);
@@ -1683,7 +1684,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 /* TCP af-specific functions */
 struct tcp_sock_af_ops {
 #ifdef CONFIG_TCP_MD5SIG
-        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
+        struct tcp_md5sig_key   *(*md5_lookup) (const struct sock *sk,
                                                 const struct sock *addr_sk);
         int             (*calc_md5_hash)(char *location,
                                          const struct tcp_md5sig_key *md5,
@@ -1698,14 +1699,15 @@ struct tcp_sock_af_ops {
 struct tcp_request_sock_ops {
         u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
-        struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk,
+        struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
                                                  const struct sock *addr_sk);
         int             (*calc_md5_hash) (char *location,
                                           const struct tcp_md5sig_key *md5,
                                           const struct sock *sk,
                                           const struct sk_buff *skb);
 #endif
-        void (*init_req)(struct request_sock *req, struct sock *sk,
+        void (*init_req)(struct request_sock *req,
+                         const struct sock *sk_listener,
                          struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
         __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
@@ -1715,7 +1717,7 @@ struct tcp_request_sock_ops {
                                        const struct request_sock *req,
                                        bool *strict);
         __u32 (*init_seq)(const struct sk_buff *skb);
-        int (*send_synack)(struct sock *sk, struct dst_entry *dst,
+        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                            struct flowi *fl, struct request_sock *req,
                            u16 queue_mapping, struct tcp_fastopen_cookie *foc);
         void (*queue_hash_add)(struct sock *sk, struct request_sock *req,

diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h

@@ -293,7 +293,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
 void dccp_destroy_sock(struct sock *sk);

 void dccp_close(struct sock *sk, long timeout);
-struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
                                    struct request_sock *req);

 int dccp_connect(struct sock *sk);

diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c

@@ -498,7 +498,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
         return &rt->dst;
 }

-static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
+static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req)
 {
         int err = -1;
         struct sk_buff *skb;

diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c

@@ -181,7 +181,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 }

-static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
 {
         struct inet_request_sock *ireq = inet_rsk(req);
         struct ipv6_pinfo *np = inet6_sk(sk);

diff --git a/net/dccp/output.c b/net/dccp/output.c

@@ -390,7 +390,7 @@ int dccp_retransmit_skb(struct sock *sk)
         return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
 }

-struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
                                    struct request_sock *req)
 {
         struct dccp_hdr *dh;
@@ -398,13 +398,18 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
         const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                      sizeof(struct dccp_hdr_ext) +
                                      sizeof(struct dccp_hdr_response);
-        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
-                                           GFP_ATOMIC);
-        if (skb == NULL)
+        struct sk_buff *skb;
+
+        /* sk is marked const to clearly express we dont hold socket lock.
+         * sock_wmalloc() will atomically change sk->sk_wmem_alloc,
+         * it is safe to promote sk to non const.
+         */
+        skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
+                           GFP_ATOMIC);
+        if (!skb)
                 return NULL;

         /* Reserve space for headers. */
-        skb_reserve(skb, sk->sk_prot->max_header);
+        skb_reserve(skb, MAX_DCCP_HEADER);

         skb_dst_set(skb, dst_clone(dst));

diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c

@@ -408,7 +408,7 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 }
 EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

-struct dst_entry *inet_csk_route_req(struct sock *sk,
+struct dst_entry *inet_csk_route_req(const struct sock *sk,
                                      struct flowi4 *fl4,
                                      const struct request_sock *req)
 {
@@ -563,7 +563,7 @@ static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
                   req->num_timeout >= rskq_defer_accept - 1;
 }

-int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
 {
         int err = req->rsk_ops->rtx_syn_ack(parent, req);

diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c

@@ -137,7 +137,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
  *	Add an ip header to a skbuff and send it out.
  *
  */
-int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
                           __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
 {
         struct inet_sock *inet = inet_sk(sk);
@@ -151,15 +151,17 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
         iph->version  = 4;
         iph->ihl      = 5;
         iph->tos      = inet->tos;
-        if (ip_dont_fragment(sk, &rt->dst))
-                iph->frag_off = htons(IP_DF);
-        else
-                iph->frag_off = 0;
         iph->ttl      = ip_select_ttl(inet, &rt->dst);
         iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
         iph->saddr    = saddr;
         iph->protocol = sk->sk_protocol;
-        ip_select_ident(sock_net(sk), skb, sk);
+        if (ip_dont_fragment(sk, &rt->dst)) {
+                iph->frag_off = htons(IP_DF);
+                iph->id = 0;
+        } else {
+                iph->frag_off = 0;
+                __ip_select_ident(sock_net(sk), iph, 1);
+        }

         if (opt && opt->opt.optlen) {
                 iph->ihl += opt->opt.optlen>>2;

diff --git a/net/ipv4/route.c b/net/ipv4/route.c

@@ -2291,7 +2291,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 }

 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
-                                    struct sock *sk)
+                                    const struct sock *sk)
 {
         struct rtable *rt = __ip_route_output_key(net, flp4);

diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c

@@ -173,6 +173,10 @@ void tcp_assign_congestion_control(struct sock *sk)
          */
         if (ca->get_info)
                 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+        if (ca->flags & TCP_CONG_NEEDS_ECN)
+                INET_ECN_xmit(sk);
+        else
+                INET_ECN_dontxmit(sk);
 }

 void tcp_init_congestion_control(struct sock *sk)
@@ -181,6 +185,10 @@ void tcp_init_congestion_control(struct sock *sk)

         if (icsk->icsk_ca_ops->init)
                 icsk->icsk_ca_ops->init(sk);
+        if (tcp_ca_needs_ecn(sk))
+                INET_ECN_xmit(sk);
+        else
+                INET_ECN_dontxmit(sk);
 }

 static void tcp_reinit_congestion_control(struct sock *sk,
@@ -192,8 +200,8 @@ static void tcp_reinit_congestion_control(struct sock *sk,
         icsk->icsk_ca_ops = ca;
         icsk->icsk_ca_setsockopt = 1;

-        if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
-                icsk->icsk_ca_ops->init(sk);
+        if (sk->sk_state != TCP_CLOSE)
+                tcp_init_congestion_control(sk);
 }

 /* Manage refcounts on socket close. */

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c

@@ -818,7 +818,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  *	This still operates on a request_sock only, not on a big
  *	socket.
  */
-static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                               struct flowi *fl,
                               struct request_sock *req,
                               u16 queue_mapping,
@@ -865,7 +865,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
  */

 /* Find the Key structure for an address.  */
-struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
                                          const union tcp_md5_addr *addr,
                                          int family)
 {
@@ -877,7 +877,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
         /* caller either holds rcu_read_lock() or socket lock */
         md5sig = rcu_dereference_check(tp->md5sig_info,
                                        sock_owned_by_user(sk) ||
-                                       lockdep_is_held(&sk->sk_lock.slock));
+                                       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
         if (!md5sig)
                 return NULL;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -894,7 +894,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 }
 EXPORT_SYMBOL(tcp_md5_do_lookup);

-struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                          const struct sock *addr_sk)
 {
         const union tcp_md5_addr *addr;
@@ -1168,7 +1168,8 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk,
 }
 #endif

-static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
+static void tcp_v4_init_req(struct request_sock *req,
+                            const struct sock *sk_listener,
                             struct sk_buff *skb)
 {
         struct inet_request_sock *ireq = inet_rsk(req);

diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c

@@ -362,27 +362,35 @@ void tcp_twsk_destructor(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

+/* Warning : This function is called without sk_listener being locked.
+ * Be sure to read socket fields once, as their value could change under us.
+ */
 void tcp_openreq_init_rwin(struct request_sock *req,
-                           struct sock *sk, struct dst_entry *dst)
+                           const struct sock *sk_listener,
+                           const struct dst_entry *dst)
 {
         struct inet_request_sock *ireq = inet_rsk(req);
-        struct tcp_sock *tp = tcp_sk(sk);
-        __u8 rcv_wscale;
+        const struct tcp_sock *tp = tcp_sk(sk_listener);
+        u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+        int full_space = tcp_full_space(sk_listener);
         int mss = dst_metric_advmss(dst);
+        u32 window_clamp;
+        __u8 rcv_wscale;

-        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
-                mss = tp->rx_opt.user_mss;
+        if (user_mss && user_mss < mss)
+                mss = user_mss;

+        window_clamp = READ_ONCE(tp->window_clamp);
         /* Set this up on the first call only */
-        req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+        req->window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

         /* limit the window selection if the user enforce a smaller rx buffer */
-        if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
-            (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
-                req->window_clamp = tcp_full_space(sk);
+        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
+            (req->window_clamp > full_space || req->window_clamp == 0))
+                req->window_clamp = full_space;

         /* tcp_full_space because it is guaranteed to be the first packet */
-        tcp_select_initial_window(tcp_full_space(sk),
+        tcp_select_initial_window(full_space,
                 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                 &req->rcv_wnd,
                 &req->window_clamp,

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c

@@ -357,14 +357,10 @@ static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 }

 static void
-tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
-                    struct sock *sk)
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
 {
-        if (inet_rsk(req)->ecn_ok) {
+        if (inet_rsk(req)->ecn_ok)
                 th->ece = 1;
-                if (tcp_ca_needs_ecn(sk))
-                        INET_ECN_xmit(sk);
-        }
 }

 /* Set up ECN state for a packet on a ESTABLISHED socket that is about to
@@ -612,12 +608,11 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 }

 /* Set up TCP options for SYN-ACKs. */
-static unsigned int tcp_synack_options(struct sock *sk,
-                                   struct request_sock *req,
-                                   unsigned int mss, struct sk_buff *skb,
-                                   struct tcp_out_options *opts,
-                                   const struct tcp_md5sig_key *md5,
-                                   struct tcp_fastopen_cookie *foc)
+static unsigned int tcp_synack_options(struct request_sock *req,
+                                       unsigned int mss, struct sk_buff *skb,
+                                       struct tcp_out_options *opts,
+                                       const struct tcp_md5sig_key *md5,
+                                       struct tcp_fastopen_cookie *foc)
 {
         struct inet_request_sock *ireq = inet_rsk(req);
         unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -2949,20 +2944,25 @@ int tcp_send_synack(struct sock *sk)
  * Allocate one skb and build a SYNACK packet.
  * @dst is consumed : Caller should not use it again.
  */
-struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                 struct request_sock *req,
                                 struct tcp_fastopen_cookie *foc)
 {
-        struct tcp_out_options opts;
         struct inet_request_sock *ireq = inet_rsk(req);
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct tcphdr *th;
-        struct sk_buff *skb;
+        const struct tcp_sock *tp = tcp_sk(sk);
         struct tcp_md5sig_key *md5 = NULL;
+        struct tcp_out_options opts;
+        struct sk_buff *skb;
         int tcp_header_size;
+        struct tcphdr *th;
+        u16 user_mss;
         int mss;

-        skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
+        /* sk is a const pointer, because we want to express multiple cpus
+         * might call us concurrently.
+         * sock_wmalloc() will change sk->sk_wmem_alloc in an atomic way.
+         */
+        skb = sock_wmalloc((struct sock *)sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
         if (unlikely(!skb)) {
                 dst_release(dst);
                 return NULL;
@@ -2973,8 +2973,9 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         skb_dst_set(skb, dst);

         mss = dst_metric_advmss(dst);
-        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
-                mss = tp->rx_opt.user_mss;
+        user_mss = READ_ONCE(tp->rx_opt.user_mss);
+        if (user_mss && user_mss < mss)
+                mss = user_mss;

         memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
@@ -2989,8 +2990,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
 #endif
         skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
-        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
-                                             foc) + sizeof(*th);
+        tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
+                          sizeof(*th);

         skb_push(skb, tcp_header_size);
         skb_reset_transport_header(skb);
@@ -2999,7 +3000,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         memset(th, 0, sizeof(struct tcphdr));
         th->syn = 1;
         th->ack = 1;
-        tcp_ecn_make_synack(req, th, sk);
+        tcp_ecn_make_synack(req, th);
         th->source = htons(ireq->ir_num);
         th->dest = ireq->ir_rmt_port;
 /* Setting of flags are superfluous here for callers (and ECE is
@@ -3014,7 +3015,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,

         /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
         th->window = htons(min(req->rcv_wnd, 65535U));
-        tcp_options_write((__be32 *)(th + 1), tp, &opts);
+        tcp_options_write((__be32 *)(th + 1), NULL, &opts);
         th->doff = (tcp_header_size >> 2);
         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
@@ -3501,7 +3502,7 @@ void tcp_send_probe0(struct sock *sk)
                           TCP_RTO_MAX);
 }

-int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
+int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 {
         const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
         struct flowi fl;

diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c

@@ -263,7 +263,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,

 void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
 {
-        struct ipv6_pinfo *np = inet6_sk(sk);
+        const struct ipv6_pinfo *np = inet6_sk(sk);
         struct sock_exterr_skb *serr;
         struct ipv6hdr *iph;
         struct sk_buff *skb;

diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c

@@ -65,7 +65,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

-struct dst_entry *inet6_csk_route_req(struct sock *sk,
+struct dst_entry *inet6_csk_route_req(const struct sock *sk,
                                       struct flowi6 *fl6,
                                       const struct request_sock *req)
 {

diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c

@@ -150,14 +150,16 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
 }

 /*
- *	xmit an sk_buff (used by TCP, SCTP and DCCP)
+ * xmit an sk_buff (used by TCP, SCTP and DCCP)
+ * Note : socket lock is not held for SYNACK packets, but might be modified
+ * by calls to skb_set_owner_w() and ipv6_local_error(),
+ * which are using proper atomic operations or spinlocks.
  */
-int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
              struct ipv6_txoptions *opt, int tclass)
 {
         struct net *net = sock_net(sk);
-        struct ipv6_pinfo *np = inet6_sk(sk);
+        const struct ipv6_pinfo *np = inet6_sk(sk);
         struct in6_addr *first_hop = &fl6->daddr;
         struct dst_entry *dst = skb_dst(skb);
         struct ipv6hdr *hdr;
@@ -186,7 +188,10 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                 }
                 consume_skb(skb);
                 skb = skb2;
-                skb_set_owner_w(skb, sk);
+                /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
+                 * it is safe to call in our context (socket lock not held)
+                 */
+                skb_set_owner_w(skb, (struct sock *)sk);
         }

         if (opt->opt_flen)
                 ipv6_push_frag_opts(skb, opt, &proto);
@@ -224,13 +229,20 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
         if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                               IPSTATS_MIB_OUT, skb->len);
+                /* hooks should never assume socket lock is held.
+                 * we promote our socket to non const
+                 */
                 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
-                               net, sk, skb, NULL, dst->dev,
+                               net, (struct sock *)sk, skb, NULL, dst->dev,
                                dst_output_okfn);
         }

         skb->dev = dst->dev;
-        ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
+        /* ipv6_local_error() does not require socket lock,
+         * we promote our socket to non const
+         */
+        ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
         IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
         kfree_skb(skb);
         return -EMSGSIZE;
@@ -883,7 +895,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
         return dst;
 }

-static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                                struct dst_entry **dst, struct flowi6 *fl6)
 {
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -1014,7 +1026,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
  *	It returns a valid dst pointer on success, or a pointer encoded
  *	error code.
  */
-struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
                                       const struct in6_addr *final_dst)
 {
         struct dst_entry *dst = NULL;

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c

@@ -434,7 +434,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 }


-static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                               struct flowi *fl,
                               struct request_sock *req,
                               u16 queue_mapping,
@@ -476,13 +476,13 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
 }

 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                    const struct in6_addr *addr)
 {
         return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 }

-static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
+static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
                                                 const struct sock *addr_sk)
 {
         return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
@@ -663,22 +663,23 @@ static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 }
 #endif

-static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
+static void tcp_v6_init_req(struct request_sock *req,
+                            const struct sock *sk_listener,
                             struct sk_buff *skb)
 {
         struct inet_request_sock *ireq = inet_rsk(req);
-        struct ipv6_pinfo *np = inet6_sk(sk);
+        const struct ipv6_pinfo *np = inet6_sk(sk_listener);

         ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
         ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

         /* So that link locals have meaning */
-        if (!sk->sk_bound_dev_if &&
+        if (!sk_listener->sk_bound_dev_if &&
             ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
                 ireq->ir_iif = tcp_v6_iif(skb);

         if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
-            (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+            (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
              np->rxopt.bits.rxinfo ||
              np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
              np->rxopt.bits.rxohlim || np->repflow)) {

diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c

@@ -1208,7 +1208,7 @@ static inline int policy_to_flow_dir(int dir)
         }
 }

-static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
+static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
                                                  const struct flowi *fl)
 {
         struct xfrm_policy *pol;
@@ -2185,7 +2185,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
  */
 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                               const struct flowi *fl,
-                              struct sock *sk, int flags)
+                              const struct sock *sk, int flags)
 {
         struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
         struct flow_cache_object *flo;
@@ -2333,7 +2333,7 @@ EXPORT_SYMBOL(xfrm_lookup);
  */
 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
                                     const struct flowi *fl,
-                                    struct sock *sk, int flags)
+                                    const struct sock *sk, int flags)
 {
         struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
                                             flags | XFRM_LOOKUP_QUEUE |