#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>

#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(FIELD_SIZEOF(struct ip_tunnel_key, u) -		\
	 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	__be16			tun_flags;
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be32			label;		/* Flow Label for IPv6 */
	__be16			tp_src;
	__be16			tp_dst;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */

/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX					\
	GENMASK((FIELD_SIZEOF(struct ip_tunnel_info,		\
			      options_len) * BITS_PER_BYTE) - 1, 0)

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
#ifdef CONFIG_DST_CACHE
	struct dst_cache	dst_cache;
#endif
	u8			options_len;
	u8			mode;
};

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif

struct ip_tunnel_encap {
	u16			type;
	u16			flags;
	__be16			sport;
	__be16			dport;
};

struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};

struct metadata_dst;

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node hash_node;
	struct net_device	*dev;
	struct net		*net;	/* netns for packet i/o */

	int		err_count;	/* Number of arrived ICMP errors */
	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */

	/* These four fields used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	u32		o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */
	int		mlink;

	struct dst_cache dst_cache;

	struct ip_tunnel_parm parms;

	int		encap_hlen;	/* Encap header length (FOU,GUE) */
	struct ip_tunnel_encap encap;

	int		hlen;		/* tun_hlen + encap_hlen */

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	int			ip_tnl_net_id;
	struct gro_cells	gro_cells;
	bool			collect_md;
};

#define TUNNEL_CSUM		__cpu_to_be16(0x01)
#define TUNNEL_ROUTING		__cpu_to_be16(0x02)
#define TUNNEL_KEY		__cpu_to_be16(0x04)
#define TUNNEL_SEQ		__cpu_to_be16(0x08)
#define TUNNEL_STRICT		__cpu_to_be16(0x10)
#define TUNNEL_REC		__cpu_to_be16(0x20)
#define TUNNEL_VERSION		__cpu_to_be16(0x40)
#define TUNNEL_NO_KEY		__cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT	__cpu_to_be16(0x0100)
#define TUNNEL_OAM		__cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT		__cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT	__cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT	__cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE		__cpu_to_be16(0x2000)

#define TUNNEL_OPTIONS_PRESENT	(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)

struct tnl_ptk_info {
	__be16 flags;
	__be16 proto;
	__be32 key;
	__be32 seq;
};

#define PACKET_RCVD	0
#define PACKET_REJECT	1

#define IP_TNL_HASH_BITS   7
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
	struct ip_tunnel __rcu *collect_md_tun;
};

struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);
};

#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id, __be16 tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	key->tun_flags = tun_flags;

	/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g.: GRE over IPsec, the tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
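
/* Usage sketch (illustrative only, not part of this header's API): a
 * collect_md receive path might derive the key straight from the outer
 * IPv4 header. The helper name below is hypothetical; the argument
 * order matches ip_tunnel_key_init() above.
 */
#if 0
static inline void example_rx_key_from_iph(struct ip_tunnel_key *key,
					   const struct iphdr *iph,
					   __be64 tun_id, __be16 tun_flags)
{
	/* No flow label and no transport ports for a plain IPv4 rx path. */
	ip_tunnel_key_init(key, iph->saddr, iph->daddr, iph->tos, iph->ttl,
			   0, 0, 0, tun_id, tun_flags);
}
#endif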
static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;
	if (!info)
		return true;
	if (info->key.tun_flags & TUNNEL_NOCACHE)
		return false;

	return true;
}
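
/* Illustrative caller pattern (hypothetical variable names): tunnel xmit
 * paths gate their dst cache on this helper before a route lookup, since
 * eBPF-supplied ip_tunnel_info (TUNNEL_NOCACHE) is not packet independent.
 */
#if 0
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;

	if (use_cache)
		rt = dst_cache_get_ip4(cache, &fl4.saddr);
	if (!rt) {
		rt = ip_route_output_key(net, &fl4);
		if (!IS_ERR(rt) && use_cache)
			dst_cache_set_ip4(cache, &rt->dst, fl4.saddr);
	}
#endif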

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
					       *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

#ifdef CONFIG_INET

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
		    u8 *protocol, struct flowi4 *fl4);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key);

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap);

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}

int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
			 bool xnet);
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, u8 proto,
		   u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags);

struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
	} else {
		struct net_device_stats *err_stats = &dev->stats;

		if (pkt_len < 0) {
			err_stats->tx_errors++;
			err_stats->tx_aborted_errors++;
		} else {
			err_stats->tx_dropped++;
		}
	}
}
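
/* Illustrative call site (a sketch, not the exact driver code): callers
 * pass a negative errno to count a tx error, and zero to count a drop,
 * mirroring how iptunnel_xmit() accounts for congestion.
 */
#if 0
	pkt_len = skb->len - skb_inner_network_offset(skb);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;		/* congestion: count as dropped */
	iptunnel_xmit_stats(dev, pkt_len);
#endif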

static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
	return info + 1;
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len)
{
	memcpy(ip_tunnel_info_opts(info), from, len);
	info->options_len = len;
}
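
/* Illustrative use (hypothetical buffer): options are stored directly
 * behind struct ip_tunnel_info, so the info must have been allocated
 * with at least 'len' bytes of trailing storage (<= IP_TUNNEL_OPTS_MAX).
 */
#if 0
	u8 opts[8];	/* driver-provided option bytes */

	ip_tunnel_info_opts_set(info, opts, sizeof(opts));
	info->key.tun_flags |= TUNNEL_GENEVE_OPT;
#endif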

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}

extern struct static_key ip_tunnel_metadata_cnt;

/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_key_false(&ip_tunnel_metadata_cnt);
}

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);

#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len)
{
	info->options_len = 0;
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */