2015-05-14 00:57:30 +08:00
|
|
|
/*
|
|
|
|
* GENEVE: Generic Network Virtualization Encapsulation
|
|
|
|
*
|
|
|
|
* Copyright (c) 2015 Red Hat, Inc.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/hash.h>
|
2015-08-27 14:46:52 +08:00
|
|
|
#include <net/dst_metadata.h>
|
2015-08-29 07:54:40 +08:00
|
|
|
#include <net/gro_cells.h>
|
2015-05-14 00:57:30 +08:00
|
|
|
#include <net/rtnetlink.h>
|
|
|
|
#include <net/geneve.h>
|
2015-08-27 14:46:54 +08:00
|
|
|
#include <net/protocol.h>
|
2015-05-14 00:57:30 +08:00
|
|
|
|
|
|
|
#define GENEVE_NETDEV_VER	"0.6"

/* Default/well-known UDP destination port for Geneve */
#define GENEVE_UDP_PORT		6081

/* The virtual network identifier (VNI) is 24 bits wide */
#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

/* Size of the per-socket VNI -> device hash table */
#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

/* Rate-limited logging of packets whose outer header carried a broken
 * ECN marking; runtime-tunable module parameter (see geneve_rx()).
 */
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* Only Geneve version 0 is handled by this driver */
#define GENEVE_VER 0
/* Outer UDP header plus the fixed Geneve header (variable options excluded) */
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
/* per-network namespace private data for this module */
|
|
|
|
struct geneve_net {
|
2015-08-27 14:46:54 +08:00
|
|
|
struct list_head geneve_list;
|
|
|
|
struct list_head sock_list;
|
2015-05-14 00:57:30 +08:00
|
|
|
};
|
|
|
|
|
2015-08-27 14:46:54 +08:00
|
|
|
static int geneve_net_id;
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
union geneve_addr {
|
|
|
|
struct sockaddr_in sin;
|
|
|
|
struct sockaddr_in6 sin6;
|
|
|
|
struct sockaddr sa;
|
|
|
|
};
|
|
|
|
|
|
|
|
static union geneve_addr geneve_remote_unspec = { .sa.sa_family = AF_UNSPEC, };
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
/* Pseudo network device */
struct geneve_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct net *net;	/* netns for packet i/o */
	struct net_device *dev;	/* netdev for geneve tunnel */
	struct geneve_sock __rcu *sock4;	/* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock __rcu *sock6;	/* IPv6 socket used for geneve tunnel */
#endif
	u8 vni[3];	/* virtual network ID for tunnel */
	u8 ttl;	/* TTL override */
	u8 tos;	/* TOS override */
	union geneve_addr remote;	/* IP address for link partner */
	struct list_head next;	/* geneve's per namespace list */
	__be32 label;	/* IPv6 flowlabel override */
	__be16 dst_port;	/* UDP destination port of the tunnel */
	bool collect_md;	/* metadata mode: tunnel info comes from/goes to skb dst */
	struct gro_cells gro_cells;	/* per-cpu rx contexts for decapsulated frames */
	u32 flags;	/* GENEVE_F_* UDP checksum flags */
	struct dst_cache dst_cache;	/* cached route for the configured remote */
};
|
|
|
|
|
2015-12-11 04:37:45 +08:00
|
|
|
/* Geneve device flags */
|
2016-02-20 03:26:24 +08:00
|
|
|
#define GENEVE_F_UDP_ZERO_CSUM_TX BIT(0)
|
2015-12-11 04:37:45 +08:00
|
|
|
#define GENEVE_F_UDP_ZERO_CSUM6_TX BIT(1)
|
|
|
|
#define GENEVE_F_UDP_ZERO_CSUM6_RX BIT(2)
|
|
|
|
|
2015-08-27 14:46:54 +08:00
|
|
|
struct geneve_sock {
|
|
|
|
bool collect_md;
|
|
|
|
struct list_head list;
|
|
|
|
struct socket *sock;
|
|
|
|
struct rcu_head rcu;
|
|
|
|
int refcnt;
|
2015-08-27 14:46:55 +08:00
|
|
|
struct hlist_head vni_list[VNI_HASH_SIZE];
|
2015-12-11 04:37:45 +08:00
|
|
|
u32 flags;
|
2015-08-27 14:46:54 +08:00
|
|
|
};
|
2015-05-14 00:57:30 +08:00
|
|
|
|
|
|
|
/* Fold the 24-bit VNI into a bucket index for the vni_list hash table. */
static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 id = ((__u32)vni[0] << 16) | ((__u32)vni[1] << 8) | (__u32)vni[2];

	return hash_32(id, VNI_HASH_BITS);
}
|
|
|
|
|
2015-08-27 14:46:52 +08:00
|
|
|
/* Convert the 3-byte on-wire VNI to the driver's __be64 tunnel id.
 * Both branches yield the same big-endian byte layout: the VNI sits in
 * bytes 3..5 of the 8-byte id (two low-order id bytes left zero).
 */
static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	/* On little-endian hosts, place the bytes so that the in-memory
	 * representation matches big-endian order (__force silences sparse).
	 */
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}
|
|
|
|
|
2016-02-18 18:22:49 +08:00
|
|
|
/* Address family (AF_INET or AF_INET6) of the socket backing @gs. */
static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
{
	return gs->sock->sk->sk_family;
}
|
|
|
|
|
2015-08-27 14:46:55 +08:00
|
|
|
/* Find the device on @gs whose VNI matches @vni and whose configured
 * IPv4 remote equals @addr.  The VNI hash chain is walked with
 * hlist_for_each_entry_rcu(), i.e. this is safe on the RCU-protected
 * receive path.  Returns NULL when no device matches.
 */
static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    addr == geneve->remote.sin.sin_addr.s_addr)
			return geneve;
	}
	return NULL;
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of geneve_lookup(): match @vni and the configured
 * IPv6 remote against @addr6 on the RCU-protected receive path.
 */
static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
					 struct in6_addr addr6, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    ipv6_addr_equal(&addr6, &geneve->remote.sin6.sin6_addr))
			return geneve;
	}
	return NULL;
}
#endif
|
2015-08-27 14:46:52 +08:00
|
|
|
|
2015-08-27 14:46:54 +08:00
|
|
|
/* The Geneve header starts immediately after the outer UDP header. */
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}
|
|
|
|
|
2016-02-18 18:22:50 +08:00
|
|
|
/* Resolve which geneve device an incoming packet on @gs belongs to.
 *
 * In collect_md mode a single device accepts every VNI/source, so the
 * lookup keys are zeroed; otherwise the packet's VNI and outer source
 * address must match a configured device.  Returns NULL if nothing
 * matches (or the socket family is neither INET nor INET6).
 */
static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
					    struct sk_buff *skb)
{
	u8 *vni;
	__be32 addr;
	static u8 zero_vni[3];		/* wildcard key for collect_md mode */
#if IS_ENABLED(CONFIG_IPV6)
	static struct in6_addr zero_addr6;
#endif

	if (geneve_get_sk_family(gs) == AF_INET) {
		struct iphdr *iph;

		iph = ip_hdr(skb); /* outer IP header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr = 0;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr = iph->saddr;
		}

		return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (geneve_get_sk_family(gs) == AF_INET6) {
		struct ipv6hdr *ip6h;
		struct in6_addr addr6;

		ip6h = ipv6_hdr(skb); /* outer IPv6 header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr6 = zero_addr6;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr6 = ip6h->saddr;
		}

		return geneve6_lookup(gs, addr6, vni);
#endif
	}
	return NULL;
}
|
|
|
|
|
|
|
|
/* geneve receive/decap routine: collect tunnel metadata when requested,
 * validate the inner Ethernet frame, perform ECN decapsulation on the
 * outer header and hand the frame to the stack through GRO cells.
 * Consumes @skb on every path (delivered or freed).
 */
static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
		      struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct pcpu_sw_netstats *stats;
	int err = 0;
	void *oiph;	/* outer IP/IPv6 header, saved before headers move */

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		/* Build a metadata dst describing the outer headers so a
		 * collect_md consumer can see where the packet came from.
		 */
		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst)
			goto drop;
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (geneve_get_sk_family(gs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (geneve_get_sk_family(gs) == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
#endif
		}
		/* err > 1 means the ECN state was unrecoverable: drop */
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&geneve->gro_cells, skb);
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}
|
|
|
|
|
|
|
|
/* Setup stats when device is created */
|
|
|
|
static int geneve_init(struct net_device *dev)
|
|
|
|
{
|
2015-08-29 07:54:40 +08:00
|
|
|
struct geneve_dev *geneve = netdev_priv(dev);
|
|
|
|
int err;
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
|
|
|
if (!dev->tstats)
|
|
|
|
return -ENOMEM;
|
2015-08-29 07:54:40 +08:00
|
|
|
|
|
|
|
err = gro_cells_init(&geneve->gro_cells, dev);
|
|
|
|
if (err) {
|
|
|
|
free_percpu(dev->tstats);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2016-02-12 22:43:58 +08:00
|
|
|
err = dst_cache_init(&geneve->dst_cache, GFP_KERNEL);
|
|
|
|
if (err) {
|
|
|
|
free_percpu(dev->tstats);
|
|
|
|
gro_cells_destroy(&geneve->gro_cells);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ndo_uninit: release everything geneve_init() allocated, in reverse
 * order of acquisition.
 */
static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	dst_cache_destroy(&geneve->dst_cache);
	gro_cells_destroy(&geneve->gro_cells);
	free_percpu(dev->tstats);
}
|
|
|
|
|
2015-08-27 14:46:54 +08:00
|
|
|
/* Callback from net/ipv4/udp.c to receive packets.
 * Validates the Geneve header, resolves the target device, strips the
 * outer headers and passes the frame to geneve_rx().  Always returns 0:
 * the packet is consumed on every path (malformed ones are freed).
 */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_dev *geneve;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto drop;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto drop;

	/* Only Ethernet (transparent bridging) payloads are handled */
	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve = geneve_lookup_skb(gs, skb);
	if (!geneve)
		goto drop;

	/* Strip outer UDP + Geneve headers (options included); scrub the
	 * skb when the packet crosses a netns boundary.
	 */
	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB),
				 !net_eq(geneve->net, dev_net(geneve->dev))))
		goto drop;

	geneve_rx(geneve, gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
|
|
|
|
|
|
|
|
/* Open a kernel UDP socket bound to @port for tunnel traffic.
 * @flags (GENEVE_F_*) controls whether zero IPv6 UDP rx checksums are
 * accepted.  Returns the socket or an ERR_PTR().
 */
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		/* restrict to IPv6 traffic only */
		udp_conf.ipv6_v6only = 1;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & GENEVE_F_UDP_ZERO_CSUM6_RX);
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
|
|
|
|
|
|
|
|
/* Total Geneve header length in bytes: fixed part plus options
 * (opt_len is encoded in 4-byte words).
 */
static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}
|
|
|
|
|
2016-04-05 23:22:55 +08:00
|
|
|
/* GRO receive handler installed on the tunnel UDP socket: candidate
 * packets may only be merged when their Geneve headers (options
 * included) match byte-for-byte; aggregation of the payload is then
 * delegated to the inner protocol's gro_receive callback.
 */
static struct sk_buff **geneve_gro_receive(struct sock *sk,
					   struct sk_buff **head,
					   struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;	/* flush unless we successfully hand off below */

	/* Make the fixed-size part of the header available */
	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	/* Don't aggregate unsupported versions or OAM frames */
	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	/* Now that opt_len is known, pull in the full header */
	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	/* Demote held packets whose Geneve header differs from ours */
	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
|
|
|
|
|
2016-04-05 23:22:55 +08:00
|
|
|
/* GRO complete handler: finish the merged skb by delegating to the
 * inner protocol's gro_complete past the Geneve header, then record
 * where the inner frame starts.  Returns the inner handler's result,
 * or -ENOSYS if no offload exists for the payload type.
 */
static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
			       int nhoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + gh_len);

	return err;
}
|
|
|
|
|
|
|
|
/* Create new listen socket if needed.
 * Allocates a geneve_sock (refcnt 1), opens the underlying UDP socket,
 * wires up the encap/GRO callbacks and links it into the per-netns
 * socket list.  Returns the new geneve_sock or an ERR_PTR().
 */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6, u32 flags)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.gro_receive = geneve_gro_receive;
	tunnel_cfg.gro_complete = geneve_gro_complete;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);
	return gs;
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
/* Drop one reference on @gs (NULL tolerated).  On the last put: unlink
 * it from the per-netns list, tell drivers the rx port is gone, close
 * the UDP socket and free the structure after an RCU grace period.
 */
static void __geneve_sock_release(struct geneve_sock *gs)
{
	if (!gs || --gs->refcnt)
		return;

	list_del(&gs->list);
	udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
/* Detach the device from its rx socket(s) and release them.  The RCU
 * socket pointers are cleared first and synchronize_net() is called so
 * in-flight readers can no longer see the sockets before teardown.
 */
static void geneve_sock_release(struct geneve_dev *geneve)
{
	struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);

	rcu_assign_pointer(geneve->sock6, NULL);
#endif

	rcu_assign_pointer(geneve->sock4, NULL);
	synchronize_net();

	__geneve_sock_release(gs4);
#if IS_ENABLED(CONFIG_IPV6)
	__geneve_sock_release(gs6);
#endif
}
|
|
|
|
|
2015-08-27 14:46:54 +08:00
|
|
|
/* Look up an already-open geneve UDP socket in @gn by address family
 * and local port.  Returns NULL when none exists.
 */
static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    sa_family_t family,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    geneve_get_sk_family(gs) == family) {
			return gs;
		}
	}
	return NULL;
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
/* Attach @geneve to the shared rx socket for its port/family, creating
 * the socket if it does not exist yet, then hash the device into the
 * socket's VNI table.  Returns 0 or a negative errno.
 */
static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	__u32 hash;

	/* Reuse an existing socket for this port/family when possible */
	gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->dst_port);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->dst_port, ipv6, geneve->flags);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
	gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		rcu_assign_pointer(geneve->sock6, gs);
	else
#endif
		rcu_assign_pointer(geneve->sock4, gs);

	hash = geneve_net_vni_hash(geneve->vni);
	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
	return 0;
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
/* ndo_open: bring the tunnel up by attaching its rx socket(s).  In
 * metadata (collect_md) mode both address families are opened; in
 * classic mode only the family of the configured remote.  On partial
 * failure everything opened so far is released again.
 */
static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	bool ipv6 = geneve->remote.sa.sa_family == AF_INET6;
	bool metadata = geneve->collect_md;
	int ret = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6 || metadata)
		ret = geneve_sock_add(geneve, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = geneve_sock_add(geneve, false);
	if (ret < 0)
		geneve_sock_release(geneve);

	return ret;
}
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
/* ndo_stop: unhash the device from its socket's VNI table and drop the
 * socket references taken in geneve_open().
 */
static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	if (!hlist_unhashed(&geneve->hlist))
		hlist_del_rcu(&geneve->hlist);
	geneve_sock_release(geneve);
	return 0;
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
/* Fill in an on-wire Geneve header.  @options_len is in bytes and must
 * be a multiple of 4 since the header encodes it in 4-byte words.
 * Only Ethernet payloads (ETH_P_TEB) are produced by this driver.
 */
static void geneve_build_header(struct genevehdr *geneveh,
				__be16 tun_flags, u8 vni[3],
				u8 options_len, u8 *options)
{
	geneveh->ver = GENEVE_VER;
	geneveh->opt_len = options_len / 4;
	geneveh->oam = !!(tun_flags & TUNNEL_OAM);
	geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
	geneveh->rsvd1 = 0;
	memcpy(geneveh->vni, vni, 3);
	geneveh->proto_type = htons(ETH_P_TEB);
	geneveh->rsvd2 = 0;

	memcpy(geneveh->options, options, options_len);
}
|
|
|
|
|
2015-08-27 14:46:54 +08:00
|
|
|
/* Prepare @skb for IPv4 transmission: scrub across netns if @xnet,
 * ensure enough headroom for the full encapsulation, set up the UDP
 * checksum offload and push the Geneve header.  On failure the route
 * reference @rt is dropped and a negative errno returned.
 */
static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			    u32 flags, bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;
	bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM_TX);

	skb_scrub_packet(skb, xnet);

	/* Room for link layer + outer IPv4 + UDP + Geneve (w/ options) */
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_rt;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_rt;

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_rt:
	ip_rt_put(rt);
	return err;
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of geneve_build_skb(): reserve headroom for the
 * IPv6 encapsulation, configure UDP checksum offload and push the
 * Geneve header.  On failure the dst reference is dropped and a
 * negative errno returned.
 */
static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
			     __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			     u32 flags, bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;
	bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM6_TX);

	skb_scrub_packet(skb, xnet);

	/* Room for link layer + outer IPv6 + UDP + Geneve (w/ options) */
	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}
#endif
|
|
|
|
|
|
|
|
/* Resolve the IPv4 route for an outgoing tunnel packet and fill @fl4.
 *
 * Flow parameters come from @info (external/metadata mode) or from the
 * device configuration.  The dst cache is consulted only when
 * ip_tunnel_dst_cache_usable() says the lookup is packet-independent;
 * an inherited TOS (tos == 1) varies per inner packet, so caching is
 * disabled in that case.  Returns the route or an ERR_PTR().
 */
static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
				       struct net_device *dev,
				       struct flowi4 *fl4,
				       struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_cache *dst_cache;
	struct rtable *rt = NULL;
	__u8 tos;

	/* No IPv4 socket: the device cannot transmit right now */
	if (!rcu_dereference(geneve->sock4))
		return ERR_PTR(-EIO);

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = IPPROTO_UDP;

	if (info) {
		fl4->daddr = info->key.u.ipv4.dst;
		fl4->saddr = info->key.u.ipv4.src;
		fl4->flowi4_tos = RT_TOS(info->key.tos);
		dst_cache = &info->dst_cache;
	} else {
		tos = geneve->tos;
		if (tos == 1) {
			const struct iphdr *iip = ip_hdr(skb);

			/* TOS inherited from the inner header is per
			 * packet, so the cached route must not be used.
			 */
			tos = ip_tunnel_get_dsfield(iip, skb);
			use_cache = false;
		}

		fl4->flowi4_tos = RT_TOS(tos);
		fl4->daddr = geneve->remote.sin.sin_addr.s_addr;
		dst_cache = &geneve->dst_cache;
	}

	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
		if (rt)
			return rt;
	}

	rt = ip_route_output_key(geneve->net, fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
	if (use_cache)
		dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr);
	return rt;
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* Resolve the IPv6 route for a GENEVE packet and fill in *fl6.
 *
 * @skb:  packet being transmitted (used for mark, inner DSCP, cache check)
 * @dev:  geneve netdevice
 * @fl6:  output flow key; fully initialised here
 * @info: per-packet tunnel metadata (collect_md mode) or NULL for a
 *        statically configured tunnel
 *
 * Returns the routed dst_entry, or an ERR_PTR:
 *   -EIO if the IPv6 tunnel socket is gone, -ENETUNREACH on route
 *   failure, -ELOOP if the route points back at this device.
 *
 * Uses rcu_dereference() on geneve->sock6 — callers run under the RCU
 * read lock (xmit path / fill_metadata_dst).
 */
static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
					   struct net_device *dev,
					   struct flowi6 *fl6,
					   struct ip_tunnel_info *info)
{
	/* eBPF can supply differing per-packet ip_tunnel_info, so the
	 * dst cache is only usable when the helper says so.
	 */
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_entry *dst = NULL;
	struct dst_cache *dst_cache;
	struct geneve_sock *gs6;
	__u8 prio;

	gs6 = rcu_dereference(geneve->sock6);
	if (!gs6)
		return ERR_PTR(-EIO);

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_proto = IPPROTO_UDP;

	if (info) {
		/* collect_md: flow parameters come from the metadata dst */
		fl6->daddr = info->key.u.ipv6.dst;
		fl6->saddr = info->key.u.ipv6.src;
		fl6->flowlabel = ip6_make_flowinfo(RT_TOS(info->key.tos),
						   info->key.label);
		dst_cache = &info->dst_cache;
	} else {
		prio = geneve->tos;
		if (prio == 1) {
			/* tos==1 means "inherit" the inner DSCP, which is
			 * per-packet, so the cached route cannot be used.
			 */
			const struct iphdr *iip = ip_hdr(skb);

			prio = ip_tunnel_get_dsfield(iip, skb);
			use_cache = false;
		}

		fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
						   geneve->label);
		fl6->daddr = geneve->remote.sin6.sin6_addr;
		dst_cache = &geneve->dst_cache;
	}

	if (use_cache) {
		dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
		if (dst)
			return dst;
	}

	if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
		netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (dst->dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
		dst_release(dst);
		return ERR_PTR(-ELOOP);
	}

	if (use_cache)
		dst_cache_set_ip6(dst_cache, dst, &fl6->saddr);
	return dst;
}
#endif
|
|
|
|
|
2015-08-27 14:46:52 +08:00
|
|
|
/* Convert 64 bit tunnel ID to 24 bit VNI.
 *
 * The tunnel ID is big-endian on the wire (__be64); the VNI occupies its
 * low 24 bits in network order. On big-endian hosts those are the three
 * lowest bytes; on little-endian hosts the same wire bytes sit at bit
 * offsets 40/48/56 of the raw u64, hence the two branches. The __force
 * casts deliberately strip the endianness annotation for byte extraction.
 */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
/* Encapsulate and transmit one skb over IPv4.
 *
 * @skb:  packet to encapsulate; always consumed (sent or freed)
 * @dev:  geneve netdevice
 * @info: per-packet tunnel metadata in collect_md mode, else NULL
 *
 * Always returns NETDEV_TX_OK; failures are accounted in dev->stats
 * (collisions for -ELOOP, tx_carrier_errors for -ENETUNREACH, and
 * tx_errors in every error case) rather than propagated.
 *
 * Runs under RCU (rcu_dereference of geneve->sock4).
 */
static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
				   struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs4;
	struct rtable *rt = NULL;
	const struct iphdr *iip; /* interior IP header */
	int err = -EINVAL;
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	__be16 df;
	/* crossing network namespaces? needed by geneve_build_skb */
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	u32 flags = geneve->flags;

	gs4 = rcu_dereference(geneve->sock4);
	if (!gs4)
		goto tx_error;

	if (geneve->collect_md) {
		/* collect_md requires valid TX metadata of the right family */
		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
		if (info && ip_tunnel_info_af(info) != AF_INET)
			goto tx_error;
	}

	rt = geneve_get_v4_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto tx_error;
	}

	/* hash-based source port for ECMP friendliness */
	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	/* grab the inner header pointer before the outer headers are built */
	iip = ip_hdr(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);
		if (info->options_len)
			opts = ip_tunnel_info_opts(info);

		/* per-packet checksum request overrides the device default */
		if (key->tun_flags & TUNNEL_CSUM)
			flags &= ~GENEVE_F_UDP_ZERO_CSUM_TX;
		else
			flags |= GENEVE_F_UDP_ZERO_CSUM_TX;

		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
				       info->options_len, opts, flags, xnet);
		if (unlikely(err))
			goto tx_error;

		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	} else {
		/* statically configured tunnel: no options, device defaults */
		err = geneve_build_skb(rt, skb, 0, geneve->vni,
				       0, NULL, flags, xnet);
		if (unlikely(err))
			goto tx_error;

		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
		ttl = geneve->ttl;
		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
			ttl = 1;
		/* fall back to the route's hop limit if still unset */
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		df = 0;
	}
	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
			    tos, ttl, df, sport, geneve->dst_port,
			    !net_eq(geneve->net, dev_net(geneve->dev)),
			    !!(flags & GENEVE_F_UDP_ZERO_CSUM_TX));

	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
|
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
/* Encapsulate and transmit one skb over IPv6.
 *
 * IPv6 counterpart of geneve_xmit_skb(): same contract — the skb is
 * always consumed, NETDEV_TX_OK is always returned, and failures are
 * reflected only in dev->stats (collisions / tx_carrier_errors /
 * tx_errors). Additionally carries the IPv6 flow label.
 *
 * Runs under RCU (rcu_dereference of geneve->sock6).
 */
static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
				    struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_entry *dst = NULL;
	const struct iphdr *iip; /* interior IP header */
	struct geneve_sock *gs6;
	int err = -EINVAL;
	struct flowi6 fl6;
	__u8 prio, ttl;
	__be16 sport;
	__be32 label;
	/* crossing network namespaces? needed by geneve6_build_skb */
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	u32 flags = geneve->flags;

	gs6 = rcu_dereference(geneve->sock6);
	if (!gs6)
		goto tx_error;

	if (geneve->collect_md) {
		/* collect_md requires valid TX metadata; the address family
		 * was already selected by geneve_xmit() before dispatch.
		 */
		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
	}

	dst = geneve_get_v6_dst(skb, dev, &fl6, info);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto tx_error;
	}

	/* hash-based source port for ECMP friendliness */
	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	/* grab the inner header pointer before the outer headers are built */
	iip = ip_hdr(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);
		if (info->options_len)
			opts = ip_tunnel_info_opts(info);

		/* per-packet checksum request overrides the device default */
		if (key->tun_flags & TUNNEL_CSUM)
			flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
		else
			flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;

		err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
					info->options_len, opts,
					flags, xnet);
		if (unlikely(err))
			goto tx_error;

		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
		label = info->key.label;
	} else {
		/* statically configured tunnel: no options, device defaults */
		err = geneve6_build_skb(dst, skb, 0, geneve->vni,
					0, NULL, flags, xnet);
		if (unlikely(err))
			goto tx_error;

		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
					   iip, skb);
		ttl = geneve->ttl;
		if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
			ttl = 1;
		/* fall back to the route's hop limit if still unset */
		ttl = ttl ? : ip6_dst_hoplimit(dst);
		label = geneve->label;
	}

	udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
			     &fl6.saddr, &fl6.daddr, prio, ttl, label,
			     sport, geneve->dst_port,
			     !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
#endif
|
|
|
|
|
|
|
|
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct geneve_dev *geneve = netdev_priv(dev);
|
|
|
|
struct ip_tunnel_info *info = NULL;
|
|
|
|
|
|
|
|
if (geneve->collect_md)
|
|
|
|
info = skb_tunnel_info(skb);
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
|
|
|
|
(!info && geneve->remote.sa.sa_family == AF_INET6))
|
|
|
|
return geneve6_xmit_skb(skb, dev, info);
|
|
|
|
#endif
|
|
|
|
return geneve_xmit_skb(skb, dev, info);
|
|
|
|
}
|
|
|
|
|
net: use core MTU range checking in core net infra
geneve:
- Merge __geneve_change_mtu back into geneve_change_mtu, set max_mtu
- This one isn't quite as straight-forward as others, could use some
closer inspection and testing
macvlan:
- set min/max_mtu
tun:
- set min/max_mtu, remove tun_net_change_mtu
vxlan:
- Merge __vxlan_change_mtu back into vxlan_change_mtu
- Set max_mtu to IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
- This one is also not as straight-forward and could use closer inspection
and testing from vxlan folks
bridge:
- set max_mtu of IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
openvswitch:
- set min/max_mtu, remove internal_dev_change_mtu
- note: max_mtu wasn't checked previously, it's been set to 65535, which
is the largest possible size supported
sch_teql:
- set min/max_mtu (note: max_mtu previously unchecked, used max of 65535)
macsec:
- min_mtu = 0, max_mtu = 65535
macvlan:
- min_mtu = 0, max_mtu = 65535
ntb_netdev:
- min_mtu = 0, max_mtu = 65535
veth:
- min_mtu = 68, max_mtu = 65535
8021q:
- min_mtu = 0, max_mtu = 65535
CC: netdev@vger.kernel.org
CC: Nicolas Dichtel <nicolas.dichtel@6wind.com>
CC: Hannes Frederic Sowa <hannes@stressinduktion.org>
CC: Tom Herbert <tom@herbertland.com>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Paolo Abeni <pabeni@redhat.com>
CC: Jiri Benc <jbenc@redhat.com>
CC: WANG Cong <xiyou.wangcong@gmail.com>
CC: Roopa Prabhu <roopa@cumulusnetworks.com>
CC: Pravin B Shelar <pshelar@ovn.org>
CC: Sabrina Dubroca <sd@queasysnail.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Stephen Hemminger <stephen@networkplumber.org>
CC: Pravin Shelar <pshelar@nicira.com>
CC: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-21 01:55:20 +08:00
|
|
|
static int geneve_change_mtu(struct net_device *dev, int new_mtu)
|
2016-02-10 08:05:57 +08:00
|
|
|
{
|
net: use core MTU range checking in core net infra
geneve:
- Merge __geneve_change_mtu back into geneve_change_mtu, set max_mtu
- This one isn't quite as straight-forward as others, could use some
closer inspection and testing
macvlan:
- set min/max_mtu
tun:
- set min/max_mtu, remove tun_net_change_mtu
vxlan:
- Merge __vxlan_change_mtu back into vxlan_change_mtu
- Set max_mtu to IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
- This one is also not as straight-forward and could use closer inspection
and testing from vxlan folks
bridge:
- set max_mtu of IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
openvswitch:
- set min/max_mtu, remove internal_dev_change_mtu
- note: max_mtu wasn't checked previously, it's been set to 65535, which
is the largest possible size supported
sch_teql:
- set min/max_mtu (note: max_mtu previously unchecked, used max of 65535)
macsec:
- min_mtu = 0, max_mtu = 65535
macvlan:
- min_mtu = 0, max_mtu = 65535
ntb_netdev:
- min_mtu = 0, max_mtu = 65535
veth:
- min_mtu = 68, max_mtu = 65535
8021q:
- min_mtu = 0, max_mtu = 65535
CC: netdev@vger.kernel.org
CC: Nicolas Dichtel <nicolas.dichtel@6wind.com>
CC: Hannes Frederic Sowa <hannes@stressinduktion.org>
CC: Tom Herbert <tom@herbertland.com>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Paolo Abeni <pabeni@redhat.com>
CC: Jiri Benc <jbenc@redhat.com>
CC: WANG Cong <xiyou.wangcong@gmail.com>
CC: Roopa Prabhu <roopa@cumulusnetworks.com>
CC: Pravin B Shelar <pshelar@ovn.org>
CC: Sabrina Dubroca <sd@queasysnail.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Stephen Hemminger <stephen@networkplumber.org>
CC: Pravin Shelar <pshelar@nicira.com>
CC: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-21 01:55:20 +08:00
|
|
|
/* Only possible if called internally, ndo_change_mtu path's new_mtu
|
|
|
|
* is guaranteed to be between dev->min_mtu and dev->max_mtu.
|
2016-02-10 08:05:57 +08:00
|
|
|
*/
|
net: use core MTU range checking in core net infra
geneve:
- Merge __geneve_change_mtu back into geneve_change_mtu, set max_mtu
- This one isn't quite as straight-forward as others, could use some
closer inspection and testing
macvlan:
- set min/max_mtu
tun:
- set min/max_mtu, remove tun_net_change_mtu
vxlan:
- Merge __vxlan_change_mtu back into vxlan_change_mtu
- Set max_mtu to IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
- This one is also not as straight-forward and could use closer inspection
and testing from vxlan folks
bridge:
- set max_mtu of IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
openvswitch:
- set min/max_mtu, remove internal_dev_change_mtu
- note: max_mtu wasn't checked previously, it's been set to 65535, which
is the largest possible size supported
sch_teql:
- set min/max_mtu (note: max_mtu previously unchecked, used max of 65535)
macsec:
- min_mtu = 0, max_mtu = 65535
macvlan:
- min_mtu = 0, max_mtu = 65535
ntb_netdev:
- min_mtu = 0, max_mtu = 65535
veth:
- min_mtu = 68, max_mtu = 65535
8021q:
- min_mtu = 0, max_mtu = 65535
CC: netdev@vger.kernel.org
CC: Nicolas Dichtel <nicolas.dichtel@6wind.com>
CC: Hannes Frederic Sowa <hannes@stressinduktion.org>
CC: Tom Herbert <tom@herbertland.com>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Paolo Abeni <pabeni@redhat.com>
CC: Jiri Benc <jbenc@redhat.com>
CC: WANG Cong <xiyou.wangcong@gmail.com>
CC: Roopa Prabhu <roopa@cumulusnetworks.com>
CC: Pravin B Shelar <pshelar@ovn.org>
CC: Sabrina Dubroca <sd@queasysnail.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Stephen Hemminger <stephen@networkplumber.org>
CC: Pravin Shelar <pshelar@nicira.com>
CC: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-21 01:55:20 +08:00
|
|
|
if (new_mtu > dev->max_mtu)
|
|
|
|
new_mtu = dev->max_mtu;
|
2016-02-19 01:43:29 +08:00
|
|
|
|
2016-02-10 08:05:57 +08:00
|
|
|
dev->mtu = new_mtu;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-10-23 09:17:16 +08:00
|
|
|
static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct ip_tunnel_info *info = skb_tunnel_info(skb);
|
|
|
|
struct geneve_dev *geneve = netdev_priv(dev);
|
|
|
|
struct rtable *rt;
|
|
|
|
struct flowi4 fl4;
|
2015-10-27 21:49:00 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
struct dst_entry *dst;
|
|
|
|
struct flowi6 fl6;
|
|
|
|
#endif
|
2015-10-23 09:17:16 +08:00
|
|
|
|
2015-10-27 21:49:00 +08:00
|
|
|
if (ip_tunnel_info_af(info) == AF_INET) {
|
|
|
|
rt = geneve_get_v4_rt(skb, dev, &fl4, info);
|
|
|
|
if (IS_ERR(rt))
|
|
|
|
return PTR_ERR(rt);
|
2015-10-23 09:17:16 +08:00
|
|
|
|
2015-10-27 21:49:00 +08:00
|
|
|
ip_rt_put(rt);
|
|
|
|
info->key.u.ipv4.src = fl4.saddr;
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
} else if (ip_tunnel_info_af(info) == AF_INET6) {
|
|
|
|
dst = geneve_get_v6_dst(skb, dev, &fl6, info);
|
|
|
|
if (IS_ERR(dst))
|
|
|
|
return PTR_ERR(dst);
|
|
|
|
|
|
|
|
dst_release(dst);
|
|
|
|
info->key.u.ipv6.src = fl6.saddr;
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2015-10-23 09:17:16 +08:00
|
|
|
|
|
|
|
info->key.tp_src = udp_flow_src_port(geneve->net, skb,
|
|
|
|
1, USHRT_MAX, true);
|
|
|
|
info->key.tp_dst = geneve->dst_port;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
/* Netdevice callbacks for a geneve interface. */
static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= geneve_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
};
|
|
|
|
|
|
|
|
static void geneve_get_drvinfo(struct net_device *dev,
|
|
|
|
struct ethtool_drvinfo *drvinfo)
|
|
|
|
{
|
|
|
|
strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
|
|
|
|
strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ethtool callbacks: drvinfo plus the generic link-state helper. */
static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
|
|
|
|
|
|
|
|
/* Info for udev, that this is a virtual tunnel endpoint (exposed via
 * SET_NETDEV_DEVTYPE in geneve_setup()).
 */
static struct device_type geneve_type = {
	.name = "geneve",
};
|
|
|
|
|
2016-07-11 19:12:28 +08:00
|
|
|
/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening GENEVE udp ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 *
 * Walks this netns's socket list under the RCU read lock and announces
 * each open GENEVE UDP socket to @dev for hardware offload setup.
 */
static void geneve_push_rx_ports(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;

	rcu_read_lock();
	list_for_each_entry_rcu(gs, &gn->sock_list, list)
		udp_tunnel_push_rx_port(dev, gs->sock,
					UDP_TUNNEL_TYPE_GENEVE);
	rcu_read_unlock();
}
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
/* Initialize the device structure. */
|
|
|
|
static void geneve_setup(struct net_device *dev)
|
|
|
|
{
|
|
|
|
ether_setup(dev);
|
|
|
|
|
|
|
|
dev->netdev_ops = &geneve_netdev_ops;
|
|
|
|
dev->ethtool_ops = &geneve_ethtool_ops;
|
|
|
|
dev->destructor = free_netdev;
|
|
|
|
|
|
|
|
SET_NETDEV_DEVTYPE(dev, &geneve_type);
|
|
|
|
|
|
|
|
dev->features |= NETIF_F_LLTX;
|
|
|
|
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
|
|
|
|
dev->features |= NETIF_F_RXCSUM;
|
|
|
|
dev->features |= NETIF_F_GSO_SOFTWARE;
|
|
|
|
|
|
|
|
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
|
|
|
|
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
|
|
|
|
|
net: use core MTU range checking in core net infra
geneve:
- Merge __geneve_change_mtu back into geneve_change_mtu, set max_mtu
- This one isn't quite as straight-forward as others, could use some
closer inspection and testing
macvlan:
- set min/max_mtu
tun:
- set min/max_mtu, remove tun_net_change_mtu
vxlan:
- Merge __vxlan_change_mtu back into vxlan_change_mtu
- Set max_mtu to IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
- This one is also not as straight-forward and could use closer inspection
and testing from vxlan folks
bridge:
- set max_mtu of IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
openvswitch:
- set min/max_mtu, remove internal_dev_change_mtu
- note: max_mtu wasn't checked previously, it's been set to 65535, which
is the largest possible size supported
sch_teql:
- set min/max_mtu (note: max_mtu previously unchecked, used max of 65535)
macsec:
- min_mtu = 0, max_mtu = 65535
macvlan:
- min_mtu = 0, max_mtu = 65535
ntb_netdev:
- min_mtu = 0, max_mtu = 65535
veth:
- min_mtu = 68, max_mtu = 65535
8021q:
- min_mtu = 0, max_mtu = 65535
CC: netdev@vger.kernel.org
CC: Nicolas Dichtel <nicolas.dichtel@6wind.com>
CC: Hannes Frederic Sowa <hannes@stressinduktion.org>
CC: Tom Herbert <tom@herbertland.com>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Paolo Abeni <pabeni@redhat.com>
CC: Jiri Benc <jbenc@redhat.com>
CC: WANG Cong <xiyou.wangcong@gmail.com>
CC: Roopa Prabhu <roopa@cumulusnetworks.com>
CC: Pravin B Shelar <pshelar@ovn.org>
CC: Sabrina Dubroca <sd@queasysnail.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Stephen Hemminger <stephen@networkplumber.org>
CC: Pravin Shelar <pshelar@nicira.com>
CC: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-21 01:55:20 +08:00
|
|
|
/* MTU range: 68 - (something less than 65535) */
|
|
|
|
dev->min_mtu = ETH_MIN_MTU;
|
|
|
|
/* The max_mtu calculation does not take account of GENEVE
|
|
|
|
* options, to avoid excluding potentially valid
|
|
|
|
* configurations. This will be further reduced by IPvX hdr size.
|
|
|
|
*/
|
|
|
|
dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
netif_keep_dst(dev);
|
2016-02-17 22:31:35 +08:00
|
|
|
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
|
2015-08-18 16:30:31 +08:00
|
|
|
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
|
2015-08-27 14:46:48 +08:00
|
|
|
eth_hw_addr_random(dev);
|
2015-05-14 00:57:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Netlink attribute policy for IFLA_GENEVE_* used by rtnl validation. */
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_REMOTE6]		= { .len = sizeof(struct in6_addr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_LABEL]		= { .type = NLA_U32 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GENEVE_UDP_CSUM]		= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
};
|
|
|
|
|
|
|
|
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
|
|
|
|
{
|
|
|
|
if (tb[IFLA_ADDRESS]) {
|
|
|
|
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
|
|
|
|
return -EADDRNOTAVAIL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!data)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (data[IFLA_GENEVE_ID]) {
|
|
|
|
__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
|
|
|
|
|
|
|
|
if (vni >= GENEVE_VID_MASK)
|
|
|
|
return -ERANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-08-27 14:46:54 +08:00
|
|
|
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
|
|
|
|
__be16 dst_port,
|
2015-10-27 05:01:44 +08:00
|
|
|
union geneve_addr *remote,
|
2015-08-27 14:46:54 +08:00
|
|
|
u8 vni[],
|
|
|
|
bool *tun_on_same_port,
|
|
|
|
bool *tun_collect_md)
|
|
|
|
{
|
|
|
|
struct geneve_dev *geneve, *t;
|
|
|
|
|
|
|
|
*tun_on_same_port = false;
|
|
|
|
*tun_collect_md = false;
|
|
|
|
t = NULL;
|
|
|
|
list_for_each_entry(geneve, &gn->geneve_list, next) {
|
|
|
|
if (geneve->dst_port == dst_port) {
|
|
|
|
*tun_collect_md = geneve->collect_md;
|
|
|
|
*tun_on_same_port = true;
|
|
|
|
}
|
|
|
|
if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
|
2015-10-27 05:01:44 +08:00
|
|
|
!memcmp(remote, &geneve->remote, sizeof(geneve->remote)) &&
|
2015-08-27 14:46:54 +08:00
|
|
|
dst_port == geneve->dst_port)
|
|
|
|
t = geneve;
|
|
|
|
}
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
2015-08-27 14:46:52 +08:00
|
|
|
/* Validate the requested configuration, fill in the geneve_dev private
 * struct, and register @dev with the network stack.
 *
 * @remote:   unicast remote endpoint (AF_UNSPEC allowed only with @metadata)
 * @metadata: collect-metadata (external control, e.g. OVS) mode; mutually
 *            exclusive with any explicit remote/vni/tos/ttl/label
 *
 * Returns 0 on success or a negative errno:
 *   -EINVAL  invalid combination of parameters, multicast remote, or a
 *            flow label with a non-IPv6 remote
 *   -EBUSY   an identical (vni, remote, port) tunnel already exists
 *   -EPERM   port-sharing conflict between metadata and non-metadata tunnels
 */
static int geneve_configure(struct net *net, struct net_device *dev,
			    union geneve_addr *remote,
			    __u32 vni, __u8 ttl, __u8 tos, __be32 label,
			    __be16 dst_port, bool metadata, u32 flags)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err, encap_len;

	if (!remote)
		return -EINVAL;
	/* metadata mode carries per-packet parameters; static ones conflict */
	if (metadata &&
	    (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl || label))
		return -EINVAL;

	geneve->net = net;
	geneve->dev = dev;

	/* Store the 24-bit VNI in wire (big-endian byte array) form */
	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
	geneve->vni[2] = vni & 0x000000ff;

	/* Only unicast remotes are supported */
	if ((remote->sa.sa_family == AF_INET &&
	     IN_MULTICAST(ntohl(remote->sin.sin_addr.s_addr))) ||
	    (remote->sa.sa_family == AF_INET6 &&
	     ipv6_addr_is_multicast(&remote->sin6.sin6_addr)))
		return -EINVAL;
	/* A flow label only makes sense for an IPv6 remote */
	if (label && remote->sa.sa_family != AF_INET6)
		return -EINVAL;

	geneve->remote = *remote;

	geneve->ttl = ttl;
	geneve->tos = tos;
	geneve->label = label;
	geneve->dst_port = dst_port;
	geneve->collect_md = metadata;
	geneve->flags = flags;

	t = geneve_find_dev(gn, dst_port, remote, geneve->vni,
			    &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	/* make enough headroom for basic scenario */
	encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
	if (remote->sa.sa_family == AF_INET) {
		encap_len += sizeof(struct iphdr);
		/* account for the outer IP header in the device's MTU cap */
		dev->max_mtu -= sizeof(struct iphdr);
	} else {
		encap_len += sizeof(struct ipv6hdr);
		dev->max_mtu -= sizeof(struct ipv6hdr);
	}
	dev->needed_headroom = encap_len + ETH_HLEN;

	/* metadata and classic tunnels may not share a UDP port */
	if (metadata) {
		if (tun_on_same_port)
			return -EPERM;
	} else {
		if (tun_collect_md)
			return -EPERM;
	}

	dst_cache_reset(&geneve->dst_cache);

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}
|
|
|
|
|
|
|
|
static int geneve_newlink(struct net *net, struct net_device *dev,
|
|
|
|
struct nlattr *tb[], struct nlattr *data[])
|
|
|
|
{
|
2015-09-23 01:09:32 +08:00
|
|
|
__be16 dst_port = htons(GENEVE_UDP_PORT);
|
2015-08-27 14:46:52 +08:00
|
|
|
__u8 ttl = 0, tos = 0;
|
|
|
|
bool metadata = false;
|
2015-10-27 05:01:44 +08:00
|
|
|
union geneve_addr remote = geneve_remote_unspec;
|
2016-03-09 10:00:04 +08:00
|
|
|
__be32 label = 0;
|
2015-10-17 07:36:00 +08:00
|
|
|
__u32 vni = 0;
|
2015-12-11 04:37:45 +08:00
|
|
|
u32 flags = 0;
|
2015-08-27 14:46:52 +08:00
|
|
|
|
2015-10-27 05:01:44 +08:00
|
|
|
if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (data[IFLA_GENEVE_REMOTE]) {
|
|
|
|
remote.sa.sa_family = AF_INET;
|
|
|
|
remote.sin.sin_addr.s_addr =
|
|
|
|
nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (data[IFLA_GENEVE_REMOTE6]) {
|
|
|
|
if (!IS_ENABLED(CONFIG_IPV6))
|
|
|
|
return -EPFNOSUPPORT;
|
|
|
|
|
|
|
|
remote.sa.sa_family = AF_INET6;
|
|
|
|
remote.sin6.sin6_addr =
|
|
|
|
nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);
|
|
|
|
|
|
|
|
if (ipv6_addr_type(&remote.sin6.sin6_addr) &
|
|
|
|
IPV6_ADDR_LINKLOCAL) {
|
|
|
|
netdev_dbg(dev, "link-local remote is unsupported\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-17 07:36:00 +08:00
|
|
|
if (data[IFLA_GENEVE_ID])
|
|
|
|
vni = nla_get_u32(data[IFLA_GENEVE_ID]);
|
2015-08-27 14:46:52 +08:00
|
|
|
|
2015-06-02 03:51:34 +08:00
|
|
|
if (data[IFLA_GENEVE_TTL])
|
2015-08-27 14:46:52 +08:00
|
|
|
ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
|
2015-06-02 03:51:34 +08:00
|
|
|
|
2015-06-02 03:51:35 +08:00
|
|
|
if (data[IFLA_GENEVE_TOS])
|
2015-08-27 14:46:52 +08:00
|
|
|
tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
|
2015-06-02 03:51:35 +08:00
|
|
|
|
2016-03-09 10:00:04 +08:00
|
|
|
if (data[IFLA_GENEVE_LABEL])
|
|
|
|
label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
|
|
|
|
IPV6_FLOWLABEL_MASK;
|
|
|
|
|
2015-08-27 14:46:52 +08:00
|
|
|
if (data[IFLA_GENEVE_PORT])
|
2015-09-23 01:09:32 +08:00
|
|
|
dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
|
2015-05-14 00:57:30 +08:00
|
|
|
|
2015-08-27 14:46:52 +08:00
|
|
|
if (data[IFLA_GENEVE_COLLECT_METADATA])
|
|
|
|
metadata = true;
|
2015-05-14 00:57:30 +08:00
|
|
|
|
2015-12-11 04:37:45 +08:00
|
|
|
if (data[IFLA_GENEVE_UDP_CSUM] &&
|
2016-02-20 03:26:24 +08:00
|
|
|
!nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
|
|
|
|
flags |= GENEVE_F_UDP_ZERO_CSUM_TX;
|
2015-12-11 04:37:45 +08:00
|
|
|
|
|
|
|
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
|
|
|
|
nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
|
|
|
|
flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
|
|
|
|
|
|
|
|
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] &&
|
|
|
|
nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
|
|
|
|
flags |= GENEVE_F_UDP_ZERO_CSUM6_RX;
|
|
|
|
|
2016-03-09 10:00:04 +08:00
|
|
|
return geneve_configure(net, dev, &remote, vni, ttl, tos, label,
|
|
|
|
dst_port, metadata, flags);
|
2015-05-14 00:57:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* rtnl_link_ops .dellink: drop the device from the per-netns tunnel list
 * and queue it on @head for batched unregistration by the caller.
 */
static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}
|
|
|
|
|
|
|
|
/* rtnl_link_ops .get_size: worst-case netlink message size needed by
 * geneve_fill_info() (REMOTE/REMOTE6 share one in6_addr-sized slot).
 */
static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__be32)) +  /* IFLA_GENEVE_LABEL */
		nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
		0;
}
|
|
|
|
|
|
|
|
/* rtnl_link_ops .fill_info: dump the device configuration as IFLA_GENEVE_*
 * attributes.  Returns 0 on success, -EMSGSIZE if @skb runs out of room.
 */
static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	__u32 vni;

	/* Reassemble the host-order VNI from its wire-order byte array */
	vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (geneve->remote.sa.sa_family == AF_INET) {
		if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
				    geneve->remote.sin.sin_addr.s_addr))
			goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		/* non-AF_INET remote: only reachable when IPv6 is enabled */
		if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
				     &geneve->remote.sin6.sin6_addr))
			goto nla_put_failure;
#endif
	}

	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos) ||
	    nla_put_be32(skb, IFLA_GENEVE_LABEL, geneve->label))
		goto nla_put_failure;

	if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
		goto nla_put_failure;

	if (geneve->collect_md) {
		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	/* UDP_CSUM reports "checksum on", i.e. the inverse of the TX flag */
	if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
		       !(geneve->flags & GENEVE_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
		       !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
		       !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_RX)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
|
|
|
|
|
|
|
|
/* rtnetlink operations backing "ip link add ... type geneve" */
static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};
|
|
|
|
|
2015-08-27 14:46:52 +08:00
|
|
|
/* Create a collect-metadata ("flow based") geneve device on behalf of an
 * in-kernel user such as openvswitch.
 *
 * Returns the new device or an ERR_PTR().  Error handling is two-phase:
 * before registration (geneve_configure failure) the device is freed
 * directly; after registration it must go through geneve_dellink() plus
 * unregister_netdevice_many().
 */
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* metadata=true: vni/ttl/tos/label come per-packet, so pass zeros */
	err = geneve_configure(net, dev, &geneve_remote_unspec,
			       0, 0, 0, 0, htons(dst_port), true,
			       GENEVE_F_UDP_ZERO_CSUM6_RX);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = geneve_change_mtu(dev, IP_MAX_MTU);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;

err:
	/* device is registered by now: tear down via the dellink path */
	geneve_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
|
|
|
|
|
2016-04-19 03:19:48 +08:00
|
|
|
static int geneve_netdevice_event(struct notifier_block *unused,
|
|
|
|
unsigned long event, void *ptr)
|
|
|
|
{
|
|
|
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
|
|
|
|
2016-06-17 03:21:00 +08:00
|
|
|
if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
|
2016-04-19 03:19:48 +08:00
|
|
|
geneve_push_rx_ports(dev);
|
|
|
|
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Registered in geneve_init_module() to receive netdevice events */
static struct notifier_block geneve_notifier_block __read_mostly = {
	.notifier_call = geneve_netdevice_event,
};
|
|
|
|
|
2015-05-14 00:57:30 +08:00
|
|
|
/* Per-netns init: set up the empty device and socket lists. */
static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	INIT_LIST_HEAD(&gn->geneve_list);
	INIT_LIST_HEAD(&gn->sock_list);
	return 0;
}
|
|
|
|
|
|
|
|
/* Per-netns exit: unregister every geneve device associated with @net,
 * whether it currently lives in this namespace or was created here and
 * later moved, in one batched unregistration under RTNL.
 */
static void __net_exit geneve_exit_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *geneve, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* gather any geneve devices that were moved into this ns */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &geneve_link_ops)
			unregister_netdevice_queue(dev, &list);

	/* now gather any other geneve devices that were created in this ns */
	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
		/* If geneve->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(geneve->dev), net))
			unregister_netdevice_queue(geneve->dev, &list);
	}

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
|
|
|
|
|
|
|
|
/* Per-network-namespace registration: allocates one struct geneve_net
 * per netns, keyed by geneve_net_id.
 */
static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = geneve_exit_net,
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};
|
|
|
|
|
|
|
|
/* Module init: register the pernet subsystem, the netdevice notifier, and
 * the rtnl link ops, unwinding in reverse order on failure.
 */
static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto err;

	rc = register_netdevice_notifier(&geneve_notifier_block);
	if (rc)
		goto err_unreg_pernet;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto err_unreg_notifier;

	return 0;

err_unreg_notifier:
	unregister_netdevice_notifier(&geneve_notifier_block);
err_unreg_pernet:
	unregister_pernet_subsys(&geneve_net_ops);
err:
	return rc;
}
late_initcall(geneve_init_module);
|
|
|
|
|
|
|
|
/* Module exit: tear down in the reverse order of geneve_init_module(). */
static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_netdevice_notifier(&geneve_notifier_block);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);
|
|
|
|
|
|
|
|
/* Module metadata; the RTNL alias lets "ip link add type geneve" autoload us */
MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");
|