/*
 *	RAW sockets for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Adapted from linux/net/ipv4/raw.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
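
/*
 * Illustrative user-space sketch (not part of this file): a raw IPv6
 * socket is opened with, e.g.,
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *
 * Incoming packets whose Next Header matches the protocol given to
 * socket() are cloned to such sockets by ipv6_raw_deliver() below;
 * rawv6_sendmsg() and rawv6_send_hdrinc() handle the transmit side.
 */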

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>

#include <net/raw.h>
#include <net/rawv6.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define	ICMPV6_HDRLEN	4	/* ICMPv6 header, RFC 4443 Section 2.1 */

static struct raw_hashinfo raw_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
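
/* Starting from @sk, walk the raw-socket hash chain and return the first
 * socket bound to protocol @num whose local/remote addresses and bound
 * device (if any) match the packet; callers hold raw_v6_hashinfo.lock.
 */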
static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
		unsigned short num, const struct in6_addr *loc_addr,
		const struct in6_addr *rmt_addr, int dif)
{
	bool is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk)
		if (inet_sk(sk)->inet_num == num) {

			if (!net_eq(sock_net(sk), net))
				continue;

			if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
				continue;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
				if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
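
/* The ICMPv6 type filter consulted below is the 256-bit bitmap installed
 * from user space with the ICMP6_FILTER socket option; a set bit means
 * "block this type".
 */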
/*
 *	0 - deliver
 *	1 - block
 */
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
	struct icmp6hdr _hdr;
	const struct icmp6hdr *hdr;

	/* We require only the four bytes of the ICMPv6 header, not any
	 * additional bytes of message body in "struct icmp6hdr".
	 */
	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
				 ICMPV6_HDRLEN, &_hdr);
	if (hdr) {
		const __u32 *data = &raw6_sk(sk)->filter.data[0];
		unsigned int type = hdr->icmp6_type;

		return (data[type >> 5] & (1U << (type & 31))) != 0;
	}
	return 1;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);

static mh_filter_t __rcu *mh_filter __read_mostly;

int rawv6_mh_filter_register(mh_filter_t filter)
{
	rcu_assign_pointer(mh_filter, filter);
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);

int rawv6_mh_filter_unregister(mh_filter_t filter)
{
	RCU_INIT_POINTER(mh_filter, NULL);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);

#endif

/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 */
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	const struct in6_addr *saddr;
	const struct in6_addr *daddr;
	struct sock *sk;
	bool delivered = false;
	__u8 hash;
	struct net *net;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = saddr + 1;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);

	if (sk == NULL)
		goto out;

	net = dev_net(skb->dev);
	sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);

	while (sk) {
		int filtered;

		delivered = true;
		switch (nexthdr) {
		case IPPROTO_ICMPV6:
			filtered = icmpv6_filter(sk, skb);
			break;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
		{
			/* XXX: The MH is validated here so it is done only
			 * once per packet.  Ideally this would happen after
			 * the xfrm policy check, but it does not; the xfrm
			 * policy check lives in rawv6_rcv() because it must
			 * run for each socket.
			 */
			mh_filter_t *filter;

			filter = rcu_dereference(mh_filter);
			filtered = filter ? (*filter)(sk, skb) : 0;
			break;
		}
#endif
		default:
			filtered = 0;
			break;
		}

		if (filtered < 0)
			break;
		if (filtered == 0) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone) {
				nf_reset(clone);
				rawv6_rcv(sk, clone);
			}
		}
		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
				     IP6CB(skb)->iif);
	}
out:
	read_unlock(&raw_v6_hashinfo.lock);
	return delivered;
}
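
/* Called from the IPv6 input path: returns true when the packet was
 * delivered (as clones) to at least one raw socket bound to @nexthdr;
 * the original skb is left untouched for the caller.
 */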
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
	struct sock *raw_sk;

	raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
	if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
		raw_sk = NULL;

	return raw_sk != NULL;
}
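
/* bind(2) handler: associates the socket with a local IPv6 address and,
 * for scoped (e.g. link-local) addresses, with the interface given in
 * sin6_scope_id.
 */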
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__be32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	rcu_read_lock();
	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (__ipv6_addr_needs_scope_id(addr_type)) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out_unlock;

			err = -ENODEV;
			dev = dev_get_by_index_rcu(sock_net(sk),
						   sk->sk_bound_dev_if);
			if (!dev)
				goto out_unlock;
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
					   dev, 0)) {
				goto out_unlock;
			}
		}
	}

	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
	sk->sk_v6_rcv_saddr = addr->sin6_addr;
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		np->saddr = addr->sin6_addr;
	err = 0;
out_unlock:
	rcu_read_unlock();
out:
	release_sock(sk);
	return err;
}

static void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       u8 type, u8 code, int offset, __be32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and the error is hard).
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG) {
		ip6_sk_update_pmtu(skb, sk, info);
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
	}
	if (type == NDISC_REDIRECT) {
		ip6_sk_redirect(skb, sk);
		return;
	}
	if (np->recverr) {
		u8 *payload = skb->data;
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}
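
/* Fan an ICMPv6 error out to every raw socket bound to the protocol
 * (@nexthdr) of the packet embedded in the error, letting each report
 * it via rawv6_err().
 */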
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
		u8 type, u8 code, int inner_offset, __be32 info)
{
	struct sock *sk;
	int hash;
	const struct in6_addr *saddr, *daddr;
	struct net *net;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
	if (sk != NULL) {
		/* Note: ipv6_hdr(skb) != skb->data */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
		net = dev_net(skb->dev);

		while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
					     IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code,
				  inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_hashinfo.lock);
}

static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
	    skb_checksum_complete(skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Charge it to the socket. */
	skb_dst_drop(skb);
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return 0;
}

/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		skb_postpull_rcsum(skb, skb_network_header(skb),
				   skb_network_header_len(skb));
		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     skb->len, inet->inet_num, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len,
							 inet->inet_num, 0));

	if (inet->hdrincl) {
		if (skb_checksum_complete(skb)) {
			atomic_inc(&sk->sk_drops);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}

/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
		  struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
	struct sk_buff *skb;
	size_t copied;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (addr_len)
		*addr_len = sizeof(*sin6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb_csum_unnecessary(skb)) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if (__skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = 0;
		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
							  IP6CB(skb)->iif);
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	skb_kill_datagram(sk, skb, flags);

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	goto out;
}
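
/* Flush the frames queued by ip6_append_data().  When the IPV6_CHECKSUM
 * option is active (rp->checksum), fold the per-fragment checksums into
 * the pseudo-header sum and store the result at rp->offset within the
 * transport header before pushing the packet out.
 */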
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				     struct raw6_sock *rp)
{
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	int total_len;
	__wsum tmp_csum;
	__sum16 csum;

	if (!rp->checksum)
		goto send;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.base.length;
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* Should we check the HW csum here? miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - skb_transport_offset(skb);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb_transport_offset(skb);
	if (skb_copy_bits(skb, offset, &csum, 2))
		BUG();

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));

	csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
			       total_len, fl6->flowi6_proto, tmp_csum);

	if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
		csum = CSUM_MANGLED_0;

	if (skb_store_bits(skb, offset, &csum, 2))
		BUG();

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}
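
/* Transmit path for IPV6_HDRINCL sockets: the user-supplied buffer already
 * contains the IPv6 header, so it is copied verbatim into a freshly
 * allocated skb and handed to the netfilter LOCAL_OUT hook / dst_output()
 * without any header rewriting.
 */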
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
			struct flowi6 *fl6, struct dst_entry **dstp,
			unsigned int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	int err;
	struct rt6_info *rt = (struct rt6_info *)*dstp;
	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
	int tlen = rt->dst.dev->needed_tailroom;

	if (length > rt->dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hlen);

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, &rt->dst);
	*dstp = NULL;

	skb_put(skb, length);
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		      rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !np->recverr)
		err = 0;
	return err;
}
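
/* Peek into the iovec for the first bytes of the payload so that the
 * ICMPv6 type/code (or the Mobility Header type) can be copied into the
 * flow description, where routing and IPsec policy lookups can match on
 * them.
 */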
static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
{
	struct iovec *iov;
	u8 __user *type = NULL;
	u8 __user *code = NULL;
	u8 len = 0;
	int probed = 0;
	int i;

	if (!msg->msg_iov)
		return 0;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl6->flowi6_proto) {
		case IPPROTO_ICMPV6:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				if (get_user(fl6->fl6_icmp_type, type) ||
				    get_user(fl6->fl6_icmp_code, code))
					return -EFAULT;
				probed = 1;
			}
			break;
		case IPPROTO_MH:
			if (iov->iov_base && iov->iov_len < 1)
				break;
			/* check if type field is readable or not. */
			if (iov->iov_len > 2 - len) {
				u8 __user *p = iov->iov_base;
				if (get_user(fl6->fl6_mh_type, &p[2 - len]))
					return -EFAULT;
				probed = 1;
			} else
				len += iov->iov_len;

			break;
		default:
			probed = 1;
			break;
		}
		if (probed)
			break;
	}
	return 0;
}
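
/* sendmsg(2) handler: resolves the destination address and ancillary data
 * into a flowi6, then either copies the caller's complete packet via
 * rawv6_send_hdrinc() (IPV6_HDRINCL) or builds it with ip6_append_data()
 * and rawv6_push_pending_frames().
 */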
|
|
|
|
|
|
|
|
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
|
|
|
|
struct msghdr *msg, size_t len)
|
|
|
|
{
|
|
|
|
struct ipv6_txoptions opt_space;
|
|
|
|
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
|
2010-06-02 05:35:01 +08:00
|
|
|
struct in6_addr *daddr, *final_p, final;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct inet_sock *inet = inet_sk(sk);
|
|
|
|
struct ipv6_pinfo *np = inet6_sk(sk);
|
|
|
|
struct raw6_sock *rp = raw6_sk(sk);
|
|
|
|
struct ipv6_txoptions *opt = NULL;
|
|
|
|
struct ip6_flowlabel *flowlabel = NULL;
|
|
|
|
struct dst_entry *dst = NULL;
|
2011-03-13 05:22:43 +08:00
|
|
|
struct flowi6 fl6;
|
2005-04-17 06:20:36 +08:00
|
|
|
int addr_len = msg->msg_namelen;
|
|
|
|
int hlimit = -1;
|
2005-09-08 09:19:03 +08:00
|
|
|
int tclass = -1;
|
2010-04-23 19:26:08 +08:00
|
|
|
int dontfrag = -1;
|
2005-04-17 06:20:36 +08:00
|
|
|
u16 proto;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Rough check on arithmetic overflow,
|
[IPv6]: Fix incorrect length check in rawv6_sendmsg()
In article <20070329.142644.70222545.davem@davemloft.net> (at Thu, 29 Mar 2007 14:26:44 -0700 (PDT)), David Miller <davem@davemloft.net> says:
> From: Sridhar Samudrala <sri@us.ibm.com>
> Date: Thu, 29 Mar 2007 14:17:28 -0700
>
> > The check for length in rawv6_sendmsg() is incorrect.
> > As len is an unsigned int, (len < 0) will never be TRUE.
> > I think checking for IPV6_MAXPLEN(65535) is better.
> >
> > Is it possible to send ipv6 jumbo packets using raw
> > sockets? If so, we can remove this check.
>
> I don't see why such a limitation against jumbo would exist,
> does anyone else?
>
> Thanks for catching this Sridhar. A good compiler should simply
> fail to compile "if (x < 0)" when 'x' is an unsigned type, don't
> you think :-)
Dave, we use "int" for returning value,
so we should fix this anyway, IMHO;
we should not allow len > INT_MAX.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Acked-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-03-31 05:45:35 +08:00
|
|
|
better check is made in ip6_append_data().
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
[IPv6]: Fix incorrect length check in rawv6_sendmsg()
In article <20070329.142644.70222545.davem@davemloft.net> (at Thu, 29 Mar 2007 14:26:44 -0700 (PDT)), David Miller <davem@davemloft.net> says:
> From: Sridhar Samudrala <sri@us.ibm.com>
> Date: Thu, 29 Mar 2007 14:17:28 -0700
>
> > The check for length in rawv6_sendmsg() is incorrect.
> > As len is an unsigned int, (len < 0) will never be TRUE.
> > I think checking for IPV6_MAXPLEN(65535) is better.
> >
> > Is it possible to send ipv6 jumbo packets using raw
> > sockets? If so, we can remove this check.
>
> I don't see why such a limitation against jumbo would exist,
> does anyone else?
>
> Thanks for catching this Sridhar. A good compiler should simply
> fail to compile "if (x < 0)" when 'x' is an unsigned type, don't
> you think :-)
Dave, we use "int" for returning value,
so we should fix this anyway, IMHO;
we should not allow len > INT_MAX.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Acked-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-03-31 05:45:35 +08:00
|
|
|
if (len > INT_MAX)
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
/* Mirror BSD error message compatibility */
|
2007-02-09 22:24:49 +08:00
|
|
|
if (msg->msg_flags & MSG_OOB)
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
/*
|
2007-02-09 22:24:49 +08:00
|
|
|
* Get and verify the address.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2011-03-13 05:22:43 +08:00
|
|
|
memset(&fl6, 0, sizeof(fl6));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-03-13 05:22:43 +08:00
|
|
|
fl6.flowi6_mark = sk->sk_mark;
|
2008-01-31 11:08:16 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (sin6) {
|
2007-02-09 22:24:49 +08:00
|
|
|
if (addr_len < SIN6_LEN_RFC2133)
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2007-02-09 22:24:49 +08:00
|
|
|
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
|
2010-09-23 04:43:57 +08:00
|
|
|
return -EAFNOSUPPORT;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* port is the proto value [0..255] carried in nexthdr */
|
|
|
|
proto = ntohs(sin6->sin6_port);
|
|
|
|
|
|
|
|
if (!proto)
|
2009-10-15 14:30:45 +08:00
|
|
|
proto = inet->inet_num;
|
|
|
|
else if (proto != inet->inet_num)
|
2010-09-23 04:43:57 +08:00
|
|
|
return -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (proto > 255)
|
2010-09-23 04:43:57 +08:00
|
|
|
return -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
daddr = &sin6->sin6_addr;
|
|
|
|
if (np->sndflow) {
|
2011-03-13 05:22:43 +08:00
|
|
|
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
|
|
|
|
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
|
|
|
|
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (flowlabel == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
daddr = &flowlabel->dst;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Otherwise it will be difficult to maintain
|
|
|
|
* sk->sk_dst_cache.
|
|
|
|
*/
|
|
|
|
if (sk->sk_state == TCP_ESTABLISHED &&
|
ipv6: make lookups simpler and faster
TCP listener refactoring, part 4 :
To speed up inet lookups, we moved IPv4 addresses from inet to struct
sock_common
Now is time to do the same for IPv6, because it permits us to have fast
lookups for all kind of sockets, including upcoming SYN_RECV.
Getting IPv6 addresses in TCP lookups currently requires two extra cache
lines, plus a dereference (and memory stall).
inet6_sk(sk) does the dereference of inet_sk(__sk)->pinet6
This patch is way bigger than its IPv4 counter part, because for IPv4,
we could add aliases (inet_daddr, inet_rcv_saddr), while on IPv6,
it's not doable easily.
inet6_sk(sk)->daddr becomes sk->sk_v6_daddr
inet6_sk(sk)->rcv_saddr becomes sk->sk_v6_rcv_saddr
And timewait socket also have tw->tw_v6_daddr & tw->tw_v6_rcv_saddr
at the same offset.
We get rid of INET6_TW_MATCH() as INET6_MATCH() is now the generic
macro.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-10-04 06:42:29 +08:00
|
|
|
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
|
|
|
|
daddr = &sk->sk_v6_daddr;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (addr_len >= sizeof(struct sockaddr_in6) &&
|
|
|
|
sin6->sin6_scope_id &&
|
2013-03-08 10:07:19 +08:00
|
|
|
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
|
2011-03-13 05:22:43 +08:00
|
|
|
fl6.flowi6_oif = sin6->sin6_scope_id;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
2007-02-09 22:24:49 +08:00
|
|
|
if (sk->sk_state != TCP_ESTABLISHED)
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EDESTADDRREQ;
|
2007-02-09 22:24:49 +08:00
|
|
|
|
2009-10-15 14:30:45 +08:00
|
|
|
proto = inet->inet_num;
|
2013-10-04 06:42:29 +08:00
|
|
|
daddr = &sk->sk_v6_daddr;
|
2011-03-13 05:22:43 +08:00
|
|
|
fl6.flowlabel = np->flow_label;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2011-03-13 05:22:43 +08:00
|
|
|
if (fl6.flowi6_oif == 0)
|
|
|
|
fl6.flowi6_oif = sk->sk_bound_dev_if;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (msg->msg_controllen) {
|
|
|
|
opt = &opt_space;
|
|
|
|
memset(opt, 0, sizeof(struct ipv6_txoptions));
|
|
|
|
opt->tot_len = sizeof(struct ipv6_txoptions);
|
|
|
|
|
2013-01-31 09:02:24 +08:00
|
|
|
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
|
|
|
|
&hlimit, &tclass, &dontfrag);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (err < 0) {
|
|
|
|
fl6_sock_release(flowlabel);
|
|
|
|
return err;
|
|
|
|
}
|
2011-03-13 05:22:43 +08:00
|
|
|
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
|
|
|
|
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (flowlabel == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (!(opt->opt_nflen|opt->opt_flen))
|
|
|
|
opt = NULL;
|
|
|
|
}
|
|
|
|
if (opt == NULL)
|
|
|
|
opt = np->opt;
|
2005-11-20 11:23:18 +08:00
|
|
|
if (flowlabel)
|
|
|
|
opt = fl6_merge_options(&opt_space, flowlabel, opt);
|
|
|
|
opt = ipv6_fixup_options(&opt_space, opt);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-03-13 05:22:43 +08:00
|
|
|
fl6.flowi6_proto = proto;
|
|
|
|
err = rawv6_probe_proto_opt(&fl6, msg);
|
2006-10-31 07:06:12 +08:00
|
|
|
if (err)
|
|
|
|
goto out;
|
2007-02-09 22:24:49 +08:00
|
|
|
|
2008-04-11 12:38:24 +08:00
|
|
|
if (!ipv6_addr_any(daddr))
|
2011-11-21 11:39:03 +08:00
|
|
|
fl6.daddr = *daddr;
|
2008-04-11 12:38:24 +08:00
|
|
|
else
|
2011-03-13 05:22:43 +08:00
|
|
|
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
|
|
|
|
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
|
2011-11-21 11:39:03 +08:00
|
|
|
fl6.saddr = np->saddr;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-03-13 05:22:43 +08:00
|
|
|
final_p = fl6_update_dst(&fl6, opt, &final);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-03-13 05:22:43 +08:00
|
|
|
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
|
|
|
|
fl6.flowi6_oif = np->mcast_oif;
|
2012-02-08 17:11:08 +08:00
|
|
|
else if (!fl6.flowi6_oif)
|
|
|
|
fl6.flowi6_oif = np->ucast_oif;
|
2011-03-13 05:22:43 +08:00
|
|
|
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-03-13 05:22:43 +08:00
|
|
|
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
|
2011-03-02 05:19:07 +08:00
|
|
|
if (IS_ERR(dst)) {
|
|
|
|
err = PTR_ERR(dst);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out;
|
2007-05-25 09:17:54 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
if (hlimit < 0) {
|
2011-03-13 05:22:43 +08:00
|
|
|
if (ipv6_addr_is_multicast(&fl6.daddr))
|
2005-04-17 06:20:36 +08:00
|
|
|
hlimit = np->mcast_hops;
|
|
|
|
else
|
|
|
|
hlimit = np->hop_limit;
|
|
|
|
if (hlimit < 0)
|
2008-03-10 18:00:30 +08:00
|
|
|
hlimit = ip6_dst_hoplimit(dst);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2009-08-09 16:12:48 +08:00
|
|
|
if (tclass < 0)
|
2006-09-14 11:01:28 +08:00
|
|
|
tclass = np->tclass;
|
2005-09-08 09:19:03 +08:00
|
|
|
|
2010-04-23 19:26:08 +08:00
|
|
|
if (dontfrag < 0)
|
|
|
|
dontfrag = np->dontfrag;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (msg->msg_flags&MSG_CONFIRM)
|
|
|
|
goto do_confirm;
|
|
|
|
|
|
|
|
back_from_confirm:
|
2010-06-04 06:23:57 +08:00
|
|
|
if (inet->hdrincl)
|
2011-03-13 05:22:43 +08:00
|
|
|
err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags);
|
2010-06-04 06:23:57 +08:00
|
|
|
else {
|
2005-04-17 06:20:36 +08:00
|
|
|
lock_sock(sk);
|
2005-09-08 09:19:03 +08:00
|
|
|
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
|
2011-03-13 05:22:43 +08:00
|
|
|
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst,
|
2010-04-23 19:26:08 +08:00
|
|
|
msg->msg_flags, dontfrag);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (err)
|
|
|
|
ip6_flush_pending_frames(sk);
|
|
|
|
else if (!(msg->msg_flags & MSG_MORE))
|
2011-03-13 05:22:43 +08:00
|
|
|
err = rawv6_push_pending_frames(sk, &fl6, rp);
|
2007-09-15 07:45:40 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
done:
|
2006-02-14 07:56:13 +08:00
|
|
|
dst_release(dst);
|
2007-02-09 22:24:49 +08:00
|
|
|
out:
|
2005-04-17 06:20:36 +08:00
|
|
|
fl6_sock_release(flowlabel);
|
|
|
|
return err < 0 ? err : len;
|
|
|
|
do_confirm:
|
|
|
|
dst_confirm(dst);
|
|
|
|
if (!(msg->msg_flags & MSG_PROBE) || len)
|
|
|
|
goto back_from_confirm;
|
|
|
|
err = 0;
|
|
|
|
goto done;
|
|
|
|
}
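The sendmsg path above takes the next-header protocol number from sin6_port (falling back to the protocol the socket was bound to when it is zero) and, for non-hdrincl sockets, lets ip6_append_data() build the packet. Below is a minimal userspace sketch of driving this path with an ICMPv6 echo request; the helper name and error handling are illustrative assumptions, not part of this file.

#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <sys/socket.h>

/* Needs CAP_NET_RAW; returns 0 on success, -1 on error. */
int send_echo_request(const char *dst6)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	struct icmp6_hdr hdr;
	struct sockaddr_in6 sin6;

	if (fd < 0)
		return -1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.icmp6_type = ICMP6_ECHO_REQUEST;	/* checksum filled in by the kernel (offset 2) */

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(IPPROTO_ICMPV6);	/* proto carried in nexthdr; 0 also works */
	if (inet_pton(AF_INET6, dst6, &sin6.sin6_addr) != 1 ||
	    sendto(fd, &hdr, sizeof(hdr), 0,
		   (struct sockaddr *)&sin6, sizeof(sin6)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}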
|
|
|
|
|
2007-02-09 22:24:49 +08:00
|
|
|
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
|
2005-04-17 06:20:36 +08:00
|
|
|
char __user *optval, int optlen)
|
|
|
|
{
|
|
|
|
switch (optname) {
|
|
|
|
case ICMPV6_FILTER:
|
|
|
|
if (optlen > sizeof(struct icmp6_filter))
|
|
|
|
optlen = sizeof(struct icmp6_filter);
|
|
|
|
if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
return -ENOPROTOOPT;
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:24:49 +08:00
|
|
|
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
|
2005-04-17 06:20:36 +08:00
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
int len;
|
|
|
|
|
|
|
|
switch (optname) {
|
|
|
|
case ICMPV6_FILTER:
|
|
|
|
if (get_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (len < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
if (len > sizeof(struct icmp6_filter))
|
|
|
|
len = sizeof(struct icmp6_filter);
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
return -ENOPROTOOPT;
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
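The two helpers above copy a struct icmp6_filter between user space and raw6_sk(sk)->filter. Here is a userspace sketch of installing such a filter (the function name is an illustrative assumption); RFC 3542 spells the level/option as IPPROTO_ICMPV6/ICMP6_FILTER, which correspond to the kernel's SOL_ICMPV6/ICMPV6_FILTER handled here.

#include <netinet/icmp6.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Drop every ICMPv6 type except neighbour advertisements. */
int filter_only_na(int fd)
{
	struct icmp6_filter filter;

	ICMP6_FILTER_SETBLOCKALL(&filter);
	ICMP6_FILTER_SETPASS(ND_NEIGHBOR_ADVERT, &filter);

	return setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER,
			  &filter, sizeof(filter));
}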
|
|
|
|
|
|
|
|
|
2006-03-21 14:45:21 +08:00
|
|
|
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval, unsigned int optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct raw6_sock *rp = raw6_sk(sk);
|
|
|
|
int val;
|
|
|
|
|
2007-02-09 22:24:49 +08:00
|
|
|
if (get_user(val, (int __user *)optval))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
switch (optname) {
|
2011-07-01 17:43:08 +08:00
|
|
|
case IPV6_CHECKSUM:
|
|
|
|
if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
|
|
|
|
level == IPPROTO_IPV6) {
|
|
|
|
/*
|
|
|
|
* RFC3542 tells that IPV6_CHECKSUM socket
|
|
|
|
* option in the IPPROTO_IPV6 level is not
|
|
|
|
* allowed on ICMPv6 sockets.
|
|
|
|
* If you want to set it, use IPPROTO_RAW
|
|
|
|
* level IPV6_CHECKSUM socket option
|
|
|
|
* (Linux extension).
|
|
|
|
*/
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2008-04-25 12:30:38 +08:00
|
|
|
|
2011-07-01 17:43:08 +08:00
|
|
|
/* You may get strange results with a positive odd offset;
|
|
|
|
RFC2292bis agrees with me. */
|
|
|
|
if (val > 0 && (val&1))
|
|
|
|
return -EINVAL;
|
|
|
|
if (val < 0) {
|
|
|
|
rp->checksum = 0;
|
|
|
|
} else {
|
|
|
|
rp->checksum = 1;
|
|
|
|
rp->offset = val;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-07-01 17:43:08 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-07-01 17:43:08 +08:00
|
|
|
default:
|
|
|
|
return -ENOPROTOOPT;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
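As a usage note for the IPV6_CHECKSUM handling above: the option value is the even byte offset of the 16-bit checksum field that the kernel should fill in, and a negative value switches checksumming off. A userspace sketch follows (the helper name is an illustrative assumption); note that, per the comment in the code, this IPPROTO_IPV6-level option is rejected on ICMPv6 sockets, which must use the Linux-specific IPPROTO_RAW level instead.

#include <netinet/in.h>
#include <sys/socket.h>

/* offset: even offset of the checksum field in the payload, or -1 to disable */
int set_raw6_checksum(int fd, int offset)
{
	return setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM,
			  &offset, sizeof(offset));
}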
|
|
|
|
|
2006-03-21 14:45:21 +08:00
|
|
|
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval, unsigned int optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2011-07-01 17:43:08 +08:00
|
|
|
switch (level) {
|
|
|
|
case SOL_RAW:
|
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-07-01 17:43:08 +08:00
|
|
|
case SOL_ICMPV6:
|
|
|
|
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
|
|
|
|
case SOL_IPV6:
|
|
|
|
if (optname == IPV6_CHECKSUM)
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return ipv6_setsockopt(sk, level, optname, optval, optlen);
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
|
|
|
|
2006-03-21 14:45:21 +08:00
|
|
|
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval, unsigned int optlen)
|
2006-03-21 14:45:21 +08:00
|
|
|
{
|
2006-03-21 14:48:35 +08:00
|
|
|
switch (level) {
|
|
|
|
case SOL_RAW:
|
|
|
|
break;
|
|
|
|
case SOL_ICMPV6:
|
2009-10-15 14:30:45 +08:00
|
|
|
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
|
2006-03-21 14:48:35 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
|
|
|
|
case SOL_IPV6:
|
|
|
|
if (optname == IPV6_CHECKSUM)
|
2006-03-21 14:45:21 +08:00
|
|
|
break;
|
2006-03-21 14:48:35 +08:00
|
|
|
default:
|
|
|
|
return compat_ipv6_setsockopt(sk, level, optname,
|
|
|
|
optval, optlen);
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2006-03-21 14:45:21 +08:00
|
|
|
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
struct raw6_sock *rp = raw6_sk(sk);
|
|
|
|
int val, len;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (get_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
switch (optname) {
|
|
|
|
case IPV6_CHECKSUM:
|
2008-04-25 12:30:38 +08:00
|
|
|
/*
|
|
|
|
* We allow getsockopt() for IPPROTO_IPV6-level
|
|
|
|
* IPV6_CHECKSUM socket option on ICMPv6 sockets
|
|
|
|
* since RFC3542 is silent about it.
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
if (rp->checksum == 0)
|
|
|
|
val = -1;
|
|
|
|
else
|
|
|
|
val = rp->offset;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -ENOPROTOOPT;
|
|
|
|
}
|
|
|
|
|
|
|
|
len = min_t(unsigned int, sizeof(int), len);
|
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &val, len))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-03-21 14:45:21 +08:00
|
|
|
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
2011-07-01 17:43:08 +08:00
|
|
|
switch (level) {
|
|
|
|
case SOL_RAW:
|
|
|
|
break;
|
2006-03-21 14:45:21 +08:00
|
|
|
|
2011-07-01 17:43:08 +08:00
|
|
|
case SOL_ICMPV6:
|
|
|
|
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
|
|
|
|
case SOL_IPV6:
|
|
|
|
if (optname == IPV6_CHECKSUM)
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return ipv6_getsockopt(sk, level, optname, optval, optlen);
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
|
|
|
|
2006-03-21 14:45:21 +08:00
|
|
|
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
|
2006-03-21 14:48:35 +08:00
|
|
|
char __user *optval, int __user *optlen)
|
2006-03-21 14:45:21 +08:00
|
|
|
{
|
2006-03-21 14:48:35 +08:00
|
|
|
switch (level) {
|
|
|
|
case SOL_RAW:
|
|
|
|
break;
|
|
|
|
case SOL_ICMPV6:
|
2009-10-15 14:30:45 +08:00
|
|
|
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
|
2006-03-21 14:48:35 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
|
|
|
|
case SOL_IPV6:
|
|
|
|
if (optname == IPV6_CHECKSUM)
|
2006-03-21 14:45:21 +08:00
|
|
|
break;
|
2006-03-21 14:48:35 +08:00
|
|
|
default:
|
|
|
|
return compat_ipv6_getsockopt(sk, level, optname,
|
|
|
|
optval, optlen);
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2006-03-21 14:45:21 +08:00
|
|
|
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
|
|
|
|
{
|
2011-07-01 17:43:08 +08:00
|
|
|
switch (cmd) {
|
|
|
|
case SIOCOUTQ: {
|
|
|
|
int amount = sk_wmem_alloc_get(sk);
|
2009-06-18 10:05:41 +08:00
|
|
|
|
2011-07-01 17:43:08 +08:00
|
|
|
return put_user(amount, (int __user *)arg);
|
|
|
|
}
|
|
|
|
case SIOCINQ: {
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int amount = 0;
|
|
|
|
|
|
|
|
spin_lock_bh(&sk->sk_receive_queue.lock);
|
|
|
|
skb = skb_peek(&sk->sk_receive_queue);
|
|
|
|
if (skb != NULL)
|
2013-05-29 04:34:26 +08:00
|
|
|
amount = skb_tail_pointer(skb) -
|
|
|
|
skb_transport_header(skb);
|
2011-07-01 17:43:08 +08:00
|
|
|
spin_unlock_bh(&sk->sk_receive_queue.lock);
|
|
|
|
return put_user(amount, (int __user *)arg);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-07-01 17:43:08 +08:00
|
|
|
default:
|
2008-04-03 08:22:53 +08:00
|
|
|
#ifdef CONFIG_IPV6_MROUTE
|
2011-07-01 17:43:08 +08:00
|
|
|
return ip6mr_ioctl(sk, cmd, (void __user *)arg);
|
2008-04-03 08:22:53 +08:00
|
|
|
#else
|
2011-07-01 17:43:08 +08:00
|
|
|
return -ENOIOCTLCMD;
|
2008-04-03 08:22:53 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
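A userspace sketch of the two queue ioctls handled above (the helper name is an illustrative assumption): SIOCOUTQ reports bytes not yet sent, SIOCINQ the payload length of the next queued datagram.

#include <sys/ioctl.h>
#include <linux/sockios.h>

int queued_bytes(int fd, int *inq, int *outq)
{
	if (ioctl(fd, SIOCINQ, inq) < 0 ||	/* next datagram's payload size */
	    ioctl(fd, SIOCOUTQ, outq) < 0)	/* bytes still in the send queue */
		return -1;
	return 0;
}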
|
|
|
|
|
2011-02-04 09:59:32 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCOUTQ:
|
|
|
|
case SIOCINQ:
|
|
|
|
return -ENOIOCTLCMD;
|
|
|
|
default:
|
|
|
|
#ifdef CONFIG_IPV6_MROUTE
|
|
|
|
return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
|
|
|
|
#else
|
|
|
|
return -ENOIOCTLCMD;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void rawv6_close(struct sock *sk, long timeout)
|
|
|
|
{
|
2009-10-15 14:30:45 +08:00
|
|
|
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
|
2008-07-19 15:28:58 +08:00
|
|
|
ip6_ra_control(sk, -1);
|
2008-04-03 08:22:53 +08:00
|
|
|
ip6mr_sk_done(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
sk_common_release(sk);
|
|
|
|
}
|
|
|
|
|
2008-06-15 08:04:49 +08:00
|
|
|
static void raw6_destroy(struct sock *sk)
|
raw: Raw socket leak.
The program below just leaks a raw kernel socket:
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
int main() {
int fd = socket(PF_INET, SOCK_RAW, IPPROTO_UDP);
struct sockaddr_in addr;
memset(&addr, 0, sizeof(addr));
inet_aton("127.0.0.1", &addr.sin_addr);
addr.sin_family = AF_INET;
addr.sin_port = htons(2048);
sendto(fd, "a", 1, MSG_MORE, (struct sockaddr *)&addr, sizeof(addr));
return 0;
}
A corked packet is allocated via sock_wmalloc(), which holds the owner socket,
so we must uncork it and flush all pending data on close. Do this the
same way UDP does.
Signed-off-by: Denis V. Lunev <den@openvz.org>
Acked-by: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
2008-06-05 06:16:12 +08:00
|
|
|
{
|
|
|
|
lock_sock(sk);
|
|
|
|
ip6_flush_pending_frames(sk);
|
|
|
|
release_sock(sk);
|
2008-06-13 05:47:58 +08:00
|
|
|
|
2008-06-15 08:04:49 +08:00
|
|
|
inet6_destroy_sock(sk);
|
2008-06-05 06:16:12 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int rawv6_init_sk(struct sock *sk)
|
|
|
|
{
|
2007-02-07 16:07:39 +08:00
|
|
|
struct raw6_sock *rp = raw6_sk(sk);
|
|
|
|
|
2009-10-15 14:30:45 +08:00
|
|
|
switch (inet_sk(sk)->inet_num) {
|
2007-02-07 16:07:39 +08:00
|
|
|
case IPPROTO_ICMPV6:
|
2005-04-17 06:20:36 +08:00
|
|
|
rp->checksum = 1;
|
|
|
|
rp->offset = 2;
|
2007-02-07 16:07:39 +08:00
|
|
|
break;
|
|
|
|
case IPPROTO_MH:
|
|
|
|
rp->checksum = 1;
|
|
|
|
rp->offset = 4;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2010-09-23 04:43:57 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
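The defaults chosen above correspond to where the 16-bit checksum sits in each header: byte offset 2 in the ICMPv6 header and byte offset 4 in the Mobility Header. A small compile-time check of the ICMPv6 case, using the userspace struct purely as an illustration:

#include <stddef.h>
#include <netinet/icmp6.h>

_Static_assert(offsetof(struct icmp6_hdr, icmp6_cksum) == 2,
	       "ICMPv6 checksum field lives at byte offset 2");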
|
|
|
|
|
|
|
|
struct proto rawv6_prot = {
|
2006-03-21 14:48:35 +08:00
|
|
|
.name = "RAWv6",
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.close = rawv6_close,
|
2008-06-05 06:16:12 +08:00
|
|
|
.destroy = raw6_destroy,
|
2006-03-21 14:48:35 +08:00
|
|
|
.connect = ip6_datagram_connect,
|
|
|
|
.disconnect = udp_disconnect,
|
|
|
|
.ioctl = rawv6_ioctl,
|
|
|
|
.init = rawv6_init_sk,
|
|
|
|
.setsockopt = rawv6_setsockopt,
|
|
|
|
.getsockopt = rawv6_getsockopt,
|
|
|
|
.sendmsg = rawv6_sendmsg,
|
|
|
|
.recvmsg = rawv6_recvmsg,
|
|
|
|
.bind = rawv6_bind,
|
|
|
|
.backlog_rcv = rawv6_rcv_skb,
|
2008-03-23 07:56:51 +08:00
|
|
|
.hash = raw_hash_sk,
|
|
|
|
.unhash = raw_unhash_sk,
|
2006-03-21 14:48:35 +08:00
|
|
|
.obj_size = sizeof(struct raw6_sock),
|
2008-03-23 07:56:51 +08:00
|
|
|
.h.raw_hash = &raw_v6_hashinfo,
|
2006-03-21 14:45:21 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
2006-03-21 14:48:35 +08:00
|
|
|
.compat_setsockopt = compat_rawv6_setsockopt,
|
|
|
|
.compat_getsockopt = compat_rawv6_getsockopt,
|
2011-02-04 09:59:32 +08:00
|
|
|
.compat_ioctl = compat_rawv6_ioctl,
|
2006-03-21 14:45:21 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
|
|
static int raw6_seq_show(struct seq_file *seq, void *v)
|
|
|
|
{
|
2013-05-31 23:05:48 +08:00
|
|
|
if (v == SEQ_START_TOKEN) {
|
|
|
|
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
|
|
|
|
} else {
|
|
|
|
struct sock *sp = v;
|
|
|
|
__u16 srcp = inet_sk(sp)->inet_num;
|
|
|
|
ip6_dgram_sock_seq_show(seq, v, srcp, 0,
|
|
|
|
raw_seq_private(seq)->bucket);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-07-11 14:07:31 +08:00
|
|
|
static const struct seq_operations raw6_seq_ops = {
|
2007-11-20 14:38:33 +08:00
|
|
|
.start = raw_seq_start,
|
|
|
|
.next = raw_seq_next,
|
|
|
|
.stop = raw_seq_stop,
|
2005-04-17 06:20:36 +08:00
|
|
|
.show = raw6_seq_show,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int raw6_seq_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2008-01-31 19:48:55 +08:00
|
|
|
return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-02-12 16:55:35 +08:00
|
|
|
static const struct file_operations raw6_seq_fops = {
|
2005-04-17 06:20:36 +08:00
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.open = raw6_seq_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
2008-01-14 21:35:57 +08:00
|
|
|
.release = seq_release_net,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
2010-01-17 11:35:32 +08:00
|
|
|
static int __net_init raw6_init_net(struct net *net)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-02-18 09:34:54 +08:00
|
|
|
if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -ENOMEM;
|
2008-01-14 21:36:50 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
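The pernet init above exposes the raw6 socket table as /proc/net/raw6 in each network namespace. A userspace sketch of reading it (the helper name is an illustrative assumption):

#include <stdio.h>

void dump_raw6_table(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/raw6", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}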
|
|
|
|
|
2010-01-17 11:35:32 +08:00
|
|
|
static void __net_exit raw6_exit_net(struct net *net)
|
2008-01-14 21:36:50 +08:00
|
|
|
{
|
2013-02-18 09:34:56 +08:00
|
|
|
remove_proc_entry("raw6", net->proc_net);
|
2008-01-14 21:36:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct pernet_operations raw6_net_ops = {
|
|
|
|
.init = raw6_init_net,
|
|
|
|
.exit = raw6_exit_net,
|
|
|
|
};
|
|
|
|
|
|
|
|
int __init raw6_proc_init(void)
|
|
|
|
{
|
|
|
|
return register_pernet_subsys(&raw6_net_ops);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
void raw6_proc_exit(void)
|
|
|
|
{
|
2008-01-14 21:36:50 +08:00
|
|
|
unregister_pernet_subsys(&raw6_net_ops);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_PROC_FS */
|
2007-12-11 18:25:35 +08:00
|
|
|
|
|
|
|
/* Same as inet6_dgram_ops, sans udp_poll. */
|
|
|
|
static const struct proto_ops inet6_sockraw_ops = {
|
|
|
|
.family = PF_INET6,
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.release = inet6_release,
|
|
|
|
.bind = inet6_bind,
|
|
|
|
.connect = inet_dgram_connect, /* ok */
|
|
|
|
.socketpair = sock_no_socketpair, /* a do nothing */
|
|
|
|
.accept = sock_no_accept, /* a do nothing */
|
|
|
|
.getname = inet6_getname,
|
|
|
|
.poll = datagram_poll, /* ok */
|
|
|
|
.ioctl = inet6_ioctl, /* must change */
|
|
|
|
.listen = sock_no_listen, /* ok */
|
|
|
|
.shutdown = inet_shutdown, /* ok */
|
|
|
|
.setsockopt = sock_common_setsockopt, /* ok */
|
|
|
|
.getsockopt = sock_common_getsockopt, /* ok */
|
|
|
|
.sendmsg = inet_sendmsg, /* ok */
|
|
|
|
.recvmsg = sock_common_recvmsg, /* ok */
|
|
|
|
.mmap = sock_no_mmap,
|
|
|
|
.sendpage = sock_no_sendpage,
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
.compat_setsockopt = compat_sock_common_setsockopt,
|
|
|
|
.compat_getsockopt = compat_sock_common_getsockopt,
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct inet_protosw rawv6_protosw = {
|
|
|
|
.type = SOCK_RAW,
|
|
|
|
.protocol = IPPROTO_IP, /* wild card */
|
|
|
|
.prot = &rawv6_prot,
|
|
|
|
.ops = &inet6_sockraw_ops,
|
|
|
|
.no_check = UDP_CSUM_DEFAULT,
|
|
|
|
.flags = INET_PROTOSW_REUSE,
|
|
|
|
};
|
|
|
|
|
|
|
|
int __init rawv6_init(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = inet6_register_protosw(&rawv6_protosw);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-12-13 21:34:58 +08:00
|
|
|
void rawv6_exit(void)
|
2007-12-11 18:25:35 +08:00
|
|
|
{
|
|
|
|
inet6_unregister_protosw(&rawv6_protosw);
|
|
|
|
}
|