linux_old1/include/linux/skbuff.h


/*
* Definitions for the 'struct sk_buff' memory handlers.
*
* Authors:
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Florian La Roche, <rzsfl@rz.uni-sb.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>
#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#define HAVE_ALLOC_SKB /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3
#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X) \
(((X) - sizeof(struct skb_shared_info)) & \
~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
/* A. Checksumming of received packets by device.
*
* NONE: device failed to checksum this packet.
* skb->csum is undefined.
*
* UNNECESSARY: device parsed the packet and claims to have verified the
* checksum. skb->csum is undefined.
* It is a bad option, but, unfortunately, many vendors do this.
* Apparently with the secret goal of selling you a new device when you
* add a new protocol to your host, e.g. IPv6. 8)
*
* COMPLETE: the most generic way. The device supplied the checksum of the
* _entire_ packet, as seen by netif_rx, in skb->csum.
* NOTE: Even if a device supports only some protocols but is able to
* produce some skb->csum, it MUST use COMPLETE,
* not UNNECESSARY.
*
* B. Checksumming on output.
*
* NONE: skb is checksummed by protocol or csum is not required.
*
* PARTIAL: device is required to csum packet as seen by hard_start_xmit
* from skb->transport_header to the end and to record the checksum
* at skb->transport_header + skb->csum.
*
* Device must show its capabilities in dev->features, set
* at device setup time.
* NETIF_F_HW_CSUM - a clever device; it is able to checksum
* everything.
* NETIF_F_NO_CSUM - loopback or reliable single-hop media.
* NETIF_F_IP_CSUM - the device is dumb. It is able to csum only
* TCP/UDP over IPv4. Sigh. Vendors like it this
* way for some unknown reason. Though, see the comment above
* about CHECKSUM_UNNECESSARY. 8)
*
* Any questions? No questions, good. --ANK
*/
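/*
 * Illustrative sketch (not part of the original header): how a driver
 * might use these values. "rx_csum_ok()", "desc" and "hw_csum" are
 * made-up names for this example; only skb->ip_summed, skb->csum,
 * skb->csum_start, skb->csum_offset and CHECKSUM_* are real.
 *
 *	A device that checksums the whole packet in hardware:
 *		skb->csum = hw_csum;
 *		skb->ip_summed = CHECKSUM_COMPLETE;
 *
 *	A device that can only report "checksum verified":
 *		skb->ip_summed = rx_csum_ok(desc) ? CHECKSUM_UNNECESSARY
 *						  : CHECKSUM_NONE;
 *
 *	On transmit, a protocol wanting the device to finish the checksum
 *	sets CHECKSUM_PARTIAL and records the location through
 *	skb->csum_start and skb->csum_offset.
 */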
struct net_device;
struct scatterlist;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
atomic_t use;
};
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
atomic_t use;
struct net_device *physindev;
struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
struct net_device *netoutdev;
#endif
unsigned int mask;
unsigned long data[32 / sizeof(unsigned long)];
};
#endif
struct sk_buff_head {
/* These two members must be first. */
struct sk_buff *next;
struct sk_buff *prev;
__u32 qlen;
spinlock_t lock;
};
struct sk_buff;
/* To allow a 64K frame to be packed as a single skb without a frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct {
struct page *page;
__u16 page_offset;
__u16 size;
};
/* This data is invariant across clones and lives at
* the end of the header data, i.e. at skb->end.
*/
struct skb_shared_info {
atomic_t dataref;
unsigned short nr_frags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
unsigned short gso_type;
__be32 ip6_frag_id;
struct sk_buff *frag_list;
skb_frag_t frags[MAX_SKB_FRAGS];
};
/* We divide dataref into two halves. The higher 16 bits hold references
* to the payload part of skb->data. The lower 16 bits hold references to
* the entire skb->data. A clone of a headerless skb holds the length of
* the header in skb->hdr_len.
*
* All users must obey the rule that the skb->data reference count must be
* greater than or equal to the payload reference count.
*
* Holding a reference to the payload part means that the user does not
* care about modifications to the header part of skb->data.
*/
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
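/*
 * Illustrative sketch (not part of the original header): decoding the two
 * halves of dataref described above. This mirrors what skb_header_cloned()
 * below computes; "payload_refs" and "total_refs" are local names made up
 * for the example.
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *
 * The header part of skb->data may be modified only when
 * total_refs - payload_refs == 1, i.e. nobody else holds a reference
 * that covers the header.
 */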
enum {
SKB_FCLONE_UNAVAILABLE,
SKB_FCLONE_ORIG,
SKB_FCLONE_CLONE,
};
enum {
SKB_GSO_TCPV4 = 1 << 0,
SKB_GSO_UDP = 1 << 1,
/* This indicates the skb is from an untrusted source. */
SKB_GSO_DODGY = 1 << 2,
/* This indicates the tcp segment has CWR set. */
SKB_GSO_TCP_ECN = 1 << 3,
SKB_GSO_TCPV6 = 1 << 4,
};
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif
#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
* @sk: Socket we are owned by
* @tstamp: Time we arrived
* @dev: Device we arrived on/are leaving by
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
* @dst: destination entry
* @sp: the security path, used for xfrm
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @len: Length of actual data
* @data_len: Data length
* @mac_len: Length of link layer header
* @hdr_len: writable header length of cloned skb
* @csum: Checksum (must include start/offset pair)
* @csum_start: Offset from skb->head where checksumming should start
* @csum_offset: Offset from csum_start where checksum should be stored
* @local_df: allow local fragmentation
* @cloned: Head may be cloned (check refcnt to be sure)
* @nohdr: Payload reference only, must not modify header
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ip_summed: Driver fed us an IP checksum
* @priority: Packet queueing priority
* @users: User count - see {datagram,tcp}.c
* @protocol: Packet protocol from driver
* @truesize: Buffer size
* @head: Head of buffer
* @data: Data head pointer
* @tail: Tail pointer
* @end: End pointer
* @destructor: Destruct function
* @mark: Generic packet mark
* @nfct: Associated connection, if any
* @ipvs_property: skbuff is owned by ipvs
* @nf_trace: netfilter packet trace flag
* @nfctinfo: Relationship of this skb to the connection
* @nfct_reasm: netfilter conntrack re-assembly pointer
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @iif: ifindex of device we arrived on
* @queue_mapping: Queue mapping for multiqueue devices
* @tc_index: Traffic control index
* @tc_verd: traffic control verdict
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
*/
struct sk_buff {
/* These two members must be first. */
struct sk_buff *next;
struct sk_buff *prev;
struct sock *sk;
ktime_t tstamp;
struct net_device *dev;
struct dst_entry *dst;
struct sec_path *sp;
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
char cb[48];
unsigned int len,
data_len;
__u16 mac_len,
hdr_len;
union {
__wsum csum;
struct {
__u16 csum_start;
__u16 csum_offset;
};
};
__u32 priority;
__u8 local_df:1,
cloned:1,
ip_summed:2,
nohdr:1,
nfctinfo:3;
__u8 pkt_type:3,
fclone:2,
ipvs_property:1,
nf_trace:1;
__be16 protocol;
void (*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack *nfct;
struct sk_buff *nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
int iif;
__u16 queue_mapping;
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
__u16 tc_verd; /* traffic control verdict */
#endif
#endif
/* 2 byte hole */
#ifdef CONFIG_NET_DMA
dma_cookie_t dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
#endif
__u32 mark;
sk_buff_data_t transport_header;
sk_buff_data_t network_header;
sk_buff_data_t mac_header;
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
sk_buff_data_t end;
unsigned char *head,
*data;
unsigned int truesize;
atomic_t users;
};
#ifdef __KERNEL__
/*
* Handling routines are only of interest to the kernel
*/
#include <linux/slab.h>
#include <asm/system.h>
extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 0, -1);
}
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 1, -1);
}
extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
gfp_t gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb,
int nhead, int ntail,
gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
struct scatterlist *sg, int offset,
int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) kfree_skb(a)
extern void skb_over_panic(struct sk_buff *skb, int len,
void *here);
extern void skb_under_panic(struct sk_buff *skb, int len,
void *here);
extern void skb_truesize_bug(struct sk_buff *skb);
static inline void skb_truesize_check(struct sk_buff *skb)
{
if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
skb_truesize_bug(skb);
}
extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int getfrag(void *from, char *to, int offset,
int len,int odd, struct sk_buff *skb),
void *from, int length);
struct skb_seq_state
{
__u32 lower_offset;
__u32 upper_offset;
__u32 frag_idx;
__u32 stepped_offset;
struct sk_buff *root_skb;
struct sk_buff *cur_skb;
__u8 *frag_data;
};
extern void skb_prepare_seq_read(struct sk_buff *skb,
unsigned int from, unsigned int to,
struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);
extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config,
struct ts_state *state);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
return skb->end;
}
#endif
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
*
* Returns true if the queue is empty, false otherwise.
*/
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
return list->next == (struct sk_buff *)list;
}
/**
* skb_get - reference buffer
* @skb: buffer to reference
*
* Makes another reference to a socket buffer and returns a pointer
* to the buffer.
*/
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
atomic_inc(&skb->users);
return skb;
}
/*
* If users == 1, we are the only owner and can avoid a redundant
* atomic change.
*/
/**
* skb_cloned - is the buffer a clone
* @skb: buffer to check
*
* Returns true if the buffer was generated with skb_clone() and is
* one of multiple shared copies of the buffer. Cloned buffers are
* shared data so must not be written to under normal circumstances.
*/
static inline int skb_cloned(const struct sk_buff *skb)
{
return skb->cloned &&
(atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
*
* Returns true if modifying the header part of the buffer requires
* the data to be copied.
*/
static inline int skb_header_cloned(const struct sk_buff *skb)
{
int dataref;
if (!skb->cloned)
return 0;
dataref = atomic_read(&skb_shinfo(skb)->dataref);
dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
return dataref != 1;
}
/**
* skb_header_release - release reference to header
* @skb: buffer to operate on
*
* Drop a reference to the header part of the buffer. This is done
* by acquiring a payload reference. You must not read from the header
* part of skb->data after this.
*/
static inline void skb_header_release(struct sk_buff *skb)
{
BUG_ON(skb->nohdr);
skb->nohdr = 1;
atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}
/**
* skb_shared - is the buffer shared
* @skb: buffer to check
*
* Returns true if more than one person has a reference to this
* buffer.
*/
static inline int skb_shared(const struct sk_buff *skb)
{
return atomic_read(&skb->users) != 1;
}
/**
* skb_share_check - check if buffer is shared and if so clone it
* @skb: buffer to check
* @pri: priority for memory allocation
*
* If the buffer is shared the buffer is cloned and the old copy
* drops a reference. A new clone with a single reference is returned.
* If the buffer is not shared the original buffer is returned. When
* called from interrupt context or with spinlocks held, @pri must
* be %GFP_ATOMIC.
*
* NULL is returned on a memory allocation failure.
*/
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
gfp_t pri)
{
might_sleep_if(pri & __GFP_WAIT);
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);
kfree_skb(skb);
skb = nskb;
}
return skb;
}
/*
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and are also
* forwarding, plus a couple of other messy ones. The normal case is
* tcpdumping a packet that's being forwarded.
*/
/**
* skb_unshare - make a copy of a shared buffer
* @skb: buffer to check
* @pri: priority for memory allocation
*
* If the socket buffer is a clone then this function creates a new
* copy of the data, drops a reference count on the old copy and returns
* the new copy with the reference count at 1. If the buffer is not a clone
* the original buffer is returned. When called with a spinlock held or
* from interrupt context, @pri must be %GFP_ATOMIC.
*
* %NULL is returned on a memory allocation failure.
*/
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
gfp_t pri)
{
might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
kfree_skb(skb); /* Free our shared copy */
skb = nskb;
}
return skb;
}
/**
* skb_peek
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
* be careful with this one. A peek leaves the buffer on the
* list and someone else may run off with it. You must hold
* the appropriate locks or have a private queue to do this.
*
* Returns %NULL for an empty list or a pointer to the head element.
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->next;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
}
/**
* skb_peek_tail
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
* be careful with this one. A peek leaves the buffer on the
* list and someone else may run off with it. You must hold
* the appropriate locks or have a private queue to do this.
*
* Returns %NULL for an empty list or a pointer to the tail element.
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->prev;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
}
/**
* skb_queue_len - get queue length
* @list_: list to measure
*
* Return the length of an &sk_buff queue.
*/
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
return list_->qlen;
}
/*
* This function creates a split out lock class for each invocation;
* this is needed for now since a whole lot of users of the skb-queue
* infrastructure in drivers have different locking usage (in hardirq)
* than the networking core (in softirq only). In the long run either the
* network layer or the drivers will need annotation to consolidate the
* main types of usage into 3 classes.
*/
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
skb_queue_head_init(list);
lockdep_set_class(&list->lock, class);
}
/*
* Insert an sk_buff at the start of a list.
*
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
/**
* __skb_queue_after - queue a buffer after a given buffer in the list
* @list: list to use
* @prev: place after this buffer
* @newsk: buffer to queue
*
* Queue a buffer in the middle of a list. This function takes no locks
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
*/
static inline void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
struct sk_buff *next;
list->qlen++;
next = prev->next;
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
}
/**
* __skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the start of a list. This function takes no locks
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
*/
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
/**
* __skb_queue_tail - queue a buffer at the list tail
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the end of a list. This function takes no locks
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
*/
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
struct sk_buff *prev, *next;
list->qlen++;
next = (struct sk_buff *)list;
prev = next->prev;
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
}
/**
* __skb_dequeue - remove from the head of the queue
* @list: list to dequeue from
*
* Remove the head of the list. This function does not take any locks
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *next, *prev, *result;
prev = (struct sk_buff *) list;
next = prev->next;
result = NULL;
if (next != prev) {
result = next;
next = next->next;
list->qlen--;
next->prev = prev;
prev->next = next;
result->next = result->prev = NULL;
}
return result;
}
/*
* Insert a packet on a list.
*/
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
list->qlen++;
}
/*
* Place a packet after a given packet in a list.
*/
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
__skb_insert(newsk, old, old->next, list);
}
/*
* remove sk_buff from list. _Must_ be called atomically, and with
* the list known.
*/
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
list->qlen--;
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
next->prev = prev;
prev->next = next;
}
/* XXX: more streamlined implementation */
/**
* __skb_dequeue_tail - remove from the tail of the queue
* @list: list to dequeue from
*
* Remove the tail of the list. This function does not take any locks
* so must be used with appropriate locks held only. The tail item is
* returned or %NULL if the list is empty.
*/
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
if (skb)
__skb_unlink(skb, list);
return skb;
}
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
return skb->data_len;
}
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
return skb->len - skb->data_len;
}
static inline int skb_pagelen(const struct sk_buff *skb)
{
int i, len = 0;
for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
len += skb_shinfo(skb)->frags[i].size;
return len + skb_headlen(skb);
}
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag->page = page;
frag->page_offset = off;
frag->size = size;
skb_shinfo(skb)->nr_frags = i + 1;
}
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
return skb->head + skb->tail;
}
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
skb->tail = skb->data - skb->head;
}
static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
skb_reset_tail_pointer(skb);
skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
return skb->tail;
}
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
skb->tail = skb->data;
}
static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
skb->tail = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
/*
* Add data to an sk_buff
*/
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
return tmp;
}
/**
* skb_put - add data to a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer. If this would
* exceed the total buffer size the kernel will panic. A pointer to the
* first byte of the extra data is returned.
*/
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
if (unlikely(skb->tail > skb->end))
skb_over_panic(skb, len, current_text_addr());
return tmp;
}
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
return skb->data;
}
/**
* skb_push - add data to the start of a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer at the buffer
* start. If this would exceed the total buffer headroom the kernel will
* panic. A pointer to the first byte of the extra data is returned.
*/
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
if (unlikely(skb->data<skb->head))
skb_under_panic(skb, len, current_text_addr());
return skb->data;
}
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
BUG_ON(skb->len < skb->data_len);
return skb->data += len;
}
/**
* skb_pull - remove data from the start of a buffer
* @skb: buffer to use
* @len: amount of data to remove
*
* This function removes data from the start of a buffer, returning
* the memory to the headroom. A pointer to the next data in the buffer
* is returned. Once the data has been pulled future pushes will overwrite
* the old data.
*/
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
!__pskb_pull_tail(skb, len-skb_headlen(skb)))
return NULL;
skb->len -= len;
return skb->data += len;
}
static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}
static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
if (likely(len <= skb_headlen(skb)))
return 1;
if (unlikely(len > skb->len))
return 0;
return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
}
/**
* skb_headroom - bytes at buffer head
* @skb: buffer to check
*
* Return the number of bytes of free space at the head of an &sk_buff.
*/
static inline int skb_headroom(const struct sk_buff *skb)
{
return skb->data - skb->head;
}
/**
* skb_tailroom - bytes at buffer end
* @skb: buffer to check
*
* Return the number of bytes of free space at the tail of an sk_buff
*/
static inline int skb_tailroom(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}
/**
* skb_reserve - adjust headroom
* @skb: buffer to alter
* @len: bytes to move
*
* Increase the headroom of an empty &sk_buff by reducing the tail
* room. This is only allowed for an empty buffer.
*/
static inline void skb_reserve(struct sk_buff *skb, int len)
{
skb->data += len;
skb->tail += len;
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
return skb->head + skb->transport_header;
}
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
skb->transport_header = skb->data - skb->head;
}
static inline void skb_set_transport_header(struct sk_buff *skb,
const int offset)
{
skb_reset_transport_header(skb);
skb->transport_header += offset;
}
static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
return skb->head + skb->network_header;
}
static inline void skb_reset_network_header(struct sk_buff *skb)
{
skb->network_header = skb->data - skb->head;
}
static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
skb_reset_network_header(skb);
skb->network_header += offset;
}
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->mac_header;
}
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != ~0U;
}
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
skb->mac_header = skb->data - skb->head;
}
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
skb_reset_mac_header(skb);
skb->mac_header += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
return skb->transport_header;
}
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
skb->transport_header = skb->data;
}
static inline void skb_set_transport_header(struct sk_buff *skb,
const int offset)
{
skb->transport_header = skb->data + offset;
}
static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
return skb->network_header;
}
static inline void skb_reset_network_header(struct sk_buff *skb)
{
skb->network_header = skb->data;
}
static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
skb->network_header = skb->data + offset;
}
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
return skb->mac_header;
}
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != NULL;
}
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
skb->mac_header = skb->data;
}
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
static inline int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
}
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
return skb->transport_header - skb->network_header;
}
static inline int skb_network_offset(const struct sk_buff *skb)
{
return skb_network_header(skb) - skb->data;
}
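/*
 * Illustrative only (not part of the original header): once a receive
 * path has recorded both header marks, the helpers above recover the
 * usual offsets without touching raw pointers. The sketch assumes
 * skb->data currently points at the start of a plain IPv4 header with
 * no options:
 *
 *	skb_reset_network_header(skb);
 *	skb_set_transport_header(skb, ip_hdr(skb)->ihl * 4);
 *
 *	skb_network_offset(skb);	-> 0
 *	skb_network_header_len(skb);	-> 20, the IP header length
 *	skb_transport_offset(skb);	-> 20
 */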
/*
* CPUs often take a performance hit when accessing unaligned memory
* locations. The actual performance hit varies, it can be small if the
* hardware handles it or large if we have to take an exception and fix it
* in software.
*
* Since an ethernet header is 14 bytes network drivers often end up with
* the IP header at an unaligned offset. The IP header can be aligned by
* shifting the start of the packet by 2 bytes. Drivers should do this
* with:
*
 * skb_reserve(skb, NET_IP_ALIGN);
*
* The downside to this alignment of the IP header is that the DMA is now
* unaligned. On some architectures the cost of an unaligned DMA is high
* and this cost outweighs the gains made by aligning the IP header.
*
* Since this trade off varies between architectures, we allow NET_IP_ALIGN
* to be overridden.
*/
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif
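/*
 * A minimal receive-allocation sketch (hypothetical driver code, not
 * part of this header; "pkt_len" and "rx_buffer" are assumed locals):
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buffer, pkt_len);
 *
 * Reserving the two bytes before copying the frame leaves the 14 byte
 * Ethernet header unaligned but the IP header that follows it aligned.
 */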
/*
* The networking layer reserves some headroom in skb data (via
* dev_alloc_skb). This is used to avoid having to reallocate skb data when
* the header has to grow. In the default case, if the header has to grow
* 16 bytes or less we avoid the reallocation.
*
* Unfortunately this headroom changes the DMA alignment of the resulting
* network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
* on some architectures. An architecture can override this value,
* perhaps setting it to a cacheline in size (since that will maintain
* cacheline alignment of the DMA). It must be a power of 2.
*
* Various parts of the networking layer expect at least 16 bytes of
* headroom, you should not reduce this.
*/
#ifndef NET_SKB_PAD
#define NET_SKB_PAD 16
#endif
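/*
 * A hypothetical override, for an architecture where unaligned DMA is
 * expensive enough to justify a full cacheline of headroom (the value
 * must remain a power of 2 and must not drop below 16):
 *
 *	#define NET_SKB_PAD	L1_CACHE_BYTES
 */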
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
if (unlikely(skb->data_len)) {
WARN_ON(1);
return;
}
skb->len = len;
skb_set_tail_pointer(skb, len);
}
/**
* skb_trim - remove end from a buffer
* @skb: buffer to alter
* @len: new length
*
* Cut the length of a buffer down by removing data from the tail. If
* the buffer is already under the length specified it is not modified.
* The skb must be linear.
*/
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->len > len)
__skb_trim(skb, len);
}
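/*
 * Usage sketch (not from this file): a driver that has received a
 * padded minimum-size Ethernet frame can drop the trailing pad bytes
 * once the real length is known from the protocol headers;
 * "real_len" is an assumed local:
 *
 *	skb_trim(skb, real_len);
 */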
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->data_len)
return ___pskb_trim(skb, len);
__skb_trim(skb, len);
return 0;
}
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
/**
* pskb_trim_unique - remove end from a paged unique (not cloned) buffer
* @skb: buffer to alter
* @len: new length
*
* This is identical to pskb_trim except that the caller knows that
* the skb is not cloned so we should never get an error due to out-
* of-memory.
*/
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
int err = pskb_trim(skb, len);
BUG_ON(err);
}
/**
* skb_orphan - orphan a buffer
* @skb: buffer to orphan
*
* If a buffer currently has an owner then we call the owner's
* destructor function and make the @skb unowned. The buffer continues
* to exist but is no longer charged to its former owner.
*/
static inline void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor)
skb->destructor(skb);
skb->destructor = NULL;
skb->sk = NULL;
}
/**
* __skb_queue_purge - empty a list
* @list: list to empty
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
kfree_skb(skb);
}
/**
* __dev_alloc_skb - allocate an skbuff for receiving
* @length: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has unspecified headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory.
*/
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
gfp_t gfp_mask)
{
struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
if (likely(skb))
skb_reserve(skb, NET_SKB_PAD);
return skb;
}
/**
* dev_alloc_skb - allocate an skbuff for receiving
* @length: length to allocate
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has unspecified headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory. Although this function
* allocates memory it can be called from an interrupt.
*/
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
return __dev_alloc_skb(length, GFP_ATOMIC);
}
extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int length, gfp_t gfp_mask);
/**
* netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has unspecified headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory. Although this function
* allocates memory it can be called from an interrupt.
*/
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
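/*
 * Typical rx-refill sketch (hypothetical driver; "rx_buf_len" is an
 * assumed per-driver constant). On allocation failure the driver
 * simply stops refilling and tries again later:
 *
 *	skb = netdev_alloc_skb(dev, rx_buf_len + NET_IP_ALIGN);
 *	if (skb == NULL)
 *		break;
 *	skb_reserve(skb, NET_IP_ALIGN);
 */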
/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
* @len: length up to which to write
*
* Returns true if modifying the header part of the cloned buffer
 * does not require the data to be copied.
*/
static inline int skb_clone_writable(struct sk_buff *skb, int len)
{
return !skb_header_cloned(skb) &&
skb_headroom(skb) + len <= skb->hdr_len;
}
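/*
 * Sketch of the intended pattern (mirrors what NAT-style users do;
 * "hdrlen" is an assumed local such as sizeof(struct tcphdr)): only
 * copy the data when the clone does not already own the header area:
 *
 *	if (!skb_clone_writable(skb, skb_transport_offset(skb) + hdrlen) &&
 *	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 *		goto drop;
 *	... now safe to rewrite the transport header in place ...
 */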
/**
* skb_cow - copy header of skb when it is required
* @skb: buffer to cow
* @headroom: needed headroom
*
* If the skb passed lacks sufficient headroom or its data part
* is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
*
* The result is skb with writable area skb->head...skb->tail
* and at least @headroom of space at head.
*/
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
skb_headroom(skb);
if (delta < 0)
delta = 0;
if (delta || skb_cloned(skb))
return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
return 0;
}
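/*
 * Illustrative call (sketch): a sender that needs to push an extra
 * header of "hlen" bytes (an assumed local) makes sure the buffer is
 * private and has the room first:
 *
 *	if (skb_cow(skb, hlen))
 *		goto drop;
 *	hdr = skb_push(skb, hlen);
 */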
/**
* skb_padto - pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
if (likely(size >= len))
return 0;
return skb_pad(skb, len-size);
}
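/*
 * Common transmit-path use (sketch): hardware that cannot pad short
 * frames itself asks for the minimum Ethernet frame length before
 * queueing. The early return does not free the skb because
 * skb_padto() already freed it on failure:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */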
static inline int skb_add_data(struct sk_buff *skb,
char __user *from, int copy)
{
const int off = skb->len;
if (skb->ip_summed == CHECKSUM_NONE) {
int err = 0;
__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
copy, 0, &err);
if (!err) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
} else if (!copy_from_user(skb_put(skb, copy), from, copy))
return 0;
__skb_trim(skb, off);
return -EFAULT;
}
static inline int skb_can_coalesce(struct sk_buff *skb, int i,
struct page *page, int off)
{
if (i) {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
return page == frag->page &&
off == frag->page_offset + frag->size;
}
return 0;
}
static inline int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}
/**
* skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
*
* If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released.
*/
static inline int skb_linearize(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
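/*
 * Example (sketch): code that wants to scan the whole packet through
 * skb->data, for instance a simple software filter, must make the
 * buffer linear first; "inspect" is a hypothetical helper:
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	inspect(skb->data, skb->len);
 */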
/**
* skb_linearize_cow - make sure skb is linear and writable
* @skb: buffer to process
*
* If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released.
*/
static inline int skb_linearize_cow(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) || skb_cloned(skb) ?
__skb_linearize(skb) : 0;
}
/**
* skb_postpull_rcsum - update checksum for received skb after pull
* @skb: buffer to update
* @start: start of data before pull
* @len: length of data pulled
*
* After doing a pull on a received packet, you need to call this to
* update the CHECKSUM_COMPLETE checksum, or set ip_summed to
* CHECKSUM_NONE so that it can be recomputed from scratch.
*/
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
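/*
 * Typical sequence (sketch; "hlen" is an assumed local): after pulling
 * an encapsulation header off a CHECKSUM_COMPLETE packet, feed the
 * pulled bytes back so skb->csum stays correct. skb_pull_rcsum()
 * below combines the two steps:
 *
 *	start = skb->data;
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 */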
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
* @len: new length
*
* This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
*/
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (likely(len >= skb->len))
return 0;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
return __pskb_trim(skb, len);
}
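/*
 * Illustrative use (sketch, loosely mirroring the IPv4 receive path):
 * trim the buffer down to the length claimed by the IP header without
 * invalidating a CHECKSUM_COMPLETE value:
 *
 *	if (pskb_trim_rcsum(skb, ntohs(ip_hdr(skb)->tot_len)))
 *		goto drop;
 */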
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
skb = skb->next)
#define skb_queue_walk_safe(queue, skb, tmp) \
for (skb = (queue)->next, tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->next)
#define skb_queue_reverse_walk(queue, skb) \
for (skb = (queue)->prev; \
prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
skb = skb->prev)
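/*
 * Usage sketch: walk a queue under its lock and sum the queued bytes;
 * "list", "skb", "flags" and "bytes" are assumed locals. Use
 * skb_queue_walk_safe() instead when entries may be unlinked while
 * walking:
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb_queue_walk(list, skb)
 *		bytes += skb->len;
 *	spin_unlock_irqrestore(&list->lock, flags);
 */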
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
int offset, struct iovec *to,
int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen,
struct iovec *iov);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
int offset, u8 *to, int len,
__wsum csum);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
{
int hlen = skb_headlen(skb);
if (hlen - offset >= len)
return skb->data + offset;
if (skb_copy_bits(skb, offset, buffer, len) < 0)
return NULL;
return buffer;
}
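/*
 * Canonical-style use (sketch): read a UDP header whether or not it
 * lies in the linear area, falling back to a copy into a stack buffer:
 *
 *	struct udphdr _uh, *uh;
 *
 *	uh = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_uh), &_uh);
 *	if (uh == NULL)
 *		goto short_packet;
 */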
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
void *to,
const unsigned int len)
{
memcpy(to, skb->data, len);
}
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
const int offset, void *to,
const unsigned int len)
{
memcpy(to, skb->data + offset, len);
}
static inline void skb_copy_to_linear_data(struct sk_buff *skb,
const void *from,
const unsigned int len)
{
memcpy(skb->data, from, len);
}
static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
const int offset,
const void *from,
const unsigned int len)
{
memcpy(skb->data + offset, from, len);
}
extern void skb_init(void);
/**
* skb_get_timestamp - get timestamp from a skb
* @skb: skb to get stamp from
* @stamp: pointer to struct timeval to store stamp in
*
 * Timestamps are stored in the skb as a ktime_t. This function
 * converts it to a struct timeval and stores it in @stamp.
*/
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
*stamp = ktime_to_timeval(skb->tstamp);
}
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
}
static inline ktime_t net_timedelta(ktime_t t)
{
return ktime_sub(ktime_get_real(), t);
}
static inline ktime_t net_invalid_timestamp(void)
{
return ktime_set(0, 0);
}
extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
return skb->ip_summed & CHECKSUM_UNNECESSARY;
}
/**
* skb_checksum_complete - Calculate checksum of an entire packet
* @skb: packet to process
*
* This function calculates the checksum over the entire packet plus
* the value of skb->csum. The latter can be used to supply the
* checksum of a pseudo header as used by TCP/UDP. It returns the
* checksum.
*
* For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
* packets. In that case the function should return zero if the
* checksum is correct. In particular, this function will return zero
* if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
* hardware has already verified the correctness of the checksum.
*/
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
return skb_csum_unnecessary(skb) ?
0 : __skb_checksum_complete(skb);
}
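/*
 * Receive-path sketch (loosely follows what UDP-style code does;
 * "saddr" and "daddr" are assumed __be32 locals): seed skb->csum with
 * the pseudo header sum, then either trust the hardware result or
 * verify in software:
 *
 *	skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
 *				       IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */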
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
if (nfct && atomic_dec_and_test(&nfct->use))
nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
if (nfct)
atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
if (skb)
atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
if (skb)
kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
if (nf_bridge)
atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
nf_conntrack_put_reasm(skb->nfct_reasm);
skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
}
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
dst->nfct = src->nfct;
nf_conntrack_get(src->nfct);
dst->nfctinfo = src->nfctinfo;
dst->nfct_reasm = src->nfct_reasm;
nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
dst->nf_bridge = src->nf_bridge;
nf_bridge_get(src->nf_bridge);
#endif
}
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(dst->nfct);
nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(dst->nf_bridge);
#endif
__nf_copy(dst, src);
}
#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
to->secmark = from->secmark;
}
static inline void skb_init_secmark(struct sk_buff *skb)
{
skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }
static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
skb->queue_mapping = queue_mapping;
#endif
}
static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
to->queue_mapping = from->queue_mapping;
#endif
}
static inline int skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
}
static inline void skb_forward_csum(struct sk_buff *skb)
{
/* Unfortunately we don't support this one. Any brave souls? */
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */