mirror of https://gitee.com/openkylin/linux.git
[NET] IPV4: Fix whitespace errors.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 642656518b
commit e905a9edab
@ -550,7 +550,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
|
|||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
sock->state = SS_CONNECTING;
|
||||
sock->state = SS_CONNECTING;
|
||||
|
||||
/* Just entered SS_CONNECTING state; the only
|
||||
* difference is that return value in non-blocking
|
||||
|
@ -878,36 +878,36 @@ static struct net_proto_family inet_family_ops = {
|
|||
*/
|
||||
static struct inet_protosw inetsw_array[] =
|
||||
{
|
||||
{
|
||||
.type = SOCK_STREAM,
|
||||
.protocol = IPPROTO_TCP,
|
||||
.prot = &tcp_prot,
|
||||
.ops = &inet_stream_ops,
|
||||
.capability = -1,
|
||||
.no_check = 0,
|
||||
.flags = INET_PROTOSW_PERMANENT |
|
||||
{
|
||||
.type = SOCK_STREAM,
|
||||
.protocol = IPPROTO_TCP,
|
||||
.prot = &tcp_prot,
|
||||
.ops = &inet_stream_ops,
|
||||
.capability = -1,
|
||||
.no_check = 0,
|
||||
.flags = INET_PROTOSW_PERMANENT |
|
||||
INET_PROTOSW_ICSK,
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
.type = SOCK_DGRAM,
|
||||
.protocol = IPPROTO_UDP,
|
||||
.prot = &udp_prot,
|
||||
.ops = &inet_dgram_ops,
|
||||
.capability = -1,
|
||||
.no_check = UDP_CSUM_DEFAULT,
|
||||
.flags = INET_PROTOSW_PERMANENT,
|
||||
{
|
||||
.type = SOCK_DGRAM,
|
||||
.protocol = IPPROTO_UDP,
|
||||
.prot = &udp_prot,
|
||||
.ops = &inet_dgram_ops,
|
||||
.capability = -1,
|
||||
.no_check = UDP_CSUM_DEFAULT,
|
||||
.flags = INET_PROTOSW_PERMANENT,
|
||||
},
|
||||
|
||||
|
||||
|
||||
{
|
||||
.type = SOCK_RAW,
|
||||
.protocol = IPPROTO_IP, /* wild card */
|
||||
.prot = &raw_prot,
|
||||
.ops = &inet_sockraw_ops,
|
||||
.capability = CAP_NET_RAW,
|
||||
.no_check = UDP_CSUM_DEFAULT,
|
||||
.flags = INET_PROTOSW_REUSE,
|
||||
.type = SOCK_RAW,
|
||||
.protocol = IPPROTO_IP, /* wild card */
|
||||
.prot = &raw_prot,
|
||||
.ops = &inet_sockraw_ops,
|
||||
.capability = CAP_NET_RAW,
|
||||
.no_check = UDP_CSUM_DEFAULT,
|
||||
.flags = INET_PROTOSW_REUSE,
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -946,7 +946,7 @@ void inet_register_protosw(struct inet_protosw *p)
|
|||
/* Add the new entry after the last permanent entry if any, so that
|
||||
* the new entry does not override a permanent entry when matched with
|
||||
* a wild-card protocol. But it is allowed to override any existing
|
||||
* non-permanent entry. This means that when we remove this entry, the
|
||||
* non-permanent entry. This means that when we remove this entry, the
|
||||
* system automatically returns to the old behavior.
|
||||
*/
|
||||
list_add_rcu(&p->list, last_perm);
|
||||
|
@ -1073,7 +1073,7 @@ int inet_sk_rebuild_header(struct sock *sk)
|
|||
},
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
security_sk_classify_flow(sk, &fl);
|
||||
err = ip_route_output_flow(&rt, &fl, sk, 0);
|
||||
}
|
||||
|
@ -1273,10 +1273,10 @@ static int __init inet_init(void)
|
|||
goto out_unregister_udp_proto;
|
||||
|
||||
/*
|
||||
* Tell SOCKET that we are alive...
|
||||
* Tell SOCKET that we are alive...
|
||||
*/
|
||||
|
||||
(void)sock_register(&inet_family_ops);
|
||||
(void)sock_register(&inet_family_ops);
|
||||
|
||||
/*
|
||||
* Add all the base protocols.
|
||||
|
@ -1306,9 +1306,9 @@ static int __init inet_init(void)
|
|||
|
||||
arp_init();
|
||||
|
||||
/*
|
||||
* Set the IP module up
|
||||
*/
|
||||
/*
|
||||
* Set the IP module up
|
||||
*/
|
||||
|
||||
ip_init();
|
||||
|
||||
|
@ -1334,11 +1334,11 @@ static int __init inet_init(void)
|
|||
#endif
|
||||
/*
|
||||
* Initialise per-cpu ipv4 mibs
|
||||
*/
|
||||
*/
|
||||
|
||||
if(init_ipv4_mibs())
|
||||
printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ;
|
||||
|
||||
|
||||
ipv4_proc_init();
|
||||
|
||||
ipfrag_init();
|
||||
|
|
|
@ -91,7 +91,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
|
|||
top_iph->check = 0;
|
||||
|
||||
ahp = x->data;
|
||||
ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
|
||||
ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
|
||||
ahp->icv_trunc_len) >> 2) - 2;
|
||||
|
||||
ah->reserved = 0;
|
||||
|
@ -135,9 +135,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
|
|||
ah = (struct ip_auth_hdr*)skb->data;
|
||||
ahp = x->data;
|
||||
ah_hlen = (ah->hdrlen + 2) << 2;
|
||||
|
||||
|
||||
if (ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_full_len) &&
|
||||
ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len))
|
||||
ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len))
|
||||
goto out;
|
||||
|
||||
if (!pskb_may_pull(skb, ah_hlen))
|
||||
|
@ -166,9 +166,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
|
|||
if (ip_clear_mutable_options(iph, &dummy))
|
||||
goto out;
|
||||
}
|
||||
{
|
||||
{
|
||||
u8 auth_data[MAX_AH_AUTH_LEN];
|
||||
|
||||
|
||||
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
|
||||
skb_push(skb, ihl);
|
||||
err = ah_mac_digest(ahp, skb, ah->auth_data);
|
||||
|
@ -237,7 +237,7 @@ static int ah_init_state(struct xfrm_state *x)
|
|||
ahp->tfm = tfm;
|
||||
if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
|
||||
goto error;
|
||||
|
||||
|
||||
/*
|
||||
* Lookup the algorithm description maintained by xfrm_algo,
|
||||
* verify crypto transform properties, and store information
|
||||
|
@ -254,16 +254,16 @@ static int ah_init_state(struct xfrm_state *x)
|
|||
aalg_desc->uinfo.auth.icv_fullbits/8);
|
||||
goto error;
|
||||
}
|
||||
|
||||
|
||||
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
|
||||
ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
|
||||
|
||||
|
||||
BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
|
||||
|
||||
|
||||
ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
|
||||
if (!ahp->work_icv)
|
||||
goto error;
|
||||
|
||||
|
||||
x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len);
|
||||
if (x->props.mode == XFRM_MODE_TUNNEL)
|
||||
x->props.header_len += sizeof(struct iphdr);
|
||||
|
|
|
@ -15,9 +15,9 @@
|
|||
* 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Fixes:
|
||||
* Alan Cox : Removed the Ethernet assumptions in
|
||||
* Alan Cox : Removed the Ethernet assumptions in
|
||||
* Florian's code
|
||||
* Alan Cox : Fixed some small errors in the ARP
|
||||
* Alan Cox : Fixed some small errors in the ARP
|
||||
* logic
|
||||
* Alan Cox : Allow >4K in /proc
|
||||
* Alan Cox : Make ARP add its own protocol entry
|
||||
|
@ -39,18 +39,18 @@
|
|||
* Jonathan Naylor : Only lookup the hardware address for
|
||||
* the correct hardware type.
|
||||
* Germano Caronni : Assorted subtle races.
|
||||
* Craig Schlenter : Don't modify permanent entry
|
||||
* Craig Schlenter : Don't modify permanent entry
|
||||
* during arp_rcv.
|
||||
* Russ Nelson : Tidied up a few bits.
|
||||
* Alexey Kuznetsov: Major changes to caching and behaviour,
|
||||
* eg intelligent arp probing and
|
||||
* eg intelligent arp probing and
|
||||
* generation
|
||||
* of host down events.
|
||||
* Alan Cox : Missing unlock in device events.
|
||||
* Eckes : ARP ioctl control errors.
|
||||
* Alexey Kuznetsov: Arp free fix.
|
||||
* Manuel Rodriguez: Gratuitous ARP.
|
||||
* Jonathan Layes : Added arpd support through kerneld
|
||||
* Jonathan Layes : Added arpd support through kerneld
|
||||
* message queue (960314)
|
||||
* Mike Shaver : /proc/sys/net/ipv4/arp_* support
|
||||
* Mike McLagan : Routing by source
|
||||
|
@ -210,7 +210,7 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
|
|||
case ARPHRD_FDDI:
|
||||
case ARPHRD_IEEE802:
|
||||
ip_eth_mc_map(addr, haddr);
|
||||
return 0;
|
||||
return 0;
|
||||
case ARPHRD_IEEE802_TR:
|
||||
ip_tr_mc_map(addr, haddr);
|
||||
return 0;
|
||||
|
@ -288,7 +288,7 @@ static int arp_constructor(struct neighbour *neigh)
|
|||
switch (dev->type) {
|
||||
default:
|
||||
break;
|
||||
case ARPHRD_ROSE:
|
||||
case ARPHRD_ROSE:
|
||||
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
|
||||
case ARPHRD_AX25:
|
||||
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
|
||||
|
@ -425,18 +425,18 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
|
|||
struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
|
||||
.saddr = tip } } };
|
||||
struct rtable *rt;
|
||||
int flag = 0;
|
||||
int flag = 0;
|
||||
/*unsigned long now; */
|
||||
|
||||
if (ip_route_output_key(&rt, &fl) < 0)
|
||||
if (ip_route_output_key(&rt, &fl) < 0)
|
||||
return 1;
|
||||
if (rt->u.dst.dev != dev) {
|
||||
if (rt->u.dst.dev != dev) {
|
||||
NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
|
||||
flag = 1;
|
||||
}
|
||||
ip_rt_put(rt);
|
||||
return flag;
|
||||
}
|
||||
}
|
||||
ip_rt_put(rt);
|
||||
return flag;
|
||||
}
|
||||
|
||||
/* OBSOLETE FUNCTIONS */
|
||||
|
||||
|
@ -490,7 +490,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
|
|||
n->used = jiffies;
|
||||
if (n->nud_state&NUD_VALID || neigh_event_send(n, skb) == 0) {
|
||||
read_lock_bh(&n->lock);
|
||||
memcpy(haddr, n->ha, dev->addr_len);
|
||||
memcpy(haddr, n->ha, dev->addr_len);
|
||||
read_unlock_bh(&n->lock);
|
||||
neigh_release(n);
|
||||
return 0;
|
||||
|
@ -572,7 +572,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
|
|||
/*
|
||||
* Allocate a buffer
|
||||
*/
|
||||
|
||||
|
||||
skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
|
||||
+ LL_RESERVED_SPACE(dev), GFP_ATOMIC);
|
||||
if (skb == NULL)
|
||||
|
@ -685,7 +685,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
|
|||
/*
|
||||
* No arp on this interface.
|
||||
*/
|
||||
|
||||
|
||||
if (dev->flags&IFF_NOARP)
|
||||
return;
|
||||
|
||||
|
@ -725,7 +725,7 @@ static int arp_process(struct sk_buff *skb)
|
|||
arp = skb->nh.arph;
|
||||
|
||||
switch (dev_type) {
|
||||
default:
|
||||
default:
|
||||
if (arp->ar_pro != htons(ETH_P_IP) ||
|
||||
htons(dev_type) != arp->ar_hrd)
|
||||
goto out;
|
||||
|
@ -792,7 +792,7 @@ static int arp_process(struct sk_buff *skb)
|
|||
tha = arp_ptr;
|
||||
arp_ptr += dev->addr_len;
|
||||
memcpy(&tip, arp_ptr, 4);
|
||||
/*
|
||||
/*
|
||||
* Check for bad requests for 127.x.x.x and requests for multicast
|
||||
* addresses. If this is one such, delete it.
|
||||
*/
|
||||
|
@ -809,16 +809,16 @@ static int arp_process(struct sk_buff *skb)
|
|||
* Process entry. The idea here is we want to send a reply if it is a
|
||||
* request for us or if it is a request for someone else that we hold
|
||||
* a proxy for. We want to add an entry to our cache if it is a reply
|
||||
* to us or if it is a request for our address.
|
||||
* (The assumption for this last is that if someone is requesting our
|
||||
* address, they are probably intending to talk to us, so it saves time
|
||||
* if we cache their address. Their address is also probably not in
|
||||
* to us or if it is a request for our address.
|
||||
* (The assumption for this last is that if someone is requesting our
|
||||
* address, they are probably intending to talk to us, so it saves time
|
||||
* if we cache their address. Their address is also probably not in
|
||||
* our cache, since ours is not in their cache.)
|
||||
*
|
||||
*
|
||||
* Putting this another way, we only care about replies if they are to
|
||||
* us, in which case we add them to the cache. For requests, we care
|
||||
* about those for us and those for our proxies. We reply to both,
|
||||
* and in the case of requests for us we add the requester to the arp
|
||||
* and in the case of requests for us we add the requester to the arp
|
||||
* cache.
|
||||
*/
|
||||
|
||||
|
@ -845,7 +845,7 @@ static int arp_process(struct sk_buff *skb)
|
|||
if (!dont_send)
|
||||
dont_send |= arp_ignore(in_dev,dev,sip,tip);
|
||||
if (!dont_send && IN_DEV_ARPFILTER(in_dev))
|
||||
dont_send |= arp_filter(sip,tip,dev);
|
||||
dont_send |= arp_filter(sip,tip,dev);
|
||||
if (!dont_send)
|
||||
arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
|
||||
|
||||
|
@ -860,7 +860,7 @@ static int arp_process(struct sk_buff *skb)
|
|||
if (n)
|
||||
neigh_release(n);
|
||||
|
||||
if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
|
||||
if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
|
||||
skb->pkt_type == PACKET_HOST ||
|
||||
in_dev->arp_parms->proxy_delay == 0) {
|
||||
arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
|
||||
|
@ -1039,7 +1039,7 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev)
|
|||
if (r->arp_flags & ATF_PERM)
|
||||
state = NUD_PERMANENT;
|
||||
err = neigh_update(neigh, (r->arp_flags&ATF_COM) ?
|
||||
r->arp_ha.sa_data : NULL, state,
|
||||
r->arp_ha.sa_data : NULL, state,
|
||||
NEIGH_UPDATE_F_OVERRIDE|
|
||||
NEIGH_UPDATE_F_ADMIN);
|
||||
neigh_release(neigh);
|
||||
|
@ -1121,7 +1121,7 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev)
|
|||
neigh = neigh_lookup(&arp_tbl, &ip, dev);
|
||||
if (neigh) {
|
||||
if (neigh->nud_state&~NUD_NOARP)
|
||||
err = neigh_update(neigh, NULL, NUD_FAILED,
|
||||
err = neigh_update(neigh, NULL, NUD_FAILED,
|
||||
NEIGH_UPDATE_F_OVERRIDE|
|
||||
NEIGH_UPDATE_F_ADMIN);
|
||||
neigh_release(neigh);
|
||||
|
@ -1181,7 +1181,7 @@ int arp_ioctl(unsigned int cmd, void __user *arg)
|
|||
|
||||
switch(cmd) {
|
||||
case SIOCDARP:
|
||||
err = arp_req_delete(&r, dev);
|
||||
err = arp_req_delete(&r, dev);
|
||||
break;
|
||||
case SIOCSARP:
|
||||
err = arp_req_set(&r, dev);
|
||||
|
@ -1268,14 +1268,14 @@ static char *ax2asc2(ax25_address *a, char *buf)
|
|||
|
||||
if (c != ' ') *s++ = c;
|
||||
}
|
||||
|
||||
|
||||
*s++ = '-';
|
||||
|
||||
if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
|
||||
*s++ = '1';
|
||||
n -= 10;
|
||||
}
|
||||
|
||||
|
||||
*s++ = n + '0';
|
||||
*s++ = '\0';
|
||||
|
||||
|
@ -1373,7 +1373,7 @@ static int arp_seq_open(struct inode *inode, struct file *file)
|
|||
struct seq_file *seq;
|
||||
int rc = -ENOMEM;
|
||||
struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
|
||||
|
||||
|
||||
if (!s)
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -29,12 +29,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
|
|||
int oif;
|
||||
int err;
|
||||
|
||||
|
||||
if (addr_len < sizeof(*usin))
|
||||
return -EINVAL;
|
||||
|
||||
if (usin->sin_family != AF_INET)
|
||||
return -EAFNOSUPPORT;
|
||||
if (addr_len < sizeof(*usin))
|
||||
return -EINVAL;
|
||||
|
||||
if (usin->sin_family != AF_INET)
|
||||
return -EAFNOSUPPORT;
|
||||
|
||||
sk_dst_reset(sk);
|
||||
|
||||
|
@ -56,8 +56,8 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
|
|||
ip_rt_put(rt);
|
||||
return -EACCES;
|
||||
}
|
||||
if (!inet->saddr)
|
||||
inet->saddr = rt->rt_src; /* Update source address */
|
||||
if (!inet->saddr)
|
||||
inet->saddr = rt->rt_src; /* Update source address */
|
||||
if (!inet->rcv_saddr)
|
||||
inet->rcv_saddr = rt->rt_src;
|
||||
inet->daddr = rt->rt_dst;
|
||||
|
|
|
@ -252,7 +252,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
|
|||
|
||||
ASSERT_RTNL();
|
||||
|
||||
/* 1. Deleting primary ifaddr forces deletion all secondaries
|
||||
/* 1. Deleting primary ifaddr forces deletion all secondaries
|
||||
* unless alias promotion is set
|
||||
**/
|
||||
|
||||
|
@ -260,7 +260,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
|
|||
struct in_ifaddr **ifap1 = &ifa1->ifa_next;
|
||||
|
||||
while ((ifa = *ifap1) != NULL) {
|
||||
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
|
||||
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
|
||||
ifa1->ifa_scope <= ifa->ifa_scope)
|
||||
last_prim = ifa;
|
||||
|
||||
|
@ -583,8 +583,8 @@ static __inline__ int inet_abc_len(__be32 addr)
|
|||
{
|
||||
int rc = -1; /* Something else, probably a multicast. */
|
||||
|
||||
if (ZERONET(addr))
|
||||
rc = 0;
|
||||
if (ZERONET(addr))
|
||||
rc = 0;
|
||||
else {
|
||||
__u32 haddr = ntohl(addr);
|
||||
|
||||
|
@ -596,7 +596,7 @@ static __inline__ int inet_abc_len(__be32 addr)
|
|||
rc = 24;
|
||||
}
|
||||
|
||||
return rc;
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
|
@ -1020,29 +1020,29 @@ int unregister_inetaddr_notifier(struct notifier_block *nb)
|
|||
* alias numbering and to create unique labels if possible.
|
||||
*/
|
||||
static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
|
||||
{
|
||||
{
|
||||
struct in_ifaddr *ifa;
|
||||
int named = 0;
|
||||
|
||||
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
|
||||
char old[IFNAMSIZ], *dot;
|
||||
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
|
||||
char old[IFNAMSIZ], *dot;
|
||||
|
||||
memcpy(old, ifa->ifa_label, IFNAMSIZ);
|
||||
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
|
||||
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
|
||||
if (named++ == 0)
|
||||
continue;
|
||||
dot = strchr(ifa->ifa_label, ':');
|
||||
if (dot == NULL) {
|
||||
sprintf(old, ":%d", named);
|
||||
if (dot == NULL) {
|
||||
sprintf(old, ":%d", named);
|
||||
dot = old;
|
||||
}
|
||||
if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
|
||||
strcat(ifa->ifa_label, dot);
|
||||
} else {
|
||||
strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
|
||||
strcat(ifa->ifa_label, dot);
|
||||
} else {
|
||||
strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Called only under RTNL semaphore */
|
||||
|
||||
|
@ -1539,7 +1539,7 @@ static struct devinet_sysctl_table {
|
|||
},
|
||||
},
|
||||
.devinet_conf_dir = {
|
||||
{
|
||||
{
|
||||
.ctl_name = NET_IPV4_CONF,
|
||||
.procname = "conf",
|
||||
.mode = 0555,
|
||||
|
@ -1581,18 +1581,18 @@ static void devinet_sysctl_register(struct in_device *in_dev,
|
|||
}
|
||||
|
||||
if (dev) {
|
||||
dev_name = dev->name;
|
||||
dev_name = dev->name;
|
||||
t->devinet_dev[0].ctl_name = dev->ifindex;
|
||||
} else {
|
||||
dev_name = "default";
|
||||
t->devinet_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make a copy of dev_name, because '.procname' is regarded as const
|
||||
/*
|
||||
* Make a copy of dev_name, because '.procname' is regarded as const
|
||||
* by sysctl and we wouldn't want anyone to change it under our feet
|
||||
* (see SIOCSIFNAME).
|
||||
*/
|
||||
*/
|
||||
dev_name = kstrdup(dev_name, GFP_KERNEL);
|
||||
if (!dev_name)
|
||||
goto free;
|
||||
|
|
|
@ -215,7 +215,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
|
|||
if (padlen+2 >= elen)
|
||||
goto out;
|
||||
|
||||
/* ... check padding bits here. Silly. :-) */
|
||||
/* ... check padding bits here. Silly. :-) */
|
||||
|
||||
iph = skb->nh.iph;
|
||||
ihl = iph->ihl * 4;
|
||||
|
@ -236,7 +236,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
|
|||
|
||||
ipaddr.a4 = iph->saddr;
|
||||
km_new_mapping(x, &ipaddr, uh->source);
|
||||
|
||||
|
||||
/* XXX: perhaps add an extra
|
||||
* policy check here, to see
|
||||
* if we should allow or
|
||||
|
@ -245,7 +245,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
|
|||
* address/port.
|
||||
*/
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* 2) ignore UDP/TCP checksums in case
|
||||
* of NAT-T in Transport Mode, or
|
||||
|
@ -284,7 +284,7 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
|
|||
mtu = ALIGN(mtu + 2, 4) + blksize - 4;
|
||||
break;
|
||||
case XFRM_MODE_BEET:
|
||||
/* The worst case. */
|
||||
/* The worst case. */
|
||||
enclen = IPV4_BEET_PHMAXLEN;
|
||||
mtu = ALIGN(mtu + enclen + 2, blksize);
|
||||
break;
|
||||
|
|
|
@ -160,7 +160,7 @@ unsigned inet_addr_type(__be32 addr)
|
|||
#ifdef CONFIG_IP_MULTIPLE_TABLES
|
||||
res.r = NULL;
|
||||
#endif
|
||||
|
||||
|
||||
if (ip_fib_local_table) {
|
||||
ret = RTN_UNICAST;
|
||||
if (!ip_fib_local_table->tb_lookup(ip_fib_local_table,
|
||||
|
@ -378,7 +378,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
|
|||
int len = 0;
|
||||
|
||||
mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
|
||||
if (mx == NULL)
|
||||
if (mx == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
if (rt->rt_flags & RTF_MTU)
|
||||
|
@ -400,7 +400,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
|
|||
/*
|
||||
* Handle IP routing ioctl calls. These are used to manipulate the routing tables
|
||||
*/
|
||||
|
||||
|
||||
int ip_rt_ioctl(unsigned int cmd, void __user *arg)
|
||||
{
|
||||
struct fib_config cfg;
|
||||
|
@ -600,7 +600,7 @@ int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
|
|||
goto next;
|
||||
if (dumped)
|
||||
memset(&cb->args[2], 0, sizeof(cb->args) -
|
||||
2 * sizeof(cb->args[0]));
|
||||
2 * sizeof(cb->args[0]));
|
||||
if (tb->tb_dump(tb, skb, cb) < 0)
|
||||
goto out;
|
||||
dumped = 1;
|
||||
|
@ -766,7 +766,7 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
|
|||
|
||||
static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
|
||||
{
|
||||
|
||||
|
||||
struct fib_result res;
|
||||
struct flowi fl = { .mark = frn->fl_mark,
|
||||
.nl_u = { .ip4_u = { .daddr = frn->fl_addr,
|
||||
|
@ -791,11 +791,11 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
|
|||
static void nl_fib_input(struct sock *sk, int len)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
struct nlmsghdr *nlh = NULL;
|
||||
struct nlmsghdr *nlh = NULL;
|
||||
struct fib_result_nl *frn;
|
||||
u32 pid;
|
||||
u32 pid;
|
||||
struct fib_table *tb;
|
||||
|
||||
|
||||
skb = skb_dequeue(&sk->sk_receive_queue);
|
||||
nlh = (struct nlmsghdr *)skb->data;
|
||||
if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
|
||||
|
@ -803,17 +803,17 @@ static void nl_fib_input(struct sock *sk, int len)
|
|||
kfree_skb(skb);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
|
||||
tb = fib_get_table(frn->tb_id_in);
|
||||
|
||||
nl_fib_lookup(frn, tb);
|
||||
|
||||
|
||||
pid = nlh->nlmsg_pid; /*pid of sending process */
|
||||
NETLINK_CB(skb).pid = 0; /* from kernel */
|
||||
NETLINK_CB(skb).dst_group = 0; /* unicast */
|
||||
netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
|
||||
}
|
||||
}
|
||||
|
||||
static void nl_fib_lookup_init(void)
|
||||
{
|
||||
|
|
|
@ -146,7 +146,7 @@ static void fn_rehash_zone(struct fn_zone *fz)
|
|||
struct hlist_head *ht, *old_ht;
|
||||
int old_divisor, new_divisor;
|
||||
u32 new_hashmask;
|
||||
|
||||
|
||||
old_divisor = fz->fz_divisor;
|
||||
|
||||
switch (old_divisor) {
|
||||
|
@ -911,7 +911,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
|
|||
|
||||
if (!iter->zone)
|
||||
goto out;
|
||||
|
||||
|
||||
iter->bucket = 0;
|
||||
iter->hash_head = iter->zone->fz_hash;
|
||||
|
||||
|
@ -932,7 +932,7 @@ static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
|
|||
{
|
||||
struct fib_iter_state *iter = seq->private;
|
||||
struct fib_alias *fa;
|
||||
|
||||
|
||||
if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
|
||||
fa = iter->fa;
|
||||
pos -= iter->pos;
|
||||
|
@ -981,7 +981,7 @@ static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
|
|||
return flags;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* This outputs /proc/net/route.
|
||||
*
|
||||
* It always works in backward compatibility mode.
|
||||
|
@ -1040,7 +1040,7 @@ static int fib_seq_open(struct inode *inode, struct file *file)
|
|||
struct seq_file *seq;
|
||||
int rc = -ENOMEM;
|
||||
struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
|
||||
|
||||
|
||||
if (!s)
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -85,12 +85,12 @@ for (nhsel=0; nhsel < 1; nhsel++)
|
|||
#define endfor_nexthops(fi) }
|
||||
|
||||
|
||||
static const struct
|
||||
static const struct
|
||||
{
|
||||
int error;
|
||||
u8 scope;
|
||||
} fib_props[RTA_MAX + 1] = {
|
||||
{
|
||||
{
|
||||
.error = 0,
|
||||
.scope = RT_SCOPE_NOWHERE,
|
||||
}, /* RTN_UNSPEC */
|
||||
|
@ -439,7 +439,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
|
|||
|
||||
rtnh = cfg->fc_mp;
|
||||
remaining = cfg->fc_mp_len;
|
||||
|
||||
|
||||
for_nexthops(fi) {
|
||||
int attrlen;
|
||||
|
||||
|
@ -508,9 +508,9 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
|
|||
Normally it looks as following.
|
||||
|
||||
{universe prefix} -> (gw, oif) [scope link]
|
||||
|
|
||||
|
|
||||
|-> {link prefix} -> (gw, oif) [scope local]
|
||||
|
|
||||
|
|
||||
|-> {local prefix} (terminal node)
|
||||
*/
|
||||
|
||||
|
@ -864,7 +864,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
|
|||
err = -EINVAL;
|
||||
|
||||
failure:
|
||||
if (fi) {
|
||||
if (fi) {
|
||||
fi->fib_dead = 1;
|
||||
free_fib_info(fi);
|
||||
}
|
||||
|
@ -1049,7 +1049,7 @@ int fib_sync_down(__be32 local, struct net_device *dev, int force)
|
|||
{
|
||||
int ret = 0;
|
||||
int scope = RT_SCOPE_NOWHERE;
|
||||
|
||||
|
||||
if (force)
|
||||
scope = -1;
|
||||
|
||||
|
|
|
@ -7,13 +7,13 @@
|
|||
* Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
|
||||
* & Swedish University of Agricultural Sciences.
|
||||
*
|
||||
* Jens Laas <jens.laas@data.slu.se> Swedish University of
|
||||
* Jens Laas <jens.laas@data.slu.se> Swedish University of
|
||||
* Agricultural Sciences.
|
||||
*
|
||||
*
|
||||
* Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
|
||||
*
|
||||
* This work is based on the LPC-trie which is originally descibed in:
|
||||
*
|
||||
*
|
||||
* An experimental study of compression methods for dynamic tries
|
||||
* Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
|
||||
* http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
|
||||
|
@ -224,34 +224,34 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
|
|||
}
|
||||
|
||||
/*
|
||||
To understand this stuff, an understanding of keys and all their bits is
|
||||
necessary. Every node in the trie has a key associated with it, but not
|
||||
To understand this stuff, an understanding of keys and all their bits is
|
||||
necessary. Every node in the trie has a key associated with it, but not
|
||||
all of the bits in that key are significant.
|
||||
|
||||
Consider a node 'n' and its parent 'tp'.
|
||||
|
||||
If n is a leaf, every bit in its key is significant. Its presence is
|
||||
necessitated by path compression, since during a tree traversal (when
|
||||
searching for a leaf - unless we are doing an insertion) we will completely
|
||||
ignore all skipped bits we encounter. Thus we need to verify, at the end of
|
||||
a potentially successful search, that we have indeed been walking the
|
||||
If n is a leaf, every bit in its key is significant. Its presence is
|
||||
necessitated by path compression, since during a tree traversal (when
|
||||
searching for a leaf - unless we are doing an insertion) we will completely
|
||||
ignore all skipped bits we encounter. Thus we need to verify, at the end of
|
||||
a potentially successful search, that we have indeed been walking the
|
||||
correct key path.
|
||||
|
||||
Note that we can never "miss" the correct key in the tree if present by
|
||||
following the wrong path. Path compression ensures that segments of the key
|
||||
that are the same for all keys with a given prefix are skipped, but the
|
||||
skipped part *is* identical for each node in the subtrie below the skipped
|
||||
bit! trie_insert() in this implementation takes care of that - note the
|
||||
Note that we can never "miss" the correct key in the tree if present by
|
||||
following the wrong path. Path compression ensures that segments of the key
|
||||
that are the same for all keys with a given prefix are skipped, but the
|
||||
skipped part *is* identical for each node in the subtrie below the skipped
|
||||
bit! trie_insert() in this implementation takes care of that - note the
|
||||
call to tkey_sub_equals() in trie_insert().
|
||||
|
||||
if n is an internal node - a 'tnode' here, the various parts of its key
|
||||
if n is an internal node - a 'tnode' here, the various parts of its key
|
||||
have many different meanings.
|
||||
|
||||
Example:
|
||||
Example:
|
||||
_________________________________________________________________
|
||||
| i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
|
||||
-----------------------------------------------------------------
|
||||
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
|
||||
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
|
||||
|
||||
_________________________________________________________________
|
||||
| C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
|
||||
|
@ -263,23 +263,23 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
|
|||
n->pos = 15
|
||||
n->bits = 4
|
||||
|
||||
First, let's just ignore the bits that come before the parent tp, that is
|
||||
the bits from 0 to (tp->pos-1). They are *known* but at this point we do
|
||||
First, let's just ignore the bits that come before the parent tp, that is
|
||||
the bits from 0 to (tp->pos-1). They are *known* but at this point we do
|
||||
not use them for anything.
|
||||
|
||||
The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
|
||||
index into the parent's child array. That is, they will be used to find
|
||||
index into the parent's child array. That is, they will be used to find
|
||||
'n' among tp's children.
|
||||
|
||||
The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
|
||||
for the node n.
|
||||
|
||||
All the bits we have seen so far are significant to the node n. The rest
|
||||
All the bits we have seen so far are significant to the node n. The rest
|
||||
of the bits are really not needed or indeed known in n->key.
|
||||
|
||||
The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
|
||||
The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
|
||||
n's child array, and will of course be different for each child.
|
||||
|
||||
|
||||
|
||||
The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
|
||||
at this point.
|
||||
|
@ -294,7 +294,7 @@ static inline void check_tnode(const struct tnode *tn)
|
|||
static int halve_threshold = 25;
|
||||
static int inflate_threshold = 50;
|
||||
static int halve_threshold_root = 15;
|
||||
static int inflate_threshold_root = 25;
|
||||
static int inflate_threshold_root = 25;
|
||||
|
||||
|
||||
static void __alias_free_mem(struct rcu_head *head)
|
||||
|
@ -355,7 +355,7 @@ static inline void tnode_free(struct tnode *tn)
|
|||
struct leaf *l = (struct leaf *) tn;
|
||||
call_rcu_bh(&l->rcu, __leaf_free_rcu);
|
||||
}
|
||||
else
|
||||
else
|
||||
call_rcu(&tn->rcu, __tnode_free_rcu);
|
||||
}
|
||||
|
||||
|
@ -461,7 +461,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
|
|||
int inflate_threshold_use;
|
||||
int halve_threshold_use;
|
||||
|
||||
if (!tn)
|
||||
if (!tn)
|
||||
return NULL;
|
||||
|
||||
pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
|
||||
|
@ -556,7 +556,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
|
|||
|
||||
if(!tn->parent)
|
||||
inflate_threshold_use = inflate_threshold_root;
|
||||
else
|
||||
else
|
||||
inflate_threshold_use = inflate_threshold;
|
||||
|
||||
err = 0;
|
||||
|
@ -587,7 +587,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
|
|||
|
||||
if(!tn->parent)
|
||||
halve_threshold_use = halve_threshold_root;
|
||||
else
|
||||
else
|
||||
halve_threshold_use = halve_threshold;
|
||||
|
||||
err = 0;
|
||||
|
@ -665,10 +665,10 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
|
|||
right = tnode_new(inode->key|m, inode->pos + 1,
|
||||
inode->bits - 1);
|
||||
|
||||
if (!right) {
|
||||
if (!right) {
|
||||
tnode_free(left);
|
||||
goto nomem;
|
||||
}
|
||||
}
|
||||
|
||||
put_child(t, tn, 2*i, (struct node *) left);
|
||||
put_child(t, tn, 2*i+1, (struct node *) right);
|
||||
|
@ -890,23 +890,23 @@ static inline struct list_head * get_fa_head(struct leaf *l, int plen)
|
|||
|
||||
static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
|
||||
{
|
||||
struct leaf_info *li = NULL, *last = NULL;
|
||||
struct hlist_node *node;
|
||||
struct leaf_info *li = NULL, *last = NULL;
|
||||
struct hlist_node *node;
|
||||
|
||||
if (hlist_empty(head)) {
|
||||
hlist_add_head_rcu(&new->hlist, head);
|
||||
} else {
|
||||
hlist_for_each_entry(li, node, head, hlist) {
|
||||
if (new->plen > li->plen)
|
||||
break;
|
||||
if (hlist_empty(head)) {
|
||||
hlist_add_head_rcu(&new->hlist, head);
|
||||
} else {
|
||||
hlist_for_each_entry(li, node, head, hlist) {
|
||||
if (new->plen > li->plen)
|
||||
break;
|
||||
|
||||
last = li;
|
||||
}
|
||||
if (last)
|
||||
hlist_add_after_rcu(&last->hlist, &new->hlist);
|
||||
else
|
||||
hlist_add_before_rcu(&new->hlist, &li->hlist);
|
||||
}
|
||||
last = li;
|
||||
}
|
||||
if (last)
|
||||
hlist_add_after_rcu(&last->hlist, &new->hlist);
|
||||
else
|
||||
hlist_add_before_rcu(&new->hlist, &li->hlist);
|
||||
}
|
||||
}
|
||||
|
||||
/* rcu_read_lock needs to be hold by caller from readside */
|
||||
|
@ -1700,7 +1700,7 @@ static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
|
|||
/* Decend if tnode */
|
||||
while (IS_TNODE(c)) {
|
||||
p = (struct tnode *) c;
|
||||
idx = 0;
|
||||
idx = 0;
|
||||
|
||||
/* Rightmost non-NULL branch */
|
||||
if (p && IS_TNODE(p))
|
||||
|
@ -2303,9 +2303,9 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
|
|||
|
||||
seq_indent(seq, iter->depth-1);
|
||||
seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
|
||||
NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
|
||||
NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
|
||||
tn->empty_children);
|
||||
|
||||
|
||||
} else {
|
||||
struct leaf *l = (struct leaf *) n;
|
||||
int i;
|
||||
|
|
|
@ -304,7 +304,7 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
|
|||
|
||||
/* No rate limit on loopback */
|
||||
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
|
||||
goto out;
|
||||
goto out;
|
||||
|
||||
/* Limit if icmp type is enabled in ratemask. */
|
||||
if ((1 << type) & sysctl_icmp_ratemask)
|
||||
|
@ -350,9 +350,9 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
|
|||
struct sk_buff *skb;
|
||||
|
||||
if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
|
||||
icmp_param->data_len+icmp_param->head_len,
|
||||
icmp_param->head_len,
|
||||
ipc, rt, MSG_DONTWAIT) < 0)
|
||||
icmp_param->data_len+icmp_param->head_len,
|
||||
icmp_param->head_len,
|
||||
ipc, rt, MSG_DONTWAIT) < 0)
|
||||
ip_flush_pending_frames(icmp_socket->sk);
|
||||
else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
|
||||
struct icmphdr *icmph = skb->h.icmph;
|
||||
|
@ -755,7 +755,7 @@ static void icmp_redirect(struct sk_buff *skb)
|
|||
skb->h.icmph->un.gateway,
|
||||
iph->saddr, skb->dev);
|
||||
break;
|
||||
}
|
||||
}
|
||||
out:
|
||||
return;
|
||||
out_err:
|
||||
|
@ -959,7 +959,7 @@ int icmp_rcv(struct sk_buff *skb)
|
|||
* Parse the ICMP message
|
||||
*/
|
||||
|
||||
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
|
||||
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
|
||||
/*
|
||||
* RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
|
||||
* silently ignored (we let user decide with a sysctl).
|
||||
|
@ -976,7 +976,7 @@ int icmp_rcv(struct sk_buff *skb)
|
|||
icmph->type != ICMP_ADDRESS &&
|
||||
icmph->type != ICMP_ADDRESSREPLY) {
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry);
|
||||
|
@ -1085,7 +1085,7 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
|
|||
.input_entry = ICMP_MIB_DUMMY,
|
||||
.handler = icmp_discard,
|
||||
},
|
||||
[ICMP_INFO_REPLY] = {
|
||||
[ICMP_INFO_REPLY] = {
|
||||
.output_entry = ICMP_MIB_DUMMY,
|
||||
.input_entry = ICMP_MIB_DUMMY,
|
||||
.handler = icmp_discard,
|
||||
|
|
|
@ -35,7 +35,7 @@
|
|||
*
|
||||
* Chih-Jen Chang : Tried to revise IGMP to Version 2
|
||||
* Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
|
||||
* The enhancements are mainly based on Steve Deering's
|
||||
* The enhancements are mainly based on Steve Deering's
|
||||
* ipmulti-3.5 source code.
|
||||
* Chih-Jen Chang : Added the igmp_get_mrouter_info and
|
||||
* Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of
|
||||
|
@ -49,11 +49,11 @@
|
|||
* Alan Cox : Stop IGMP from 0.0.0.0 being accepted.
|
||||
* Alan Cox : Use GFP_ATOMIC in the right places.
|
||||
* Christian Daudt : igmp timer wasn't set for local group
|
||||
* memberships but was being deleted,
|
||||
* which caused a "del_timer() called
|
||||
* memberships but was being deleted,
|
||||
* which caused a "del_timer() called
|
||||
* from %p with timer not initialized\n"
|
||||
* message (960131).
|
||||
* Christian Daudt : removed del_timer from
|
||||
* Christian Daudt : removed del_timer from
|
||||
* igmp_timer_expire function (960205).
|
||||
* Christian Daudt : igmp_heard_report now only calls
|
||||
* igmp_timer_expire if tm->running is
|
||||
|
@ -718,7 +718,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
|
|||
{
|
||||
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
|
||||
return;
|
||||
in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
IGMP_Unsolicited_Report_Count;
|
||||
igmp_ifc_start_timer(in_dev, 1);
|
||||
}
|
||||
|
@ -838,7 +838,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
|
|||
if (len == 8) {
|
||||
if (ih->code == 0) {
|
||||
/* Alas, old v1 router presents here. */
|
||||
|
||||
|
||||
max_delay = IGMP_Query_Response_Interval;
|
||||
in_dev->mr_v1_seen = jiffies +
|
||||
IGMP_V1_Router_Present_Timeout;
|
||||
|
@ -860,10 +860,10 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
|
|||
} else { /* v3 */
|
||||
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
|
||||
return;
|
||||
|
||||
|
||||
ih3 = (struct igmpv3_query *) skb->h.raw;
|
||||
if (ih3->nsrcs) {
|
||||
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
|
||||
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
|
||||
+ ntohs(ih3->nsrcs)*sizeof(__be32)))
|
||||
return;
|
||||
ih3 = (struct igmpv3_query *) skb->h.raw;
|
||||
|
@ -909,7 +909,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
|
|||
else
|
||||
im->gsquery = mark;
|
||||
changed = !im->gsquery ||
|
||||
igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
|
||||
igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
|
||||
spin_unlock_bh(&im->lock);
|
||||
if (changed)
|
||||
igmp_mod_timer(im, max_delay);
|
||||
|
@ -1257,9 +1257,9 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
|
|||
void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
|
||||
{
|
||||
struct ip_mc_list *i, **ip;
|
||||
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
|
||||
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
|
||||
if (i->multiaddr==addr) {
|
||||
if (--i->users == 0) {
|
||||
|
@ -1436,7 +1436,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
|
|||
#ifdef CONFIG_IP_MULTICAST
|
||||
if (psf->sf_oldin &&
|
||||
!IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
|
||||
psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
IGMP_Unsolicited_Report_Count;
|
||||
psf->sf_next = pmc->tomb;
|
||||
pmc->tomb = psf;
|
||||
|
@ -1500,7 +1500,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
|
|||
/* filter mode change */
|
||||
pmc->sfmode = MCAST_INCLUDE;
|
||||
#ifdef CONFIG_IP_MULTICAST
|
||||
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
IGMP_Unsolicited_Report_Count;
|
||||
in_dev->mr_ifc_count = pmc->crcount;
|
||||
for (psf=pmc->sources; psf; psf = psf->sf_next)
|
||||
|
@ -1679,7 +1679,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
|
|||
#ifdef CONFIG_IP_MULTICAST
|
||||
/* else no filters; keep old mode for reports */
|
||||
|
||||
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
|
||||
IGMP_Unsolicited_Report_Count;
|
||||
in_dev->mr_ifc_count = pmc->crcount;
|
||||
for (psf=pmc->sources; psf; psf = psf->sf_next)
|
||||
|
@ -1873,7 +1873,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
|
|||
} else if (pmc->sfmode != omode) {
|
||||
/* allow mode switches for empty-set filters */
|
||||
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
|
||||
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
|
||||
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
|
||||
NULL, 0);
|
||||
pmc->sfmode = omode;
|
||||
}
|
||||
|
@ -1899,7 +1899,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
|
|||
}
|
||||
|
||||
/* update the interface filter */
|
||||
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
|
||||
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
|
||||
&mreqs->imr_sourceaddr, 1);
|
||||
|
||||
for (j=i+1; j<psl->sl_count; j++)
|
||||
|
@ -1949,7 +1949,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
|
|||
psl->sl_count++;
|
||||
err = 0;
|
||||
/* update the interface list */
|
||||
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
|
||||
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
|
||||
&mreqs->imr_sourceaddr, 1);
|
||||
done:
|
||||
rtnl_unlock();
|
||||
|
@ -2264,7 +2264,7 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
|
|||
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
|
||||
|
||||
for (state->dev = dev_base, state->in_dev = NULL;
|
||||
state->dev;
|
||||
state->dev;
|
||||
state->dev = state->dev->next) {
|
||||
struct in_device *in_dev;
|
||||
in_dev = in_dev_get(state->dev);
|
||||
|
@ -2346,7 +2346,7 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
|
|||
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
|
||||
{
|
||||
if (v == SEQ_START_TOKEN)
|
||||
seq_puts(seq,
|
||||
seq_puts(seq,
|
||||
"Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
|
||||
else {
|
||||
struct ip_mc_list *im = (struct ip_mc_list *)v;
|
||||
|
@ -2426,7 +2426,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
|
|||
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
|
||||
|
||||
for (state->dev = dev_base, state->idev = NULL, state->im = NULL;
|
||||
state->dev;
|
||||
state->dev;
|
||||
state->dev = state->dev->next) {
|
||||
struct in_device *idev;
|
||||
idev = in_dev_get(state->dev);
|
||||
|
@ -2531,7 +2531,7 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
|
|||
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
|
||||
|
||||
if (v == SEQ_START_TOKEN) {
|
||||
seq_printf(seq,
|
||||
seq_printf(seq,
|
||||
"%3s %6s "
|
||||
"%10s %10s %6s %6s\n", "Idx",
|
||||
"Device", "MCA",
|
||||
|
@ -2539,8 +2539,8 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
|
|||
} else {
|
||||
seq_printf(seq,
|
||||
"%3d %6.6s 0x%08x "
|
||||
"0x%08x %6lu %6lu\n",
|
||||
state->dev->ifindex, state->dev->name,
|
||||
"0x%08x %6lu %6lu\n",
|
||||
state->dev->ifindex, state->dev->name,
|
||||
ntohl(state->im->multiaddr),
|
||||
ntohl(psf->sf_inaddr),
|
||||
psf->sf_count[MCAST_INCLUDE],
|
||||
|
|
|
@ -149,7 +149,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo,
|
|||
if (!inet_csk(sk)->icsk_bind_hash)
|
||||
inet_bind_hash(sk, tb, snum);
|
||||
BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
|
||||
ret = 0;
|
||||
ret = 0;
|
||||
|
||||
fail_unlock:
|
||||
spin_unlock(&head->lock);
|
||||
|
@ -255,7 +255,7 @@ EXPORT_SYMBOL(inet_csk_accept);
|
|||
|
||||
/*
|
||||
* Using different timers for retransmit, delayed acks and probes
|
||||
* We may wish use just one timer maintaining a list of expire jiffies
|
||||
* We may wish use just one timer maintaining a list of expire jiffies
|
||||
* to optimize.
|
||||
*/
|
||||
void inet_csk_init_xmit_timers(struct sock *sk,
|
||||
|
@ -273,7 +273,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,
|
|||
icsk->icsk_delack_timer.function = delack_handler;
|
||||
sk->sk_timer.function = keepalive_handler;
|
||||
|
||||
icsk->icsk_retransmit_timer.data =
|
||||
icsk->icsk_retransmit_timer.data =
|
||||
icsk->icsk_delack_timer.data =
|
||||
sk->sk_timer.data = (unsigned long)sk;
|
||||
|
||||
|
|
|
@ -381,7 +381,7 @@ static int inet_diag_bc_run(const void *bc, int len,
|
|||
if (addr[0] == 0 && addr[1] == 0 &&
|
||||
addr[2] == htonl(0xffff) &&
|
||||
bitstring_match(addr + 3, cond->addr,
|
||||
cond->prefix_len))
|
||||
cond->prefix_len))
|
||||
break;
|
||||
}
|
||||
yes = 0;
|
||||
|
@ -518,7 +518,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
|
|||
}
|
||||
entry.sport = tw->tw_num;
|
||||
entry.dport = ntohs(tw->tw_dport);
|
||||
entry.userlocks = 0;
|
||||
entry.userlocks = 0;
|
||||
|
||||
if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
|
||||
return 0;
|
||||
|
|
|
@ -262,7 +262,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
|
|||
static inline u32 inet_sk_port_offset(const struct sock *sk)
|
||||
{
|
||||
const struct inet_sock *inet = inet_sk(sk);
|
||||
return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
|
||||
return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
|
||||
inet->dport);
|
||||
}
|
||||
|
||||
|
@ -274,81 +274,81 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
|
|||
{
|
||||
struct inet_hashinfo *hinfo = death_row->hashinfo;
|
||||
const unsigned short snum = inet_sk(sk)->num;
|
||||
struct inet_bind_hashbucket *head;
|
||||
struct inet_bind_bucket *tb;
|
||||
struct inet_bind_hashbucket *head;
|
||||
struct inet_bind_bucket *tb;
|
||||
int ret;
|
||||
|
||||
if (!snum) {
|
||||
int low = sysctl_local_port_range[0];
|
||||
int high = sysctl_local_port_range[1];
|
||||
if (!snum) {
|
||||
int low = sysctl_local_port_range[0];
|
||||
int high = sysctl_local_port_range[1];
|
||||
int range = high - low;
|
||||
int i;
|
||||
int i;
|
||||
int port;
|
||||
static u32 hint;
|
||||
u32 offset = hint + inet_sk_port_offset(sk);
|
||||
struct hlist_node *node;
|
||||
struct inet_timewait_sock *tw = NULL;
|
||||
struct inet_timewait_sock *tw = NULL;
|
||||
|
||||
local_bh_disable();
|
||||
local_bh_disable();
|
||||
for (i = 1; i <= range; i++) {
|
||||
port = low + (i + offset) % range;
|
||||
head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
|
||||
spin_lock(&head->lock);
|
||||
head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
|
||||
spin_lock(&head->lock);
|
||||
|
||||
/* Does not bother with rcv_saddr checks,
|
||||
* because the established check is already
|
||||
* unique enough.
|
||||
*/
|
||||
/* Does not bother with rcv_saddr checks,
|
||||
* because the established check is already
|
||||
* unique enough.
|
||||
*/
|
||||
inet_bind_bucket_for_each(tb, node, &head->chain) {
|
||||
if (tb->port == port) {
|
||||
BUG_TRAP(!hlist_empty(&tb->owners));
|
||||
if (tb->fastreuse >= 0)
|
||||
goto next_port;
|
||||
if (!__inet_check_established(death_row,
|
||||
if (tb->port == port) {
|
||||
BUG_TRAP(!hlist_empty(&tb->owners));
|
||||
if (tb->fastreuse >= 0)
|
||||
goto next_port;
|
||||
if (!__inet_check_established(death_row,
|
||||
sk, port,
|
||||
&tw))
|
||||
goto ok;
|
||||
goto next_port;
|
||||
}
|
||||
}
|
||||
goto ok;
|
||||
goto next_port;
|
||||
}
|
||||
}
|
||||
|
||||
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port);
|
||||
if (!tb) {
|
||||
spin_unlock(&head->lock);
|
||||
break;
|
||||
}
|
||||
tb->fastreuse = -1;
|
||||
goto ok;
|
||||
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port);
|
||||
if (!tb) {
|
||||
spin_unlock(&head->lock);
|
||||
break;
|
||||
}
|
||||
tb->fastreuse = -1;
|
||||
goto ok;
|
||||
|
||||
next_port:
|
||||
spin_unlock(&head->lock);
|
||||
}
|
||||
local_bh_enable();
|
||||
next_port:
|
||||
spin_unlock(&head->lock);
|
||||
}
|
||||
local_bh_enable();
|
||||
|
||||
return -EADDRNOTAVAIL;
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
ok:
|
||||
hint += i;
|
||||
|
||||
/* Head lock still held and bh's disabled */
|
||||
inet_bind_hash(sk, tb, port);
|
||||
/* Head lock still held and bh's disabled */
|
||||
inet_bind_hash(sk, tb, port);
|
||||
if (sk_unhashed(sk)) {
|
||||
inet_sk(sk)->sport = htons(port);
|
||||
__inet_hash(hinfo, sk, 0);
|
||||
}
|
||||
spin_unlock(&head->lock);
|
||||
inet_sk(sk)->sport = htons(port);
|
||||
__inet_hash(hinfo, sk, 0);
|
||||
}
|
||||
spin_unlock(&head->lock);
|
||||
|
||||
if (tw) {
|
||||
inet_twsk_deschedule(tw, death_row);
|
||||
inet_twsk_put(tw);
|
||||
}
|
||||
if (tw) {
|
||||
inet_twsk_deschedule(tw, death_row);
|
||||
inet_twsk_put(tw);
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
|
||||
tb = inet_csk(sk)->icsk_bind_hash;
|
||||
head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
|
||||
tb = inet_csk(sk)->icsk_bind_hash;
|
||||
spin_lock_bh(&head->lock);
|
||||
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
|
||||
__inet_hash(hinfo, sk, 0);
|
||||
|
|
|
@ -4,15 +4,15 @@
|
|||
* interface as the means of communication with the user level.
|
||||
*
|
||||
* The IP forwarding functionality.
|
||||
*
|
||||
*
|
||||
* Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $
|
||||
*
|
||||
* Authors: see ip.c
|
||||
*
|
||||
* Fixes:
|
||||
* Many : Split from ip.c , see ip_input.c for
|
||||
* Many : Split from ip.c , see ip_input.c for
|
||||
* history.
|
||||
* Dave Gregorich : NULL ip_rt_put fix for multicast
|
||||
* Dave Gregorich : NULL ip_rt_put fix for multicast
|
||||
* routing.
|
||||
* Jos Vos : Add call_out_firewall before sending,
|
||||
* use output device for accounting.
|
||||
|
@ -69,14 +69,14 @@ int ip_forward(struct sk_buff *skb)
|
|||
goto drop;
|
||||
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
|
||||
|
||||
/*
|
||||
* According to the RFC, we must first decrease the TTL field. If
|
||||
* that reaches zero, we must reply an ICMP control message telling
|
||||
* that the packet's lifetime expired.
|
||||
*/
|
||||
if (skb->nh.iph->ttl <= 1)
|
||||
goto too_many_hops;
|
||||
goto too_many_hops;
|
||||
|
||||
if (!xfrm4_route_forward(skb))
|
||||
goto drop;
|
||||
|
@ -107,16 +107,16 @@ int ip_forward(struct sk_buff *skb)
|
|||
ip_forward_finish);
|
||||
|
||||
sr_failed:
|
||||
/*
|
||||
/*
|
||||
* Strict routing permits no gatewaying
|
||||
*/
|
||||
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
|
||||
goto drop;
|
||||
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
|
||||
goto drop;
|
||||
|
||||
too_many_hops:
|
||||
/* Tell the sender its packet died... */
|
||||
IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
|
||||
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
|
||||
/* Tell the sender its packet died... */
|
||||
IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
|
||||
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
* interface as the means of communication with the user level.
|
||||
*
|
||||
* The IP fragmentation functionality.
|
||||
*
|
||||
*
|
||||
* Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
|
||||
*
|
||||
* Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
|
||||
|
@ -238,7 +238,7 @@ static void ipq_kill(struct ipq *ipq)
|
|||
}
|
||||
}
|
||||
|
||||
/* Memory limiting on fragments. Evictor trashes the oldest
|
||||
/* Memory limiting on fragments. Evictor trashes the oldest
|
||||
* fragment queue until we are back under the threshold.
|
||||
*/
|
||||
static void ip_evictor(void)
|
||||
|
@ -479,14 +479,14 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
|
|||
goto err;
|
||||
}
|
||||
|
||||
offset = ntohs(skb->nh.iph->frag_off);
|
||||
offset = ntohs(skb->nh.iph->frag_off);
|
||||
flags = offset & ~IP_OFFSET;
|
||||
offset &= IP_OFFSET;
|
||||
offset <<= 3; /* offset is in 8-byte chunks */
|
||||
ihl = skb->nh.iph->ihl * 4;
|
||||
ihl = skb->nh.iph->ihl * 4;
|
||||
|
||||
/* Determine the position of this fragment. */
|
||||
end = offset + skb->len - ihl;
|
||||
end = offset + skb->len - ihl;
|
||||
|
||||
/* Is this the final fragment? */
|
||||
if ((flags & IP_MF) == 0) {
|
||||
|
@ -589,8 +589,8 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
|
|||
else
|
||||
qp->fragments = skb;
|
||||
|
||||
if (skb->dev)
|
||||
qp->iif = skb->dev->ifindex;
|
||||
if (skb->dev)
|
||||
qp->iif = skb->dev->ifindex;
|
||||
skb->dev = NULL;
|
||||
skb_get_timestamp(skb, &qp->stamp);
|
||||
qp->meat += skb->len;
|
||||
|
@ -684,7 +684,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
|
|||
return head;
|
||||
|
||||
out_nomem:
|
||||
LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
|
||||
LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
|
||||
"queue %p\n", qp);
|
||||
goto out_fail;
|
||||
out_oversize:
|
||||
|
@ -703,7 +703,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
|
|||
struct iphdr *iph = skb->nh.iph;
|
||||
struct ipq *qp;
|
||||
struct net_device *dev;
|
||||
|
||||
|
||||
IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
|
||||
|
||||
/* Start by cleaning up the memory. */
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Linux NET3: GRE over IP protocol decoder.
|
||||
* Linux NET3: GRE over IP protocol decoder.
|
||||
*
|
||||
* Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
|
||||
*
|
||||
|
@ -63,7 +63,7 @@
|
|||
solution, but it supposes maintaing new variable in ALL
|
||||
skb, even if no tunneling is used.
|
||||
|
||||
Current solution: t->recursion lock breaks dead loops. It looks
|
||||
Current solution: t->recursion lock breaks dead loops. It looks
|
||||
like dev->tbusy flag, but I preferred new variable, because
|
||||
the semantics is different. One day, when hard_start_xmit
|
||||
will be multithreaded we will have to use skb->encapsulation.
|
||||
|
@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb)
|
|||
if (flags == 0 &&
|
||||
skb->protocol == htons(ETH_P_WCCP)) {
|
||||
skb->protocol = htons(ETH_P_IP);
|
||||
if ((*(h + offset) & 0xF0) != 0x40)
|
||||
if ((*(h + offset) & 0xF0) != 0x40)
|
||||
offset += 4;
|
||||
}
|
||||
|
||||
|
@ -816,7 +816,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
||||
if (!new_skb) {
|
||||
ip_rt_put(rt);
|
||||
stats->tx_dropped++;
|
||||
stats->tx_dropped++;
|
||||
dev_kfree_skb(skb);
|
||||
tunnel->recursion--;
|
||||
return 0;
|
||||
|
@ -1044,7 +1044,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
|
|||
so that I had to set ARPHRD_IPGRE to a random value.
|
||||
I have an impression, that Cisco could make something similar,
|
||||
but this feature is apparently missing in IOS<=11.2(8).
|
||||
|
||||
|
||||
I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
|
||||
with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
|
||||
|
||||
|
@ -1076,9 +1076,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh
|
|||
p[1] = htons(type);
|
||||
|
||||
/*
|
||||
* Set the source hardware address.
|
||||
* Set the source hardware address.
|
||||
*/
|
||||
|
||||
|
||||
if (saddr)
|
||||
memcpy(&iph->saddr, saddr, 4);
|
||||
|
||||
|
@ -1088,7 +1088,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh
|
|||
}
|
||||
if (iph->daddr && !MULTICAST(iph->daddr))
|
||||
return t->hlen;
|
||||
|
||||
|
||||
return -t->hlen;
|
||||
}
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@
* Stefan Becker, <stefanb@yello.ping.de>
* Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
*
*
* Fixes:
* Alan Cox : Commented a couple of minor bits of surplus code
@ -98,13 +98,13 @@
* Jos Vos : Do accounting *before* call_in_firewall
* Willy Konynenberg : Transparent proxying support
*
*
*
* To Fix:
* IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
* and could be made very efficient with the addition of some virtual memory hacks to permit
* the allocation of a buffer that can then be 'grown' by twiddling page tables.
* Output fragmentation wants updating along with the buffer management to use a single
* interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
* output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
* fragmentation anyway.
@ -154,7 +154,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;

/*
* Process Router Attention IP option
*/
int ip_call_ra_chain(struct sk_buff *skb)
{
struct ip_ra_chain *ra;
@ -202,8 +202,8 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)

__skb_pull(skb, ihl);

/* Point into the IP datagram, just past the header. */
skb->h.raw = skb->data;

rcu_read_lock();
{
@ -259,7 +259,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)

/*
* Deliver IP Packets to the higher protocol layers.
*/
int ip_local_deliver(struct sk_buff *skb)
{
/*
@ -335,14 +335,14 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
/*
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
*/
if (skb->dst == NULL) {
int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
skb->dev);
if (unlikely(err)) {
if (err == -EHOSTUNREACH)
IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
goto drop;
}
}

@ -363,13 +363,13 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
return dst_input(skb);

drop:
kfree_skb(skb);
return NET_RX_DROP;
}

/*
* Main IP Receive routine.
*/
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
struct iphdr *iph;
@ -437,9 +437,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
inhdr_error:
IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
drop:
kfree_skb(skb);
out:
return NET_RX_DROP;
}

EXPORT_SYMBOL(ip_statistics);

@ -8,7 +8,7 @@
* Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $
*
* Authors: A.N.Kuznetsov
*
*/

#include <linux/capability.h>
@ -26,7 +26,7 @@
#include <net/route.h>
#include <net/cipso_ipv4.h>

/*
* Write options to IP header, record destination address to
* source route option, address of outgoing interface
* (we should already know it, so that this function is allowed be
@ -76,7 +76,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
}
}

/*
* Provided (sopt, skb) points to received options,
* build in dopt compiled option set appropriate for answering.
* i.e. invert SRR option, copy anothers,
@ -85,7 +85,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
* NOTE: dopt cannot point to skb.
*/

int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
{
struct ip_options *sopt;
unsigned char *sptr, *dptr;
@ -215,7 +215,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
* Simple and stupid 8), but the most efficient way.
*/

void ip_options_fragment(struct sk_buff * skb)
{
unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr);
struct ip_options * opt = &(IPCB(skb)->opt);
@ -370,7 +370,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
switch (optptr[3]&0xF) {
case IPOPT_TS_TSONLY:
opt->ts = optptr - iph;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]-1];
opt->ts_needtime = 1;
optptr[2] += 4;
@ -448,7 +448,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
goto error;
}
opt->cipso = optptr - iph;
if (cipso_v4_validate(&optptr)) {
pp_ptr = optptr;
goto error;
}

@ -22,7 +22,7 @@
|
|||
* Fixes:
|
||||
* Alan Cox : Missing nonblock feature in ip_build_xmit.
|
||||
* Mike Kilburn : htons() missing in ip_build_xmit.
|
||||
* Bradford Johnson: Fix faulty handling of some frames when
|
||||
* Bradford Johnson: Fix faulty handling of some frames when
|
||||
* no route is found.
|
||||
* Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
|
||||
* (in case if packet not accepted by
|
||||
|
@ -33,9 +33,9 @@
|
|||
* some redundant tests.
|
||||
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
|
||||
* Andi Kleen : Replace ip_reply with ip_send_reply.
|
||||
* Andi Kleen : Split fast and slow ip_build_xmit path
|
||||
* for decreased register pressure on x86
|
||||
* and more readibility.
|
||||
* Andi Kleen : Split fast and slow ip_build_xmit path
|
||||
* for decreased register pressure on x86
|
||||
* and more readibility.
|
||||
* Marc Boucher : When call_out_firewall returns FW_QUEUE,
|
||||
* silently drop skb instead of failing with -EPERM.
|
||||
* Detlev Wengorz : Copy protocol for fragments.
|
||||
|
@ -114,7 +114,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
|
|||
return ttl;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Add an ip header to a skbuff and send it out.
|
||||
*
|
||||
*/
|
||||
|
@ -243,7 +243,7 @@ int ip_mc_output(struct sk_buff *skb)
|
|||
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (newskb)
|
||||
NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
|
||||
newskb->dev,
|
||||
newskb->dev,
|
||||
ip_dev_loopback_xmit);
|
||||
}
|
||||
|
||||
|
@ -277,7 +277,7 @@ int ip_output(struct sk_buff *skb)
|
|||
skb->protocol = htons(ETH_P_IP);
|
||||
|
||||
return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
|
||||
ip_finish_output,
|
||||
ip_finish_output,
|
||||
!(IPCB(skb)->flags & IPSKB_REROUTED));
|
||||
}
|
||||
|
||||
|
@ -660,7 +660,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
|
|||
return err;
|
||||
|
||||
fail:
|
||||
kfree_skb(skb);
|
||||
kfree_skb(skb);
|
||||
IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
|
||||
return err;
|
||||
}
|
||||
|
@ -755,7 +755,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
|
|||
* from many pieces of data. Each pieces will be holded on the socket
|
||||
* until ip_push_pending_frames() is called. Each piece can be a page
|
||||
* or non-page data.
|
||||
*
|
||||
*
|
||||
* Not only UDP, other transport protocols - e.g. raw sockets - can use
|
||||
* this interface potentially.
|
||||
*
|
||||
|
@ -888,7 +888,7 @@ int ip_append_data(struct sock *sk,
|
|||
datalen = maxfraglen - fragheaderlen;
|
||||
fraglen = datalen + fragheaderlen;
|
||||
|
||||
if ((flags & MSG_MORE) &&
|
||||
if ((flags & MSG_MORE) &&
|
||||
!(rt->u.dst.dev->features&NETIF_F_SG))
|
||||
alloclen = mtu;
|
||||
else
|
||||
|
@ -903,14 +903,14 @@ int ip_append_data(struct sock *sk,
|
|||
alloclen += rt->u.dst.trailer_len;
|
||||
|
||||
if (transhdrlen) {
|
||||
skb = sock_alloc_send_skb(sk,
|
||||
skb = sock_alloc_send_skb(sk,
|
||||
alloclen + hh_len + 15,
|
||||
(flags & MSG_DONTWAIT), &err);
|
||||
} else {
|
||||
skb = NULL;
|
||||
if (atomic_read(&sk->sk_wmem_alloc) <=
|
||||
2 * sk->sk_sndbuf)
|
||||
skb = sock_wmalloc(sk,
|
||||
skb = sock_wmalloc(sk,
|
||||
alloclen + hh_len + 15, 1,
|
||||
sk->sk_allocation);
|
||||
if (unlikely(skb == NULL))
|
||||
|
@ -971,7 +971,7 @@ int ip_append_data(struct sock *sk,
|
|||
unsigned int off;
|
||||
|
||||
off = skb->len;
|
||||
if (getfrag(from, skb_put(skb, copy),
|
||||
if (getfrag(from, skb_put(skb, copy),
|
||||
offset, copy, off, skb) < 0) {
|
||||
__skb_trim(skb, off);
|
||||
err = -EFAULT;
|
||||
|
@ -993,7 +993,7 @@ int ip_append_data(struct sock *sk,
|
|||
goto error;
|
||||
}
|
||||
get_page(page);
|
||||
skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
|
||||
skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
|
||||
frag = &skb_shinfo(skb)->frags[i];
|
||||
}
|
||||
} else if (i < MAX_SKB_FRAGS) {
|
||||
|
@ -1033,7 +1033,7 @@ int ip_append_data(struct sock *sk,
|
|||
error:
|
||||
inet->cork.length -= length;
|
||||
IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
|
||||
return err;
|
||||
return err;
|
||||
}
|
||||
|
||||
ssize_t ip_append_page(struct sock *sk, struct page *page,
|
||||
|
@ -1257,7 +1257,7 @@ int ip_push_pending_frames(struct sock *sk)
|
|||
skb->dst = dst_clone(&rt->u.dst);
|
||||
|
||||
/* Netfilter gets whole the not fragmented skb. */
|
||||
err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
|
||||
err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
|
||||
skb->dst->dev, dst_output);
|
||||
if (err) {
|
||||
if (err > 0)
|
||||
|
@ -1305,21 +1305,21 @@ void ip_flush_pending_frames(struct sock *sk)
|
|||
/*
|
||||
* Fetch data from kernel space and fill in checksum if needed.
|
||||
*/
|
||||
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
|
||||
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
|
||||
int len, int odd, struct sk_buff *skb)
|
||||
{
|
||||
__wsum csum;
|
||||
|
||||
csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
|
||||
skb->csum = csum_block_add(skb->csum, csum, odd);
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Generic function to send a packet as reply to another packet.
|
||||
* Used to send TCP resets so far. ICMP should use this function too.
|
||||
*
|
||||
* Should run single threaded per socket because it uses the sock
|
||||
* Should run single threaded per socket because it uses the sock
|
||||
* structure to pass arguments.
|
||||
*
|
||||
* LATER: switch from ip_build_xmit to ip_append_*
|
||||
|
@ -1357,7 +1357,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
|
|||
/* Not quite clean, but right. */
|
||||
.uli_u = { .ports =
|
||||
{ .sport = skb->h.th->dest,
|
||||
.dport = skb->h.th->source } },
|
||||
.dport = skb->h.th->source } },
|
||||
.proto = sk->sk_protocol };
|
||||
security_skb_classify_flow(skb, &fl);
|
||||
if (ip_route_output_key(&rt, &fl))
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
* interface as the means of communication with the user level.
|
||||
*
|
||||
* The IP to API glue.
|
||||
*
|
||||
*
|
||||
* Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $
|
||||
*
|
||||
* Authors: see ip.c
|
||||
|
@ -12,7 +12,7 @@
|
|||
* Fixes:
|
||||
* Many : Split from ip.c , see ip.c for history.
|
||||
* Martin Mares : TOS setting fixed.
|
||||
* Alan Cox : Fixed a couple of oopses in Martin's
|
||||
* Alan Cox : Fixed a couple of oopses in Martin's
|
||||
* TOS tweaks.
|
||||
* Mike McLagan : Routing by source
|
||||
*/
|
||||
|
@ -253,7 +253,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
|
|||
return 0;
|
||||
}
|
||||
|
||||
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
|
||||
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
|
||||
__be16 port, u32 info, u8 *payload)
|
||||
{
|
||||
struct inet_sock *inet = inet_sk(sk);
|
||||
|
@ -266,10 +266,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
|
|||
if (!skb)
|
||||
return;
|
||||
|
||||
serr = SKB_EXT_ERR(skb);
|
||||
serr = SKB_EXT_ERR(skb);
|
||||
serr->ee.ee_errno = err;
|
||||
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
|
||||
serr->ee.ee_type = skb->h.icmph->type;
|
||||
serr->ee.ee_type = skb->h.icmph->type;
|
||||
serr->ee.ee_code = skb->h.icmph->code;
|
||||
serr->ee.ee_pad = 0;
|
||||
serr->ee.ee_info = info;
|
||||
|
@ -301,10 +301,10 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
|
|||
skb->nh.iph = iph;
|
||||
iph->daddr = daddr;
|
||||
|
||||
serr = SKB_EXT_ERR(skb);
|
||||
serr = SKB_EXT_ERR(skb);
|
||||
serr->ee.ee_errno = err;
|
||||
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
|
||||
serr->ee.ee_type = 0;
|
||||
serr->ee.ee_type = 0;
|
||||
serr->ee.ee_code = 0;
|
||||
serr->ee.ee_pad = 0;
|
||||
serr->ee.ee_info = info;
|
||||
|
@ -319,7 +319,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
|
|||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Handle MSG_ERRQUEUE
|
||||
*/
|
||||
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
|
||||
|
@ -391,7 +391,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
|
|||
} else
|
||||
spin_unlock_bh(&sk->sk_error_queue.lock);
|
||||
|
||||
out_free_skb:
|
||||
out_free_skb:
|
||||
kfree_skb(skb);
|
||||
out:
|
||||
return err;
|
||||
|
@ -409,15 +409,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
struct inet_sock *inet = inet_sk(sk);
|
||||
int val=0,err;
|
||||
|
||||
if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
|
||||
(1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
|
||||
(1<<IP_RETOPTS) | (1<<IP_TOS) |
|
||||
(1<<IP_TTL) | (1<<IP_HDRINCL) |
|
||||
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
|
||||
if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
|
||||
(1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
|
||||
(1<<IP_RETOPTS) | (1<<IP_TOS) |
|
||||
(1<<IP_TTL) | (1<<IP_HDRINCL) |
|
||||
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
|
||||
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
|
||||
(1<<IP_PASSSEC))) ||
|
||||
optname == IP_MULTICAST_TTL ||
|
||||
optname == IP_MULTICAST_LOOP) {
|
||||
optname == IP_MULTICAST_TTL ||
|
||||
optname == IP_MULTICAST_LOOP) {
|
||||
if (optlen >= sizeof(int)) {
|
||||
if (get_user(val, (int __user *) optval))
|
||||
return -EFAULT;
|
||||
|
@ -511,7 +511,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
val &= ~3;
|
||||
val |= inet->tos & 3;
|
||||
}
|
||||
if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
|
||||
if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
|
||||
!capable(CAP_NET_ADMIN)) {
|
||||
err = -EPERM;
|
||||
break;
|
||||
|
@ -519,7 +519,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
if (inet->tos != val) {
|
||||
inet->tos = val;
|
||||
sk->sk_priority = rt_tos2priority(val);
|
||||
sk_dst_reset(sk);
|
||||
sk_dst_reset(sk);
|
||||
}
|
||||
break;
|
||||
case IP_TTL:
|
||||
|
@ -556,13 +556,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
if (val < 0 || val > 255)
|
||||
goto e_inval;
|
||||
inet->mc_ttl = val;
|
||||
break;
|
||||
case IP_MULTICAST_LOOP:
|
||||
break;
|
||||
case IP_MULTICAST_LOOP:
|
||||
if (optlen<1)
|
||||
goto e_inval;
|
||||
inet->mc_loop = !!val;
|
||||
break;
|
||||
case IP_MULTICAST_IF:
|
||||
break;
|
||||
case IP_MULTICAST_IF:
|
||||
{
|
||||
struct ip_mreqn mreq;
|
||||
struct net_device *dev = NULL;
|
||||
|
@ -616,7 +616,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
}
|
||||
|
||||
case IP_ADD_MEMBERSHIP:
|
||||
case IP_DROP_MEMBERSHIP:
|
||||
case IP_DROP_MEMBERSHIP:
|
||||
{
|
||||
struct ip_mreqn mreq;
|
||||
|
||||
|
@ -629,7 +629,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
} else {
|
||||
memset(&mreq, 0, sizeof(mreq));
|
||||
if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq)))
|
||||
break;
|
||||
break;
|
||||
}
|
||||
|
||||
if (optname == IP_ADD_MEMBERSHIP)
|
||||
|
@ -714,7 +714,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
break;
|
||||
}
|
||||
case MCAST_JOIN_GROUP:
|
||||
case MCAST_LEAVE_GROUP:
|
||||
case MCAST_LEAVE_GROUP:
|
||||
{
|
||||
struct group_req greq;
|
||||
struct sockaddr_in *psin;
|
||||
|
@ -858,16 +858,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
|
|||
kfree(gsf);
|
||||
break;
|
||||
}
|
||||
case IP_ROUTER_ALERT:
|
||||
case IP_ROUTER_ALERT:
|
||||
err = ip_ra_control(sk, val ? 1 : 0, NULL);
|
||||
break;
|
||||
|
||||
case IP_FREEBIND:
|
||||
if (optlen<1)
|
||||
goto e_inval;
|
||||
inet->freebind = !!val;
|
||||
break;
|
||||
|
||||
inet->freebind = !!val;
|
||||
break;
|
||||
|
||||
case IP_IPSEC_POLICY:
|
||||
case IP_XFRM_POLICY:
|
||||
err = -EPERM;
|
||||
|
@ -954,7 +954,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
struct inet_sock *inet = inet_sk(sk);
|
||||
int val;
|
||||
int len;
|
||||
|
||||
|
||||
if(level!=SOL_IP)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
|
@ -969,7 +969,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
return -EFAULT;
|
||||
if(len < 0)
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
lock_sock(sk);
|
||||
|
||||
switch(optname) {
|
||||
|
@ -984,7 +984,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
inet->opt->optlen);
|
||||
release_sock(sk);
|
||||
|
||||
if (opt->optlen == 0)
|
||||
if (opt->optlen == 0)
|
||||
return put_user(0, optlen);
|
||||
|
||||
ip_options_undo(opt);
|
||||
|
@ -1059,8 +1059,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
addr.s_addr = inet->mc_addr;
|
||||
release_sock(sk);
|
||||
|
||||
if(put_user(len, optlen))
|
||||
return -EFAULT;
|
||||
if(put_user(len, optlen))
|
||||
return -EFAULT;
|
||||
if(copy_to_user(optval, &addr, len))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
|
@ -1101,7 +1101,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
release_sock(sk);
|
||||
return err;
|
||||
}
|
||||
case IP_PKTOPTIONS:
|
||||
case IP_PKTOPTIONS:
|
||||
{
|
||||
struct msghdr msg;
|
||||
|
||||
|
@ -1129,15 +1129,15 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
len -= msg.msg_controllen;
|
||||
return put_user(len, optlen);
|
||||
}
|
||||
case IP_FREEBIND:
|
||||
val = inet->freebind;
|
||||
break;
|
||||
case IP_FREEBIND:
|
||||
val = inet->freebind;
|
||||
break;
|
||||
default:
|
||||
release_sock(sk);
|
||||
return -ENOPROTOOPT;
|
||||
}
|
||||
release_sock(sk);
|
||||
|
||||
|
||||
if (len < sizeof(int) && len > 0 && val>=0 && val<255) {
|
||||
unsigned char ucval = (unsigned char)val;
|
||||
len = 1;
|
||||
|
@ -1168,7 +1168,7 @@ int ip_getsockopt(struct sock *sk, int level,
|
|||
&& (optname < MRT_BASE || optname > MRT_BASE+10)
|
||||
#endif
|
||||
) {
|
||||
int len;
|
||||
int len;
|
||||
|
||||
if(get_user(len,optlen))
|
||||
return -EFAULT;
|
||||
|
@ -1197,7 +1197,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
&& (optname < MRT_BASE || optname > MRT_BASE+10)
|
||||
#endif
|
||||
) {
|
||||
int len;
|
||||
int len;
|
||||
|
||||
if (get_user(len, optlen))
|
||||
return -EFAULT;
|
||||
|
|
|
@ -5,7 +5,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* Todo:
@ -48,7 +48,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
u8 *start, *scratch;
struct crypto_comp *tfm;
int cpu;

plen = skb->len;
dlen = IPCOMP_SCRATCH_SIZE;
start = skb->data;
@ -69,11 +69,11 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
if (err)
goto out;

skb->truesize += dlen - plen;
__skb_put(skb, dlen - plen);
memcpy(skb->data, scratch, dlen);
out:
put_cpu();
return err;
}
@ -85,11 +85,11 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
struct ip_comp_hdr *ipch;

if (skb_linearize_cow(skb))
goto out;

skb->ip_summed = CHECKSUM_NONE;

/* Remove ipcomp header and decompress original payload */
iph = skb->nh.iph;
ipch = (void *)skb->data;
iph->protocol = ipch->nexthdr;
@ -97,7 +97,7 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
__skb_pull(skb, sizeof(*ipch));
err = ipcomp_decompress(x, skb);

out:
return err;
}

@ -109,7 +109,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
u8 *start, *scratch;
struct crypto_comp *tfm;
int cpu;

ihlen = iph->ihl * 4;
plen = skb->len - ihlen;
dlen = IPCOMP_SCRATCH_SIZE;
@ -127,14 +127,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
err = -EMSGSIZE;
goto out;
}

memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
put_cpu();

pskb_trim(skb, ihlen + dlen + sizeof(struct ip_comp_hdr));
return 0;

out:
put_cpu();
return err;
}
@ -157,7 +157,7 @@ static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)

if (skb_linearize_cow(skb))
goto out_ok;

err = ipcomp_compress(x, skb);
iph = skb->nh.iph;

@ -194,7 +194,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)

spi = htonl(ntohs(ipch->cpi));
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
return;
NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
@ -202,12 +202,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
xfrm_state_put(x);
}

/* We always hold one tunnel user reference to indicate a tunnel */
static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
struct xfrm_state *t;
u8 mode = XFRM_MODE_TUNNEL;

t = xfrm_state_alloc();
if (t == NULL)
goto out;
@ -247,7 +247,7 @@ static int ipcomp_tunnel_attach(struct xfrm_state *x)
struct xfrm_state *t;

t = xfrm_state_lookup((xfrm_address_t *)&x->id.daddr.a4,
x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
if (!t) {
t = ipcomp_tunnel_create(x);
if (!t) {

@ -12,7 +12,7 @@
|
|||
* BOOTP rewritten to construct and analyse packets itself instead
|
||||
* of misusing the IP layer. num_bugs_causing_wrong_arp_replies--;
|
||||
* -- MJ, December 1998
|
||||
*
|
||||
*
|
||||
* Fixed ip_auto_config_setup calling at startup in the new "Linker Magic"
|
||||
* initialization scheme.
|
||||
* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 08/11/1999
|
||||
|
@ -98,8 +98,8 @@
|
|||
#define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */
|
||||
#define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */
|
||||
#define CONF_TIMEOUT_MAX (HZ*30) /* Maximum allowed timeout */
|
||||
#define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers
|
||||
- '3' from resolv.h */
|
||||
#define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers
|
||||
- '3' from resolv.h */
|
||||
|
||||
#define NONE __constant_htonl(INADDR_NONE)
|
||||
|
||||
|
@ -365,7 +365,7 @@ static int __init ic_defaults(void)
|
|||
* At this point we have no userspace running so need not
|
||||
* claim locks on system_utsname
|
||||
*/
|
||||
|
||||
|
||||
if (!ic_host_name_set)
|
||||
sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr));
|
||||
|
||||
|
@ -650,9 +650,9 @@ static void __init ic_bootp_init_ext(u8 *e)
|
|||
*e++ = 40;
|
||||
e += 40;
|
||||
|
||||
*e++ = 57; /* set extension buffer size for reply */
|
||||
*e++ = 57; /* set extension buffer size for reply */
|
||||
*e++ = 2;
|
||||
*e++ = 1; /* 128+236+8+20+14, see dhcpd sources */
|
||||
*e++ = 1; /* 128+236+8+20+14, see dhcpd sources */
|
||||
*e++ = 150;
|
||||
|
||||
*e++ = 255; /* End of the list */
|
||||
|
@ -913,7 +913,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
|
|||
/* Parse extensions */
|
||||
if (ext_len >= 4 &&
|
||||
!memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
|
||||
u8 *end = (u8 *) b + ntohs(b->iph.tot_len);
|
||||
u8 *end = (u8 *) b + ntohs(b->iph.tot_len);
|
||||
u8 *ext;
|
||||
|
||||
#ifdef IPCONFIG_DHCP
|
||||
|
@ -1020,7 +1020,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
|
|||
kfree_skb(skb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -1080,7 +1080,7 @@ static int __init ic_dynamic(void)
|
|||
* seems to be a terrible waste of CPU time, but actually there is
|
||||
* only one process running at all, so we don't need to use any
|
||||
* scheduler functions.
|
||||
* [Actually we could now, but the nothing else running note still
|
||||
* [Actually we could now, but the nothing else running note still
|
||||
* applies.. - AC]
|
||||
*/
|
||||
printk(KERN_NOTICE "Sending %s%s%s requests .",
|
||||
|
@ -1156,7 +1156,7 @@ static int __init ic_dynamic(void)
|
|||
}
|
||||
|
||||
printk("IP-Config: Got %s answer from %u.%u.%u.%u, ",
|
||||
((ic_got_reply & IC_RARP) ? "RARP"
|
||||
((ic_got_reply & IC_RARP) ? "RARP"
|
||||
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
|
||||
NIPQUAD(ic_servaddr));
|
||||
printk("my address is %u.%u.%u.%u\n", NIPQUAD(ic_myaddr));
|
||||
|
@ -1286,7 +1286,7 @@ static int __init ip_auto_config(void)
|
|||
#endif
|
||||
ic_first_dev->next) {
|
||||
#ifdef IPCONFIG_DYNAMIC
|
||||
|
||||
|
||||
int retries = CONF_OPEN_RETRIES;
|
||||
|
||||
if (ic_dynamic() < 0) {
|
||||
|
@ -1308,14 +1308,14 @@ static int __init ip_auto_config(void)
|
|||
*/
|
||||
#ifdef CONFIG_ROOT_NFS
|
||||
if (ROOT_DEV == Root_NFS) {
|
||||
printk(KERN_ERR
|
||||
printk(KERN_ERR
|
||||
"IP-Config: Retrying forever (NFS root)...\n");
|
||||
goto try_try_again;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (--retries) {
|
||||
printk(KERN_ERR
|
||||
printk(KERN_ERR
|
||||
"IP-Config: Reopening network devices...\n");
|
||||
goto try_try_again;
|
||||
}
|
||||
|
@ -1443,8 +1443,8 @@ static int __init ip_auto_config_setup(char *addrs)
|
|||
|
||||
ic_set_manually = 1;
|
||||
|
||||
ic_enable = (*addrs &&
|
||||
(strcmp(addrs, "off") != 0) &&
|
||||
ic_enable = (*addrs &&
|
||||
(strcmp(addrs, "off") != 0) &&
|
||||
(strcmp(addrs, "none") != 0));
|
||||
if (!ic_enable)
|
||||
return 1;
|
||||
|
|
|
@ -1,5 +1,5 @@
/*
* Linux NET3: IP/IP protocol decoder.
*
* Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $
*
@ -35,14 +35,14 @@
Thanks for the great code!

-Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95

Minor tweaks:
Cleaned up the code a little and added some pre-1.3.0 tweaks.
dev->hard_header/hard_header_len changed to use no headers.
Comments/bracketing tweaked.
Made the tunnels use dev->name not tunnel: when error reporting.
Added tx_dropped stat

-Alan Cox (Alan.Cox@linux.org) 21 March 95

Reworked:
@ -52,7 +52,7 @@
Note: There is currently no firewall or ICMP handling done.

-Sam Lantinga (slouken@cs.ucdavis.edu) 02/13/96

*/

/* Things I wish I had known when writing the tunnel driver:
@ -75,7 +75,7 @@
"allocated" with skb_put(). You can then write up to skb->len
bytes to that buffer. If you need more, you can call skb_put()
again with the additional amount of space you need. You can
find out how much more space you can allocate by calling
"skb_tailroom(skb)".
Now, to add header space, call "skb_push(skb, header_len)".
This creates space at the beginning of the buffer and returns
@ -92,7 +92,7 @@
For comments look at net/ipv4/ip_gre.c --ANK
*/

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
@ -607,7 +607,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
stats->tx_dropped++;
dev_kfree_skb(skb);
tunnel->recursion--;
return 0;

126
net/ipv4/ipmr.c
@ -241,7 +241,7 @@ static struct net_device *ipmr_reg_vif(void)
|
|||
/*
|
||||
* Delete a VIF entry
|
||||
*/
|
||||
|
||||
|
||||
static int vif_delete(int vifi)
|
||||
{
|
||||
struct vif_device *v;
|
||||
|
@ -409,7 +409,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
|
|||
return -ENOBUFS;
|
||||
break;
|
||||
#endif
|
||||
case VIFF_TUNNEL:
|
||||
case VIFF_TUNNEL:
|
||||
dev = ipmr_new_tunnel(vifc);
|
||||
if (!dev)
|
||||
return -ENOBUFS;
|
||||
|
@ -501,7 +501,7 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void)
|
|||
/*
|
||||
* A cache entry has gone into a resolved state from queued
|
||||
*/
|
||||
|
||||
|
||||
static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
@ -538,7 +538,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
|
|||
*
|
||||
* Called under mrt_lock.
|
||||
*/
|
||||
|
||||
|
||||
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
@ -569,13 +569,13 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
|
|||
memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
|
||||
msg->im_msgtype = IGMPMSG_WHOLEPKT;
|
||||
msg->im_mbz = 0;
|
||||
msg->im_vif = reg_vif_num;
|
||||
msg->im_vif = reg_vif_num;
|
||||
skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
|
||||
skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
|
||||
} else
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
|
||||
{
|
||||
|
||||
/*
|
||||
* Copy the IP header
|
||||
*/
|
||||
|
@ -597,7 +597,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
|
|||
igmp->code = 0;
|
||||
skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */
|
||||
skb->h.raw = skb->nh.raw;
|
||||
}
|
||||
}
|
||||
|
||||
if (mroute_socket == NULL) {
|
||||
kfree_skb(skb);
|
||||
|
@ -619,7 +619,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
|
|||
/*
|
||||
* Queue a packet for resolution. It gets locked cache entry!
|
||||
*/
|
||||
|
||||
|
||||
static int
|
||||
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
|
||||
{
|
||||
|
@ -657,7 +657,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
|
|||
* Reflect first query at mrouted.
|
||||
*/
|
||||
if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
|
||||
/* If the report failed throw the cache entry
|
||||
/* If the report failed throw the cache entry
|
||||
out - Brad Parker
|
||||
*/
|
||||
spin_unlock_bh(&mfc_unres_lock);
|
||||
|
@ -783,11 +783,11 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
|
|||
/*
|
||||
* Close the multicast socket, and clear the vif tables etc
|
||||
*/
|
||||
|
||||
|
||||
static void mroute_clean_tables(struct sock *sk)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
||||
/*
|
||||
* Shut down all active vif entries
|
||||
*/
|
||||
|
@ -854,13 +854,13 @@ static void mrtsock_destruct(struct sock *sk)
|
|||
* that's how BSD mrouted happens to think. Maybe one day with a proper
|
||||
* MOSPF/PIM router set up we can clean this up.
|
||||
*/
|
||||
|
||||
|
||||
int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
|
||||
{
|
||||
int ret;
|
||||
struct vifctl vif;
|
||||
struct mfcctl mfc;
|
||||
|
||||
|
||||
if(optname!=MRT_INIT)
|
||||
{
|
||||
if(sk!=mroute_socket && !capable(CAP_NET_ADMIN))
|
||||
|
@ -901,7 +901,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
|
|||
if(optlen!=sizeof(vif))
|
||||
return -EINVAL;
|
||||
if (copy_from_user(&vif,optval,sizeof(vif)))
|
||||
return -EFAULT;
|
||||
return -EFAULT;
|
||||
if(vif.vifc_vifi >= MAXVIFS)
|
||||
return -ENFILE;
|
||||
rtnl_lock();
|
||||
|
@ -980,13 +980,13 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
|
|||
/*
|
||||
* Getsock opt support for the multicast routing system.
|
||||
*/
|
||||
|
||||
|
||||
int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
|
||||
{
|
||||
int olr;
|
||||
int val;
|
||||
|
||||
if(optname!=MRT_VERSION &&
|
||||
if(optname!=MRT_VERSION &&
|
||||
#ifdef CONFIG_IP_PIMSM
|
||||
optname!=MRT_PIM &&
|
||||
#endif
|
||||
|
@ -999,7 +999,7 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
|
|||
olr = min_t(unsigned int, olr, sizeof(int));
|
||||
if (olr < 0)
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
if(put_user(olr,optlen))
|
||||
return -EFAULT;
|
||||
if(optname==MRT_VERSION)
|
||||
|
@ -1018,19 +1018,19 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
|
|||
/*
|
||||
* The IP multicast ioctl support routines.
|
||||
*/
|
||||
|
||||
|
||||
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
|
||||
{
|
||||
struct sioc_sg_req sr;
|
||||
struct sioc_vif_req vr;
|
||||
struct vif_device *vif;
|
||||
struct mfc_cache *c;
|
||||
|
||||
|
||||
switch(cmd)
|
||||
{
|
||||
case SIOCGETVIFCNT:
|
||||
if (copy_from_user(&vr,arg,sizeof(vr)))
|
||||
return -EFAULT;
|
||||
return -EFAULT;
|
||||
if(vr.vifi>=maxvif)
|
||||
return -EINVAL;
|
||||
read_lock(&mrt_lock);
|
||||
|
@ -1096,7 +1096,7 @@ static struct notifier_block ip_mr_notifier={
|
|||
* This avoids tunnel drivers and other mess and gives us the speed so
|
||||
* important for multicast video.
|
||||
*/
|
||||
|
||||
|
||||
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
|
||||
{
|
||||
struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
|
||||
|
@ -1194,7 +1194,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
|
|||
encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
|
||||
|
||||
if (skb_cow(skb, encap)) {
|
||||
ip_rt_put(rt);
|
||||
ip_rt_put(rt);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
|
@ -1228,7 +1228,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
|
|||
* not mrouter) cannot join to more than one interface - it will
|
||||
* result in receiving multiple packets.
|
||||
*/
|
||||
NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
|
||||
NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
|
||||
ipmr_forward_finish);
|
||||
return;
|
||||
|
||||
|
@ -1289,7 +1289,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
|
|||
large chunk of pimd to kernel. Ough... --ANK
|
||||
*/
|
||||
(mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
|
||||
time_after(jiffies,
|
||||
time_after(jiffies,
|
||||
cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
|
||||
cache->mfc_un.res.last_assert = jiffies;
|
||||
ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
|
||||
|
@ -1426,14 +1426,14 @@ int pim_rcv_v1(struct sk_buff * skb)
|
|||
struct iphdr *encap;
|
||||
struct net_device *reg_dev = NULL;
|
||||
|
||||
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
|
||||
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
|
||||
goto drop;
|
||||
|
||||
pim = (struct igmphdr*)skb->h.raw;
|
||||
|
||||
if (!mroute_do_pim ||
|
||||
if (!mroute_do_pim ||
|
||||
skb->len < sizeof(*pim) + sizeof(*encap) ||
|
||||
pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
|
||||
pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
|
||||
goto drop;
|
||||
|
||||
encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
|
||||
|
@ -1445,7 +1445,7 @@ int pim_rcv_v1(struct sk_buff * skb)
|
|||
*/
|
||||
if (!MULTICAST(encap->daddr) ||
|
||||
encap->tot_len == 0 ||
|
||||
ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
|
||||
ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
|
||||
goto drop;
|
||||
|
||||
read_lock(&mrt_lock);
|
||||
|
@ -1455,7 +1455,7 @@ int pim_rcv_v1(struct sk_buff * skb)
|
|||
dev_hold(reg_dev);
|
||||
read_unlock(&mrt_lock);
|
||||
|
||||
if (reg_dev == NULL)
|
||||
if (reg_dev == NULL)
|
||||
goto drop;
|
||||
|
||||
skb->mac.raw = skb->nh.raw;
|
||||
|
@ -1486,13 +1486,13 @@ static int pim_rcv(struct sk_buff * skb)
|
|||
struct iphdr *encap;
|
||||
struct net_device *reg_dev = NULL;
|
||||
|
||||
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
|
||||
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
|
||||
goto drop;
|
||||
|
||||
pim = (struct pimreghdr*)skb->h.raw;
|
||||
if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
|
||||
if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
|
||||
(pim->flags&PIM_NULL_REGISTER) ||
|
||||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
|
||||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
|
||||
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
|
||||
goto drop;
|
||||
|
||||
|
@ -1500,7 +1500,7 @@ static int pim_rcv(struct sk_buff * skb)
|
|||
encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
|
||||
if (!MULTICAST(encap->daddr) ||
|
||||
encap->tot_len == 0 ||
|
||||
ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
|
||||
ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
|
||||
goto drop;
|
||||
|
||||
read_lock(&mrt_lock);
|
||||
|
@ -1510,7 +1510,7 @@ static int pim_rcv(struct sk_buff * skb)
|
|||
dev_hold(reg_dev);
|
||||
read_unlock(&mrt_lock);
|
||||
|
||||
if (reg_dev == NULL)
|
||||
if (reg_dev == NULL)
|
||||
goto drop;
|
||||
|
||||
skb->mac.raw = skb->nh.raw;
|
||||
|
@ -1616,7 +1616,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
|
|||
return err;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
#ifdef CONFIG_PROC_FS
|
||||
/*
|
||||
* The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
|
||||
*/
|
||||
|
@ -1630,7 +1630,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
|
|||
for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
|
||||
if(!VIF_EXISTS(iter->ct))
|
||||
continue;
|
||||
if (pos-- == 0)
|
||||
if (pos-- == 0)
|
||||
return &vif_table[iter->ct];
|
||||
}
|
||||
return NULL;
|
||||
|
@ -1639,7 +1639,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
|
|||
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
{
|
||||
read_lock(&mrt_lock);
|
||||
return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
|
||||
return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
|
||||
: SEQ_START_TOKEN;
|
||||
}
|
||||
|
||||
|
@ -1650,7 +1650,7 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|||
++*pos;
|
||||
if (v == SEQ_START_TOKEN)
|
||||
return ipmr_vif_seq_idx(iter, 0);
|
||||
|
||||
|
||||
while (++iter->ct < maxvif) {
|
||||
if(!VIF_EXISTS(iter->ct))
|
||||
continue;
|
||||
|
@ -1667,7 +1667,7 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
|
|||
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
|
||||
{
|
||||
if (v == SEQ_START_TOKEN) {
|
||||
seq_puts(seq,
|
||||
seq_puts(seq,
|
||||
"Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
|
||||
} else {
|
||||
const struct vif_device *vif = v;
|
||||
|
@ -1676,7 +1676,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
|
|||
seq_printf(seq,
|
||||
"%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
|
||||
vif - vif_table,
|
||||
name, vif->bytes_in, vif->pkt_in,
|
||||
name, vif->bytes_in, vif->pkt_in,
|
||||
vif->bytes_out, vif->pkt_out,
|
||||
vif->flags, vif->local, vif->remote);
|
||||
}
|
||||
|
@ -1695,7 +1695,7 @@ static int ipmr_vif_open(struct inode *inode, struct file *file)
|
|||
struct seq_file *seq;
|
||||
int rc = -ENOMEM;
|
||||
struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
|
||||
|
||||
|
||||
if (!s)
|
||||
goto out;
|
||||
|
||||
|
@ -1734,15 +1734,15 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
|
|||
|
||||
it->cache = mfc_cache_array;
|
||||
read_lock(&mrt_lock);
|
||||
for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
|
||||
for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
|
||||
if (pos-- == 0)
|
||||
for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
|
||||
for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
|
||||
if (pos-- == 0)
|
||||
return mfc;
|
||||
read_unlock(&mrt_lock);
|
||||
|
||||
it->cache = &mfc_unres_queue;
|
||||
spin_lock_bh(&mfc_unres_lock);
|
||||
for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
|
||||
for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
|
||||
if (pos-- == 0)
|
||||
return mfc;
|
||||
spin_unlock_bh(&mfc_unres_lock);
|
||||
|
@ -1757,7 +1757,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
|
|||
struct ipmr_mfc_iter *it = seq->private;
|
||||
it->cache = NULL;
|
||||
it->ct = 0;
|
||||
return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
|
||||
return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
|
||||
: SEQ_START_TOKEN;
|
||||
}
|
||||
|
||||
|
@ -1773,8 +1773,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|||
|
||||
if (mfc->next)
|
||||
return mfc->next;
|
||||
|
||||
if (it->cache == &mfc_unres_queue)
|
||||
|
||||
if (it->cache == &mfc_unres_queue)
|
||||
goto end_of_list;
|
||||
|
||||
BUG_ON(it->cache != mfc_cache_array);
|
||||
|
@ -1789,10 +1789,10 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|||
read_unlock(&mrt_lock);
|
||||
it->cache = &mfc_unres_queue;
|
||||
it->ct = 0;
|
||||
|
||||
|
||||
spin_lock_bh(&mfc_unres_lock);
|
||||
mfc = mfc_unres_queue;
|
||||
if (mfc)
|
||||
if (mfc)
|
||||
return mfc;
|
||||
|
||||
end_of_list:
|
||||
|
@ -1817,12 +1817,12 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
|
|||
int n;
|
||||
|
||||
if (v == SEQ_START_TOKEN) {
|
||||
seq_puts(seq,
|
||||
seq_puts(seq,
|
||||
"Group Origin Iif Pkts Bytes Wrong Oifs\n");
|
||||
} else {
|
||||
const struct mfc_cache *mfc = v;
|
||||
const struct ipmr_mfc_iter *it = seq->private;
|
||||
|
||||
|
||||
seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
|
||||
(unsigned long) mfc->mfc_mcastgrp,
|
||||
(unsigned long) mfc->mfc_origin,
|
||||
|
@ -1832,12 +1832,12 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
|
|||
mfc->mfc_un.res.wrong_if);
|
||||
|
||||
if (it->cache != &mfc_unres_queue) {
|
||||
for(n = mfc->mfc_un.res.minvif;
|
||||
for(n = mfc->mfc_un.res.minvif;
|
||||
n < mfc->mfc_un.res.maxvif; n++ ) {
|
||||
if(VIF_EXISTS(n)
|
||||
if(VIF_EXISTS(n)
|
||||
&& mfc->mfc_un.res.ttls[n] < 255)
|
||||
seq_printf(seq,
|
||||
" %2d:%-3d",
|
||||
seq_printf(seq,
|
||||
" %2d:%-3d",
|
||||
n, mfc->mfc_un.res.ttls[n]);
|
||||
}
|
||||
}
|
||||
|
@ -1858,7 +1858,7 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
|
|||
struct seq_file *seq;
|
||||
int rc = -ENOMEM;
|
||||
struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
|
||||
|
||||
|
||||
if (!s)
|
||||
goto out;
|
||||
|
||||
|
@ -1883,7 +1883,7 @@ static struct file_operations ipmr_mfc_fops = {
|
|||
.llseek = seq_lseek,
|
||||
.release = seq_release_private,
|
||||
};
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_IP_PIMSM_V2
|
||||
static struct net_protocol pim_protocol = {
|
||||
|
@ -1895,7 +1895,7 @@ static struct net_protocol pim_protocol = {
|
|||
/*
|
||||
* Setup for IP multicast routing
|
||||
*/
|
||||
|
||||
|
||||
void __init ip_mr_init(void)
|
||||
{
|
||||
mrt_cachep = kmem_cache_create("ip_mrt_cache",
|
||||
|
@ -1905,8 +1905,8 @@ void __init ip_mr_init(void)
|
|||
init_timer(&ipmr_expire_timer);
|
||||
ipmr_expire_timer.function=ipmr_expire_process;
|
||||
register_netdevice_notifier(&ip_mr_notifier);
|
||||
#ifdef CONFIG_PROC_FS
|
||||
#ifdef CONFIG_PROC_FS
|
||||
proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
|
||||
proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -494,8 +494,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
|
|||
* Checking the dest server status.
|
||||
*/
|
||||
if ((dest == NULL) ||
|
||||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
|
||||
(sysctl_ip_vs_expire_quiescent_template &&
|
||||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
|
||||
(sysctl_ip_vs_expire_quiescent_template &&
|
||||
(atomic_read(&dest->weight) == 0))) {
|
||||
IP_VS_DBG(9, "check_template: dest not available for "
|
||||
"protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
|
||||
|
@ -667,7 +667,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
|
|||
{
|
||||
int idx;
|
||||
struct ip_vs_conn *cp;
|
||||
|
||||
|
||||
for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
|
||||
ct_read_lock_bh(idx);
|
||||
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
|
||||
|
@ -695,7 +695,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|||
int idx;
|
||||
|
||||
++*pos;
|
||||
if (v == SEQ_START_TOKEN)
|
||||
if (v == SEQ_START_TOKEN)
|
||||
return ip_vs_conn_array(seq, 0);
|
||||
|
||||
/* more on same hash chain? */
|
||||
|
@ -710,7 +710,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|||
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
|
||||
seq->private = &ip_vs_conn_tab[idx];
|
||||
return cp;
|
||||
}
|
||||
}
|
||||
ct_read_unlock_bh(idx);
|
||||
}
|
||||
seq->private = NULL;
|
||||
|
|
|
@ -813,14 +813,14 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
|
|||
skb->nh.iph->saddr = cp->vaddr;
|
||||
ip_send_check(skb->nh.iph);
|
||||
|
||||
/* For policy routing, packets originating from this
|
||||
* machine itself may be routed differently to packets
|
||||
* passing through. We want this packet to be routed as
|
||||
* if it came from this machine itself. So re-compute
|
||||
* the routing information.
|
||||
*/
|
||||
if (ip_route_me_harder(pskb, RTN_LOCAL) != 0)
|
||||
goto drop;
|
||||
/* For policy routing, packets originating from this
|
||||
* machine itself may be routed differently to packets
|
||||
* passing through. We want this packet to be routed as
|
||||
* if it came from this machine itself. So re-compute
|
||||
* the routing information.
|
||||
*/
|
||||
if (ip_route_me_harder(pskb, RTN_LOCAL) != 0)
|
||||
goto drop;
|
||||
skb = *pskb;
|
||||
|
||||
IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
|
||||
|
@ -847,7 +847,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
|
|||
* forward to the right destination host if relevant.
|
||||
* Currently handles error types - unreachable, quench, ttl exceeded.
|
||||
*/
|
||||
static int
|
||||
static int
|
||||
ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
|
||||
{
|
||||
struct sk_buff *skb = *pskb;
|
||||
|
@ -863,7 +863,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
|
|||
/* reassemble IP fragments */
|
||||
if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
|
||||
skb = ip_vs_gather_frags(skb,
|
||||
hooknum == NF_IP_LOCAL_IN ?
|
||||
hooknum == NF_IP_LOCAL_IN ?
|
||||
IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD);
|
||||
if (!skb)
|
||||
return NF_STOLEN;
|
||||
|
|
|
@ -370,7 +370,7 @@ static int __init ip_vs_ftp_init(void)
|
|||
if (ret)
|
||||
break;
|
||||
IP_VS_INFO("%s: loaded support on port[%d] = %d\n",
|
||||
app->name, i, ports[i]);
|
||||
app->name, i, ports[i]);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
|
|
|
@ -118,7 +118,7 @@ static ctl_table vs_vars_table[] = {
|
|||
.procname = "lblc_expiration",
|
||||
.data = &sysctl_ip_vs_lblc_expiration,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_jiffies,
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
|
@ -128,7 +128,7 @@ static ctl_table vs_table[] = {
|
|||
{
|
||||
.ctl_name = NET_IPV4_VS,
|
||||
.procname = "vs",
|
||||
.mode = 0555,
|
||||
.mode = 0555,
|
||||
.child = vs_vars_table
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
|
@ -137,7 +137,7 @@ static ctl_table vs_table[] = {
|
|||
static ctl_table ipvs_ipv4_table[] = {
|
||||
{
|
||||
.ctl_name = NET_IPV4,
|
||||
.procname = "ipv4",
|
||||
.procname = "ipv4",
|
||||
.mode = 0555,
|
||||
.child = vs_table
|
||||
},
|
||||
|
@ -147,8 +147,8 @@ static ctl_table ipvs_ipv4_table[] = {
|
|||
static ctl_table lblc_root_table[] = {
|
||||
{
|
||||
.ctl_name = CTL_NET,
|
||||
.procname = "net",
|
||||
.mode = 0555,
|
||||
.procname = "net",
|
||||
.mode = 0555,
|
||||
.child = ipvs_ipv4_table
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
|
@ -288,7 +288,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
|
|||
|
||||
write_lock(&tbl->lock);
|
||||
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
|
||||
if (time_before(now,
|
||||
if (time_before(now,
|
||||
en->lastuse + sysctl_ip_vs_lblc_expiration))
|
||||
continue;
|
||||
|
||||
|
|
|
@ -307,7 +307,7 @@ static ctl_table vs_vars_table[] = {
|
|||
.procname = "lblcr_expiration",
|
||||
.data = &sysctl_ip_vs_lblcr_expiration,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_jiffies,
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
|
@ -326,7 +326,7 @@ static ctl_table vs_table[] = {
|
|||
static ctl_table ipvs_ipv4_table[] = {
|
||||
{
|
||||
.ctl_name = NET_IPV4,
|
||||
.procname = "ipv4",
|
||||
.procname = "ipv4",
|
||||
.mode = 0555,
|
||||
.child = vs_table
|
||||
},
|
||||
|
@ -336,8 +336,8 @@ static ctl_table ipvs_ipv4_table[] = {
|
|||
static ctl_table lblcr_root_table[] = {
|
||||
{
|
||||
.ctl_name = CTL_NET,
|
||||
.procname = "net",
|
||||
.mode = 0555,
|
||||
.procname = "net",
|
||||
.mode = 0555,
|
||||
.child = ipvs_ipv4_table
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
|
|
|
@ -68,7 +68,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
|
|||
q = q->next;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
dest = list_entry(q, struct ip_vs_dest, n_list);
|
||||
if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
|
||||
atomic_read(&dest->weight) > 0)
|
||||
|
|
|
@ -134,7 +134,7 @@ static void drr_select_route(const struct flowi *flp,
|
|||
struct rtable *first, struct rtable **rp)
|
||||
{
|
||||
struct rtable *nh, *result, *cur_min;
|
||||
int min_usecount = -1;
|
||||
int min_usecount = -1;
|
||||
int devidx = -1;
|
||||
int cur_min_devidx = -1;
|
||||
|
||||
|
@ -161,7 +161,7 @@ static void drr_select_route(const struct flowi *flp,
|
|||
*/
|
||||
devidx = __multipath_finddev(nh_ifidx);
|
||||
if (devidx == -1) {
|
||||
/* add the interface to the array
|
||||
/* add the interface to the array
|
||||
* SMP safe
|
||||
*/
|
||||
spin_lock_bh(&state_lock);
|
||||
|
|
|
@ -58,7 +58,7 @@ static void rr_select_route(const struct flowi *flp,
|
|||
*/
|
||||
result = NULL;
|
||||
for (nh = rcu_dereference(first); nh;
|
||||
nh = rcu_dereference(nh->u.rt_next)) {
|
||||
nh = rcu_dereference(nh->u.rt_next)) {
|
||||
if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
|
||||
multipath_comparekeys(&nh->fl, flp)) {
|
||||
nh->u.dst.lastuse = jiffies;
|
||||
|
|
|
@ -142,7 +142,7 @@ static unsigned char __multipath_lookup_weight(const struct flowi *fl,
|
|||
return weight;
|
||||
}
|
||||
|
||||
static void wrandom_init_state(void)
|
||||
static void wrandom_init_state(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -287,7 +287,7 @@ static void __multipath_free(struct rcu_head *head)
|
|||
|
||||
static void __multipath_free_dst(struct rcu_head *head)
|
||||
{
|
||||
struct multipath_dest *dst = container_of(head,
|
||||
struct multipath_dest *dst = container_of(head,
|
||||
struct multipath_dest,
|
||||
rcu);
|
||||
kfree(dst);
|
||||
|
|
|
@ -53,7 +53,7 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
|
|||
dst_release(&rt->u.dst);
|
||||
dst_release(odst);
|
||||
}
|
||||
|
||||
|
||||
if ((*pskb)->dst->error)
|
||||
return -1;
|
||||
|
||||
|
@ -70,7 +70,7 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
|
|||
struct sk_buff *nskb;
|
||||
|
||||
nskb = skb_realloc_headroom(*pskb, hh_len);
|
||||
if (!nskb)
|
||||
if (!nskb)
|
||||
return -1;
|
||||
if ((*pskb)->sk)
|
||||
skb_set_owner_w(nskb, (*pskb)->sk);
|
||||
|
@ -177,7 +177,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
|
|||
break;
|
||||
if ((protocol == 0 && !csum_fold(skb->csum)) ||
|
||||
!csum_tcpudp_magic(iph->saddr, iph->daddr,
|
||||
skb->len - dataoff, protocol,
|
||||
skb->len - dataoff, protocol,
|
||||
skb->csum)) {
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
break;
|
||||
|
|
|
@ -544,7 +544,7 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
|
|||
}
|
||||
|
||||
/* FIXME: underflows must be unconditional, standard verdicts
|
||||
< 0 (not ARPT_RETURN). --RR */
|
||||
< 0 (not ARPT_RETURN). --RR */
|
||||
|
||||
/* Clear counters and comefrom */
|
||||
e->counters = ((struct xt_counters) { 0, 0 });
|
||||
|
@ -869,8 +869,8 @@ static int do_replace(void __user *user, unsigned int len)
|
|||
/* Update module usage count based on number of rules */
|
||||
duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
|
||||
oldinfo->number, oldinfo->initial_entries, newinfo->number);
|
||||
if ((oldinfo->number > oldinfo->initial_entries) ||
|
||||
(newinfo->number <= oldinfo->initial_entries))
|
||||
if ((oldinfo->number > oldinfo->initial_entries) ||
|
||||
(newinfo->number <= oldinfo->initial_entries))
|
||||
module_put(t->me);
|
||||
if ((oldinfo->number > oldinfo->initial_entries) &&
|
||||
(newinfo->number <= oldinfo->initial_entries))
|
||||
|
|
|
@ -67,7 +67,7 @@ target(struct sk_buff **pskb,
|
|||
|
||||
static int
|
||||
checkentry(const char *tablename, const void *e, const struct xt_target *target,
|
||||
void *targinfo, unsigned int hook_mask)
|
||||
void *targinfo, unsigned int hook_mask)
|
||||
{
|
||||
const struct arpt_mangle *mangle = targinfo;
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
*
|
||||
* Module load syntax:
|
||||
* insmod ip_conntrack_amanda.o [master_timeout=n]
|
||||
*
|
||||
*
|
||||
* Where master_timeout is the timeout (in seconds) of the master
|
||||
* connection (port 10080). This defaults to 5 minutes but if
|
||||
* your clients take longer than 5 minutes to do their work
|
||||
|
@ -84,7 +84,7 @@ static struct {
|
|||
};
|
||||
|
||||
static int help(struct sk_buff **pskb,
|
||||
struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
|
||||
struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
struct ts_state ts;
|
||||
struct ip_conntrack_expect *exp;
|
||||
|
|
|
@@ -2,7 +2,7 @@
/* (C) 1999-2001 Paul `Rusty' Russell

@@ -99,7 +99,7 @@ __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)

@@ -147,9 +147,9 @@ static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
(tuple->src.u.all | (tuple->dst.u.all << 16)),
rnd) % size);
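For reference, the __hash_conntrack() hunk above folds the tuple into three 32-bit words — the source address, the destination address XORed with the protocol number, and the two 16-bit ids packed together — and reduces the mix modulo the table size. A minimal userspace sketch of the same folding, with a stand-in mixer in place of the kernel's jhash_3words() and an illustrative hash_tuple() name (only the folding of the tuple fields is meant to match; the mixer itself is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Stand-in mixer; the kernel uses jhash_3words() seeded with a random value. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	a += seed; b += seed; c += seed;
	a ^= (b >> 13); a *= 0x9e3779b1u;
	b ^= (c >> 11); b *= 0x85ebca6bu;
	c ^= (a >> 17); c *= 0xc2b2ae35u;
	return a ^ b ^ c;
}

/* Same folding as the hunk above: saddr, daddr ^ protonum, and both
 * 16-bit ids packed into one 32-bit word, reduced modulo the table size. */
static uint32_t hash_tuple(uint32_t saddr, uint32_t daddr, uint8_t protonum,
			   uint16_t src_id, uint16_t dst_id,
			   unsigned int size, uint32_t rnd)
{
	return mix3(saddr, daddr ^ protonum,
		    (uint32_t)src_id | ((uint32_t)dst_id << 16), rnd) % size;
}

int main(void)
{
	printf("bucket: %u\n",
	       hash_tuple(0xc0a80001u, 0xc0a80002u, 6, 40000, 80, 8192, 0x5eedu));
	return 0;
}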
@@ -219,7 +219,7 @@ struct ip_conntrack_expect *

@@ -232,7 +232,7 @@ struct ip_conntrack_expect *

@@ -398,7 +398,7 @@ ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
unsigned int repl_hash)

@@ -446,15 +446,15 @@ __ip_conntrack_confirm(struct sk_buff **pskb)
confirmed us. */
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */

@@ -602,7 +602,7 @@ ip_conntrack_proto_find_get(u_int8_t protocol)

@@ -746,7 +746,7 @@ resolve_normal_ct(struct sk_buff *skb,
if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,

@@ -771,7 +771,7 @@ resolve_normal_ct(struct sk_buff *skb,
*ctinfo = IP_CT_ESTABLISHED;

@@ -822,7 +822,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
} else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))

@@ -836,7 +836,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
if (proto->error != NULL

@@ -876,7 +876,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
return ip_ct_invert_tuple(inverse, orig,

@@ -885,7 +885,7 @@ static inline int expect_clash(const struct ip_conntrack_expect *a,
otherwise they clash */

@@ -923,7 +923,7 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
* conntracks. During the conntrack destruction, the expectations are

@@ -1012,7 +1012,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
if (expect->master->helper->max_expected &&

@@ -1021,7 +1021,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
return ret;

@@ -1069,7 +1069,7 @@ static inline void unhelp(struct ip_conntrack_tuple_hash *i,
ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));

@@ -1105,8 +1105,8 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
void __ip_ct_refresh_acct(struct ip_conntrack *ct,
enum ip_conntrack_info ctinfo,

@@ -1140,7 +1140,7 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct,
ct->counters[CTINFO2DIR(ctinfo)].bytes +=

@@ -1194,7 +1194,7 @@ ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
local_bh_disable();

@@ -1211,7 +1211,7 @@ static void ip_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)

@@ -1279,7 +1279,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)

@@ -1347,7 +1347,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
free_pages((unsigned long)hash,

@@ -1358,8 +1358,8 @@ void ip_conntrack_cleanup(void)
netfilter framework. Roll on, two-stage module
delete... */

@@ -1385,11 +1385,11 @@ static struct list_head *alloc_hashtable(int size, int *vmalloced)
*vmalloced = 0;
hash = (void*)__get_free_pages(GFP_KERNEL,
if (!hash) {

@@ -1422,7 +1422,7 @@ static int set_hashsize(const char *val, struct kernel_param *kp)
/* We have to rehash for the new table anyway, so we also can

@@ -1460,7 +1460,7 @@ int __init ip_conntrack_init(void)
if (!ip_conntrack_htable_size) {

@@ -1490,8 +1490,8 @@ int __init ip_conntrack_init(void)
sizeof(struct ip_conntrack), 0,
0, NULL, NULL);
@@ -1,6 +1,6 @@
/* (C) 1999-2001 Paul `Rusty' Russell

@@ -169,7 +169,7 @@ static int try_eprt(const char *data, size_t dlen, u_int32_t array[6],
delimiter again. */

@@ -344,14 +344,14 @@ static int help(struct sk_buff **pskb,
ct_ftp_info->seq_aft_nl[0][dir]
in EPSV responses) */

@@ -386,7 +386,7 @@ static int help(struct sk_buff **pskb,

@@ -504,7 +504,7 @@ static int __init ip_conntrack_ftp_init(void)
DEBUGP("ip_ct_ftp: registering helper for port %d\n",
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
"if both endpoints are on different sides "
@@ -560,7 +560,7 @@ conntrack_pptp_help(struct sk_buff **pskb,
datalen = tcplen - tcph->doff * 4;

@@ -624,7 +624,7 @@ static struct ip_conntrack_helper pptp = {
.u = { .tcp = { .port =

@@ -638,7 +638,7 @@ static struct ip_conntrack_helper pptp = {
@@ -1,6 +1,6 @@
* based on RR's ip_conntrack_ftp.c

@@ -12,12 +12,12 @@
* With dcc_timeout you can specify how long the system waits for

@@ -63,7 +63,7 @@ static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
__FILE__, __FUNCTION__ , ## args)

@@ -71,7 +71,7 @@ static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
return value: -1 on failure, 0 on success

@@ -90,7 +90,7 @@ static int parse_dcc(char *data, char *data_end, u_int32_t *ip,
if (data >= data_end)

@@ -171,7 +171,7 @@ static int help(struct sk_buff **pskb,
/* we have at least

@@ -260,7 +260,7 @@ static int __init ip_conntrack_irc_init(void)

@@ -297,7 +297,7 @@ static int __init ip_conntrack_irc_init(void)
/* This function is intentionally _NOT_ defined as __exit, because
@@ -42,7 +42,7 @@ module_param(timeout, uint, 0400);
struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
@@ -6,10 +6,10 @@
* I've reworked this stuff to use attributes instead of conntrack
* Initial connection tracking via netlink development funded and

@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL");
ctnetlink_dump_tuples_proto(struct sk_buff *skb,

@@ -56,7 +56,7 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb,

@@ -70,7 +70,7 @@ ctnetlink_dump_tuples_ip(struct sk_buff *skb,

@@ -121,7 +121,7 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct)

@@ -141,7 +141,7 @@ ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct ip_conntrack *ct)

@@ -164,7 +164,7 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct ip_conntrack *ct)

@@ -236,7 +236,7 @@ static inline int

@@ -248,7 +248,7 @@ ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct)
int event, int nowait,

@@ -271,7 +271,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,

@@ -299,7 +299,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
unsigned long events, void *ptr)

@@ -324,7 +324,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
} else

@@ -349,7 +349,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,

@@ -368,16 +368,16 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
goto nfattr_failure;

@@ -426,7 +426,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq,

@@ -488,7 +488,7 @@ static const size_t cta_min_proto[CTA_PROTO_MAX] = {
ctnetlink_parse_tuple_proto(struct nfattr *attr,

@@ -508,9 +508,9 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,

@@ -595,7 +595,7 @@ ctnetlink_parse_nat(struct nfattr *nat,

@@ -647,7 +647,7 @@ static const size_t cta_min[CTA_MAX] = {
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,

@@ -676,14 +676,14 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,

@@ -693,7 +693,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,

@@ -714,8 +714,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
ctnetlink_dump_table,
ctnetlink_done)) != 0)

@@ -751,7 +751,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,

@@ -779,12 +779,12 @@ ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])

@@ -857,7 +857,7 @@ ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[])

@@ -867,7 +867,7 @@ static inline int

@@ -891,7 +891,7 @@ ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[])
ip_conntrack_proto_put(proto);

@@ -934,7 +934,7 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
ctnetlink_create_conntrack(struct nfattr *cda[],

@@ -943,7 +943,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
return -ENOMEM;

@@ -979,13 +979,13 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
err:
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
@@ -1039,9 +1039,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
/***********************************************************************
* EXPECT
***********************************************************************/

@@ -1049,7 +1049,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb,

@@ -1059,7 +1059,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb,
}

@@ -1090,7 +1090,7 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
const struct ip_conntrack_expect *exp)

@@ -1104,20 +1104,20 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
int event,
int nowait,

@@ -1216,7 +1216,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
out:

@@ -1228,7 +1228,7 @@ static const size_t cta_min_exp[CTA_EXPECT_MAX] = {
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,

@@ -1247,7 +1247,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
ctnetlink_exp_dump_table,

@@ -1275,14 +1275,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
}
err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,

@@ -1300,7 +1300,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,

@@ -1333,7 +1333,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
/* have to put what we 'get' above.

@@ -1348,7 +1348,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
if (exp->master->helper == h

@@ -1413,7 +1413,7 @@ ctnetlink_create_expect(struct nfattr *cda[])

@@ -1423,7 +1423,7 @@ ctnetlink_create_expect(struct nfattr *cda[])
out:
@@ -94,9 +94,9 @@ static int icmp_packet(struct ip_conntrack *ct,
won't actually vanish as we still have skb, and del_timer
means this will only run once even if count hits zero twice
(theoretically possible with SMP) */

@@ -114,11 +114,11 @@ static int icmp_packet(struct ip_conntrack *ct,
static const u_int8_t valid_new[] = {
[ICMP_ADDRESS] = 1
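The valid_new[] table in the hunk above is the usual sparse-array idiom: index by ICMP type, but only after checking the type against the array size, so unknown or out-of-range types cannot open a new conntrack entry. A standalone sketch of the same guard (icmp_type_may_create() is an illustrative name; the numeric type values are the standard ICMP assignments):

#include <stdbool.h>
#include <stdio.h>

/* Standard ICMP type numbers for the entries shown above. */
#define ICMP_ECHO		 8
#define ICMP_TIMESTAMP		13
#define ICMP_INFO_REQUEST	15
#define ICMP_ADDRESS		17

/* Illustrative re-statement of the icmp_new() check: a sparse lookup table,
 * guarded by a bounds test so unlisted or out-of-range types return false. */
static bool icmp_type_may_create(unsigned int type)
{
	static const unsigned char valid_new[] = {
		[ICMP_ECHO] = 1,
		[ICMP_TIMESTAMP] = 1,
		[ICMP_INFO_REQUEST] = 1,
		[ICMP_ADDRESS] = 1,
	};

	if (type >= sizeof(valid_new))
		return false;
	return valid_new[type];
}

int main(void)
{
	printf("echo=%d redirect=%d bogus=%d\n",
	       icmp_type_may_create(ICMP_ECHO),
	       icmp_type_may_create(5),
	       icmp_type_may_create(200));
	return 0;
}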
@@ -282,7 +282,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
tuple->dst.u.icmp.type =
@@ -1,9 +1,9 @@
* SCTP is defined in RFC 2960. References to various sections in this code

@@ -38,7 +38,7 @@
closely. They're more complex. --RR

@@ -87,32 +87,32 @@ static const unsigned int * sctp_timeouts[]
/*
NOTE: These state names are tantalizingly similar to the states of an
considering that these are the states of the connection and not of an end
COOKIE WAIT - We have seen an INIT chunk in the original direction, or also
an INIT_ACK chunk in the reply direction.
to that of the SHUTDOWN chunk.
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
the SHUTDOWN chunk. Connection is closed.
- I have assumed that the first INIT is in the original direction.
- Check the error type in the reply dir before transitioning from

@@ -229,7 +229,7 @@ static int do_basic_checks(struct ip_conntrack *conntrack,
if (sch->type == SCTP_CID_INIT

@@ -269,42 +269,42 @@ static int new_state(enum ip_conntrack_dir dir,
case SCTP_CID_INIT:
case SCTP_CID_INIT_ACK:
case SCTP_CID_ABORT:
case SCTP_CID_SHUTDOWN:
case SCTP_CID_SHUTDOWN_ACK:
case SCTP_CID_ERROR:
case SCTP_CID_COOKIE_ECHO:
case SCTP_CID_COOKIE_ACK:
case SCTP_CID_SHUTDOWN_COMPLETE:
DEBUGP("Unknown chunk type, Will stay in %s\n",
DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n",
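The switch in the hunk above only converts a chunk type into a small column index; the actual transition is then a table lookup of the form sctp_conntracks[dir][i][cur_state]. A compact userspace rendering of that chunk-to-index step (chunk_index() is an illustrative name, the SCTP_CID_* values are the RFC 2960 chunk ids, and -1 stands in for the "no state change" default):

#include <stdio.h>

/* RFC 2960 chunk type numbers for the chunks handled above. */
enum {
	SCTP_CID_INIT			= 1,
	SCTP_CID_INIT_ACK		= 2,
	SCTP_CID_ABORT			= 6,
	SCTP_CID_SHUTDOWN		= 7,
	SCTP_CID_SHUTDOWN_ACK		= 8,
	SCTP_CID_ERROR			= 9,
	SCTP_CID_COOKIE_ECHO		= 10,
	SCTP_CID_COOKIE_ACK		= 11,
	SCTP_CID_SHUTDOWN_COMPLETE	= 14,
};

/* Mirror of the switch above: map a chunk type to the row index used by
 * sctp_conntracks[dir][i][cur_state]; -1 means the chunk (DATA, SACK,
 * HEARTBEAT, ...) does not change the tracked state. */
static int chunk_index(int chunk_type)
{
	switch (chunk_type) {
	case SCTP_CID_INIT:			return 0;
	case SCTP_CID_INIT_ACK:			return 1;
	case SCTP_CID_ABORT:			return 2;
	case SCTP_CID_SHUTDOWN:			return 3;
	case SCTP_CID_SHUTDOWN_ACK:		return 4;
	case SCTP_CID_ERROR:			return 5;
	case SCTP_CID_COOKIE_ECHO:		return 6;
	case SCTP_CID_COOKIE_ACK:		return 7;
	case SCTP_CID_SHUTDOWN_COMPLETE:	return 8;
	default:				return -1;
	}
}

int main(void)
{
	printf("INIT -> %d, COOKIE_ECHO -> %d, DATA(0) -> %d\n",
	       chunk_index(SCTP_CID_INIT), chunk_index(SCTP_CID_COOKIE_ECHO),
	       chunk_index(0));
	return 0;
}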
@@ -367,7 +367,7 @@ static int sctp_packet(struct ip_conntrack *conntrack,
[1 - CTINFO2DIR(ctinfo)]

@@ -392,17 +392,17 @@ static int sctp_packet(struct ip_conntrack *conntrack,
if (sch->type == SCTP_CID_INIT
sizeof(_inithdr), &_inithdr);
DEBUGP("Setting vtag %x for dir %d\n",

@@ -427,7 +427,7 @@ static int sctp_packet(struct ip_conntrack *conntrack,
static int sctp_new(struct ip_conntrack *conntrack,

@@ -457,7 +457,7 @@ static int sctp_new(struct ip_conntrack *conntrack,
newconntrack = new_state (IP_CT_DIR_ORIGINAL,

@@ -472,14 +472,14 @@ static int sctp_new(struct ip_conntrack *conntrack,
sizeof(_inithdr), &_inithdr);
DEBUGP("Setting vtag %x for new conn\n",
conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] =

@@ -489,7 +489,7 @@ static int sctp_new(struct ip_conntrack *conntrack,
DEBUGP("Setting vtag %x for new conn OOTB\n",

@@ -500,16 +500,16 @@ static int sctp_new(struct ip_conntrack *conntrack,
static struct ip_conntrack_protocol ip_conntrack_protocol_sctp = {
.proto = IPPROTO_SCTP,
.pkt_to_tuple = sctp_pkt_to_tuple,
.invert_tuple = sctp_invert_tuple,
.print_tuple = sctp_print_tuple,
.packet = sctp_packet,
.new = sctp_new,
.destroy = NULL,

@@ -603,7 +603,7 @@ static ctl_table ip_ct_net_table[] = {
.mode = 0555,

@@ -638,7 +638,7 @@ static int __init ip_conntrack_proto_sctp_init(void)
DEBUGP("SCTP conntrack module loading %s\n",

@@ -647,7 +647,7 @@ static void __exit ip_conntrack_proto_sctp_fini(void)
unregister_sysctl_table(ip_ct_sysctl_header);
@@ -45,8 +45,8 @@
/* "Be conservative in what you do,
be liberal in what you accept from others."

@@ -54,8 +54,8 @@ int ip_ct_tcp_be_liberal __read_mostly = 0;
/* Max number of the retransmitted packets without receiving an (acceptable)
ACK from the destination. If this number is reached, a shorter timer

@@ -74,7 +74,7 @@ static const char *tcp_conntrack_names[] = {

@@ -90,10 +90,10 @@ unsigned int ip_ct_tcp_timeout_time_wait __read_mostly = 2 MINS;
Linux uses 15 packets as limit, which corresponds

@@ -106,7 +106,7 @@ static const unsigned int * tcp_timeouts[]

@@ -129,13 +129,13 @@ enum tcp_bit_set {
* It is assumed that the destinations can't receive segments

@@ -145,11 +145,11 @@ enum tcp_bit_set {
* SYN_SENT: SYN-only packet seen
* CLOSE_WAIT: ACK seen (after FIN)

@@ -157,8 +157,8 @@ enum tcp_bit_set {
* if they may be either invalid or valid
* and the receiver may send back a connection

@@ -175,7 +175,7 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
* are errors. Receiver will reply with RST

@@ -188,10 +188,10 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
* - either it tries to set up a simultaneous open, which is
* during the session set-up. The SYN will be retransmitted

@@ -201,9 +201,9 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
* sES -> sFW
* the last ACK.

@@ -281,7 +281,7 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
}

@@ -337,7 +337,7 @@ static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,

@@ -367,7 +367,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr);

@@ -376,7 +376,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
ct->proto.tcp.state =

@@ -395,30 +395,30 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph)
td_maxend = max(sack + max(win,1)) seen in reply packets
The upper bound limit for a valid ack is not ignored -
we doesn't have to deal with fragments.
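The comment block above states the four acceptance bounds that tcp_in_window() enforces: upper and lower bounds on the data range (I, II) and on the acknowledged sequence (III, IV). A small standalone sketch of those checks with wrap-around-safe compares in the style of before()/after() — the struct and function names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Wrap-around-safe sequence-number compares (same idea as before()/after()). */
static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return (int32_t)(a - b) > 0; }

/* Illustrative per-direction bookkeeping, named after the fields used above. */
struct dir_state { uint32_t td_end, td_maxend, td_maxwin; };

/* Bounds I..IV from the comment block above, for one segment:
 *   seq  .. first sequence number of the segment
 *   end  .. seq + len (as computed by segment_seq_plus_len())
 *   sack .. highest right edge of any SACK block, else the ack value
 *   ack  .. acknowledgment number */
static bool in_window(const struct dir_state *sender,
		      const struct dir_state *receiver,
		      uint32_t seq, uint32_t end,
		      uint32_t sack, uint32_t ack,
		      uint32_t maxackwindow)
{
	return !seq_after(seq, sender->td_maxend) &&                      /* I   */
	       !seq_before(end, sender->td_end - receiver->td_maxwin) &&  /* II  */
	       !seq_after(sack, receiver->td_end) &&                      /* III */
	       !seq_before(ack, receiver->td_end - maxackwindow);         /* IV  */
}

int main(void)
{
	struct dir_state snd = { .td_end = 2000, .td_maxend = 3000, .td_maxwin = 1000 };
	struct dir_state rcv = { .td_end = 5000, .td_maxend = 6000, .td_maxwin = 1000 };

	printf("%d\n", in_window(&snd, &rcv, 2100, 2200, 5000, 5000, 66000));
	return 0;
}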
@ -429,25 +429,25 @@ static inline __u32 segment_seq_plus_len(__u32 seq,
|
|||
return (seq + len - (iph->ihl + tcph->doff)*4
|
||||
+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
|
||||
}
|
||||
|
||||
|
||||
/* Fixme: what about big packets? */
|
||||
#define MAXACKWINCONST 66000
|
||||
#define MAXACKWINDOW(sender) \
|
||||
((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
|
||||
: MAXACKWINCONST)
|
||||
|
||||
|
||||
/*
|
||||
* Simplified tcp_parse_options routine from tcp_input.c
|
||||
*/
|
||||
static void tcp_options(const struct sk_buff *skb,
|
||||
struct iphdr *iph,
|
||||
struct tcphdr *tcph,
|
||||
struct tcphdr *tcph,
|
||||
struct ip_ct_tcp_state *state)
|
||||
{
|
||||
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
|
||||
unsigned char *ptr;
|
||||
int length = (tcph->doff*4) - sizeof(struct tcphdr);
|
||||
|
||||
|
||||
if (!length)
|
||||
return;
|
||||
|
||||
|
@ -456,13 +456,13 @@ static void tcp_options(const struct sk_buff *skb,
|
|||
length, buff);
|
||||
BUG_ON(ptr == NULL);
|
||||
|
||||
state->td_scale =
|
||||
state->td_scale =
|
||||
state->flags = 0;
|
||||
|
||||
|
||||
while (length > 0) {
|
||||
int opcode=*ptr++;
|
||||
int opsize;
|
||||
|
||||
|
||||
switch (opcode) {
|
||||
case TCPOPT_EOL:
|
||||
return;
|
||||
|
@ -476,13 +476,13 @@ static void tcp_options(const struct sk_buff *skb,
|
|||
if (opsize > length)
|
||||
break; /* don't parse partial options */
|
||||
|
||||
if (opcode == TCPOPT_SACK_PERM
|
||||
if (opcode == TCPOPT_SACK_PERM
|
||||
&& opsize == TCPOLEN_SACK_PERM)
|
||||
state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
|
||||
else if (opcode == TCPOPT_WINDOW
|
||||
&& opsize == TCPOLEN_WINDOW) {
|
||||
state->td_scale = *(u_int8_t *)ptr;
|
||||
|
||||
|
||||
if (state->td_scale > 14) {
|
||||
/* See RFC1323 */
|
||||
state->td_scale = 14;
|
||||
|
@ -517,16 +517,16 @@ static void tcp_sack(const struct sk_buff *skb,
	/* Fast path for timestamp-only option */
	if (length == TCPOLEN_TSTAMP_ALIGNED*4
	    && *(__be32 *)ptr ==
		__constant_htonl((TCPOPT_NOP << 24)
				 | (TCPOPT_NOP << 16)
				 | (TCPOPT_TIMESTAMP << 8)
				 | TCPOLEN_TIMESTAMP))
		__constant_htonl((TCPOPT_NOP << 24)
				 | (TCPOPT_NOP << 16)
				 | (TCPOPT_TIMESTAMP << 8)
				 | TCPOLEN_TIMESTAMP))
		return;

	while (length > 0) {
		int opcode=*ptr++;
		int opsize, i;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
@ -540,16 +540,16 @@ static void tcp_sack(const struct sk_buff *skb,
			if (opsize > length)
				break;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK
			    && opsize >= (TCPOLEN_SACK_BASE
					  + TCPOLEN_SACK_PERBLOCK)
			    && !((opsize - TCPOLEN_SACK_BASE)
				 % TCPOLEN_SACK_PERBLOCK)) {
				for (i = 0;
				     i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
			if (opcode == TCPOPT_SACK
			    && opsize >= (TCPOLEN_SACK_BASE
					  + TCPOLEN_SACK_PERBLOCK)
			    && !((opsize - TCPOLEN_SACK_BASE)
				 % TCPOLEN_SACK_PERBLOCK)) {
				for (i = 0;
				     i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
					tmp = ntohl(*((__be32 *)(ptr+i)+1));

					if (after(tmp, *sack))
						*sack = tmp;
				}

@ -561,18 +561,18 @@ static int tcp_in_window(struct ip_ct_tcp *state,
	}
}

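/*
 * Illustrative sketch only (not part of this patch): all tcp_sack() keeps
 * from the option is the highest right edge across the SACK blocks.
 * sack_highest_right_edge() and seq_after() are invented stand-ins; the data
 * pointer is assumed to address the block list of one already length-checked
 * TCPOPT_SACK option (pairs of 32-bit big-endian sequence numbers).
 */
#include <stddef.h>
#include <stdint.h>

#define SACK_PERBLOCK	8	/* left edge + right edge, 4 bytes each */

static int seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

static void sack_highest_right_edge(const uint8_t *data, size_t datalen,
				    uint32_t *sack)
{
	size_t i;

	for (i = 0; i + SACK_PERBLOCK <= datalen; i += SACK_PERBLOCK) {
		const uint8_t *p = data + i + 4;	/* second word = right edge */
		uint32_t right = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
				 ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];

		if (seq_after(right, *sack))
			*sack = right;
	}
}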
static int tcp_in_window(struct ip_ct_tcp *state,
			 enum ip_conntrack_dir dir,
			 unsigned int index,
			 const struct sk_buff *skb,
			 struct iphdr *iph,
			 struct tcphdr *tcph)
static int tcp_in_window(struct ip_ct_tcp *state,
			 enum ip_conntrack_dir dir,
			 unsigned int index,
			 const struct sk_buff *skb,
			 struct iphdr *iph,
			 struct tcphdr *tcph)
{
	struct ip_ct_tcp_state *sender = &state->seen[dir];
	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
	__u32 seq, ack, sack, end, win, swin;
	int res;

	/*
	 * Get the required data from the packet.
	 */

@ -580,23 +580,23 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
ack = sack = ntohl(tcph->ack_seq);
|
||||
win = ntohs(tcph->window);
|
||||
end = segment_seq_plus_len(seq, skb->len, iph, tcph);
|
||||
|
||||
|
||||
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
|
||||
tcp_sack(skb, iph, tcph, &sack);
|
||||
|
||||
|
||||
DEBUGP("tcp_in_window: START\n");
|
||||
DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
|
||||
"seq=%u ack=%u sack=%u win=%u end=%u\n",
|
||||
NIPQUAD(iph->saddr), ntohs(tcph->source),
|
||||
NIPQUAD(iph->saddr), ntohs(tcph->source),
|
||||
NIPQUAD(iph->daddr), ntohs(tcph->dest),
|
||||
seq, ack, sack, win, end);
|
||||
DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
|
||||
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
|
||||
sender->td_end, sender->td_maxend, sender->td_maxwin,
|
||||
sender->td_scale,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
|
||||
sender->td_scale,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
|
||||
receiver->td_scale);
|
||||
|
||||
|
||||
if (sender->td_end == 0) {
|
||||
/*
|
||||
* Initialize sender data.
|
||||
|
@ -605,26 +605,26 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
/*
|
||||
* Outgoing SYN-ACK in reply to a SYN.
|
||||
*/
|
||||
sender->td_end =
|
||||
sender->td_end =
|
||||
sender->td_maxend = end;
|
||||
sender->td_maxwin = (win == 0 ? 1 : win);
|
||||
|
||||
tcp_options(skb, iph, tcph, sender);
|
||||
/*
|
||||
/*
|
||||
* RFC 1323:
|
||||
* Both sides must send the Window Scale option
|
||||
* to enable window scaling in either direction.
|
||||
*/
|
||||
if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
|
||||
&& receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
|
||||
sender->td_scale =
|
||||
sender->td_scale =
|
||||
receiver->td_scale = 0;
|
||||
} else {
|
||||
/*
|
||||
* We are in the middle of a connection,
|
||||
* its history is lost for us.
|
||||
* Let's try to use the data from the packet.
|
||||
*/
|
||||
*/
|
||||
sender->td_end = end;
|
||||
sender->td_maxwin = (win == 0 ? 1 : win);
|
||||
sender->td_maxend = end + sender->td_maxwin;
|
||||
|
@ -632,11 +632,11 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
} else if (((state->state == TCP_CONNTRACK_SYN_SENT
|
||||
&& dir == IP_CT_DIR_ORIGINAL)
|
||||
|| (state->state == TCP_CONNTRACK_SYN_RECV
|
||||
&& dir == IP_CT_DIR_REPLY))
|
||||
&& dir == IP_CT_DIR_REPLY))
|
||||
&& after(end, sender->td_end)) {
|
||||
/*
|
||||
* RFC 793: "if a TCP is reinitialized ... then it need
|
||||
* not wait at all; it must only be sure to use sequence
|
||||
* not wait at all; it must only be sure to use sequence
|
||||
* numbers larger than those recently used."
|
||||
*/
|
||||
sender->td_end =
|
||||
|
@ -645,14 +645,14 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
|
||||
tcp_options(skb, iph, tcph, sender);
|
||||
}
|
||||
|
||||
|
||||
if (!(tcph->ack)) {
|
||||
/*
|
||||
* If there is no ACK, just pretend it was set and OK.
|
||||
*/
|
||||
ack = sack = receiver->td_end;
|
||||
} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
|
||||
(TCP_FLAG_ACK|TCP_FLAG_RST))
|
||||
} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
|
||||
(TCP_FLAG_ACK|TCP_FLAG_RST))
|
||||
&& (ack == 0)) {
|
||||
/*
|
||||
* Broken TCP stacks, that set ACK in RST packets as well
|
||||
|
@ -662,8 +662,8 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
}
|
||||
|
||||
if (seq == end
|
||||
&& (!tcph->rst
|
||||
|| (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
|
||||
&& (!tcph->rst
|
||||
|| (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
|
||||
/*
|
||||
* Packets contains no data: we assume it is valid
|
||||
* and check the ack value only.
|
||||
|
@ -672,7 +672,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
* SYN.
|
||||
*/
|
||||
seq = end = sender->td_end;
|
||||
|
||||
|
||||
DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
|
||||
"seq=%u ack=%u sack =%u win=%u end=%u\n",
|
||||
NIPQUAD(iph->saddr), ntohs(tcph->source),
|
||||
|
@ -681,26 +681,26 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
|
||||
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
|
||||
sender->td_end, sender->td_maxend, sender->td_maxwin,
|
||||
sender->td_scale,
|
||||
sender->td_scale,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
|
||||
receiver->td_scale);
|
||||
|
||||
|
||||
DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
|
||||
before(seq, sender->td_maxend + 1),
|
||||
after(end, sender->td_end - receiver->td_maxwin - 1),
|
||||
before(sack, receiver->td_end + 1),
|
||||
after(ack, receiver->td_end - MAXACKWINDOW(sender)));
|
||||
|
||||
after(end, sender->td_end - receiver->td_maxwin - 1),
|
||||
before(sack, receiver->td_end + 1),
|
||||
after(ack, receiver->td_end - MAXACKWINDOW(sender)));
|
||||
|
||||
if (before(seq, sender->td_maxend + 1) &&
|
||||
after(end, sender->td_end - receiver->td_maxwin - 1) &&
|
||||
before(sack, receiver->td_end + 1) &&
|
||||
after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
|
||||
/*
|
||||
/*
|
||||
* Take into account window scaling (RFC 1323).
|
||||
*/
|
||||
if (!tcph->syn)
|
||||
win <<= sender->td_scale;
|
||||
|
||||
|
||||
/*
|
||||
* Update sender data.
|
||||
*/
|
||||
|
@ -720,7 +720,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
receiver->td_maxend++;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Check retransmissions.
|
||||
*/
|
||||
if (index == TCP_ACK_SET) {
|
||||
|
@ -756,11 +756,11 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
: "ACK is over the upper bound (ACKed data not seen yet)"
|
||||
: "SEQ is under the lower bound (already ACKed data retransmitted)"
|
||||
: "SEQ is over the upper bound (over the window of the receiver)");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
|
||||
"receiver end=%u maxend=%u maxwin=%u\n",
|
||||
res, sender->td_end, sender->td_maxend, sender->td_maxwin,
|
||||
res, sender->td_end, sender->td_maxend, sender->td_maxwin,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
|
||||
|
||||
return res;
|
||||
|
@ -769,7 +769,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
|
|||
#ifdef CONFIG_IP_NF_NAT_NEEDED
|
||||
/* Update sender->td_end after NAT successfully mangled the packet */
|
||||
void ip_conntrack_tcp_update(struct sk_buff *skb,
|
||||
struct ip_conntrack *conntrack,
|
||||
struct ip_conntrack *conntrack,
|
||||
enum ip_conntrack_dir dir)
|
||||
{
|
||||
struct iphdr *iph = skb->nh.iph;
|
||||
|
@ -781,7 +781,7 @@ void ip_conntrack_tcp_update(struct sk_buff *skb,
|
|||
#endif
|
||||
|
||||
end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph);
|
||||
|
||||
|
||||
write_lock_bh(&tcp_lock);
|
||||
/*
|
||||
* We have to worry for the ack in the reply packet only...
|
||||
|
@ -793,11 +793,11 @@ void ip_conntrack_tcp_update(struct sk_buff *skb,
|
|||
DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
|
||||
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
|
||||
sender->td_end, sender->td_maxend, sender->td_maxwin,
|
||||
sender->td_scale,
|
||||
sender->td_scale,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
|
||||
receiver->td_scale);
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
#define TH_FIN 0x01
|
||||
|
@ -847,8 +847,8 @@ static int tcp_error(struct sk_buff *skb,
|
|||
nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
|
||||
"ip_ct_tcp: short packet ");
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Not whole TCP header or malformed packet */
|
||||
if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
|
||||
if (LOG_INVALID(IPPROTO_TCP))
|
||||
|
@ -856,7 +856,7 @@ static int tcp_error(struct sk_buff *skb,
|
|||
"ip_ct_tcp: truncated/malformed packet ");
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
||||
/* Checksum invalid? Ignore.
|
||||
* We skip checking packets on the outgoing path
|
||||
* because it is assumed to be correct.
|
||||
|
@ -893,11 +893,11 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
struct tcphdr *th, _tcph;
|
||||
unsigned long timeout;
|
||||
unsigned int index;
|
||||
|
||||
|
||||
th = skb_header_pointer(skb, iph->ihl * 4,
|
||||
sizeof(_tcph), &_tcph);
|
||||
BUG_ON(th == NULL);
|
||||
|
||||
|
||||
write_lock_bh(&tcp_lock);
|
||||
old_state = conntrack->proto.tcp.state;
|
||||
dir = CTINFO2DIR(ctinfo);
|
||||
|
@ -907,7 +907,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
switch (new_state) {
|
||||
case TCP_CONNTRACK_IGNORE:
|
||||
/* Ignored packets:
|
||||
*
|
||||
*
|
||||
* a) SYN in ORIGINAL
|
||||
* b) SYN/ACK in REPLY
|
||||
* c) ACK in reply direction after initial SYN in original.
|
||||
|
@ -916,30 +916,30 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
&& conntrack->proto.tcp.last_index == TCP_SYN_SET
|
||||
&& conntrack->proto.tcp.last_dir != dir
|
||||
&& ntohl(th->ack_seq) ==
|
||||
conntrack->proto.tcp.last_end) {
|
||||
/* This SYN/ACK acknowledges a SYN that we earlier
|
||||
conntrack->proto.tcp.last_end) {
|
||||
/* This SYN/ACK acknowledges a SYN that we earlier
|
||||
* ignored as invalid. This means that the client and
|
||||
* the server are both in sync, while the firewall is
|
||||
* not. We kill this session and block the SYN/ACK so
|
||||
* that the client cannot but retransmit its SYN and
|
||||
* that the client cannot but retransmit its SYN and
|
||||
* thus initiate a clean new session.
|
||||
*/
|
||||
write_unlock_bh(&tcp_lock);
|
||||
write_unlock_bh(&tcp_lock);
|
||||
if (LOG_INVALID(IPPROTO_TCP))
|
||||
nf_log_packet(PF_INET, 0, skb, NULL, NULL,
|
||||
NULL, "ip_ct_tcp: "
|
||||
"killing out of sync session ");
|
||||
if (del_timer(&conntrack->timeout))
|
||||
conntrack->timeout.function((unsigned long)
|
||||
conntrack);
|
||||
return -NF_DROP;
|
||||
if (del_timer(&conntrack->timeout))
|
||||
conntrack->timeout.function((unsigned long)
|
||||
conntrack);
|
||||
return -NF_DROP;
|
||||
}
|
||||
conntrack->proto.tcp.last_index = index;
|
||||
conntrack->proto.tcp.last_dir = dir;
|
||||
conntrack->proto.tcp.last_seq = ntohl(th->seq);
|
||||
conntrack->proto.tcp.last_end =
|
||||
conntrack->proto.tcp.last_end =
|
||||
segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th);
|
||||
|
||||
|
||||
write_unlock_bh(&tcp_lock);
|
||||
if (LOG_INVALID(IPPROTO_TCP))
|
||||
nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
|
||||
|
@ -959,16 +959,16 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
if (old_state < TCP_CONNTRACK_TIME_WAIT)
|
||||
break;
|
||||
if ((conntrack->proto.tcp.seen[dir].flags &
|
||||
IP_CT_TCP_FLAG_CLOSE_INIT)
|
||||
IP_CT_TCP_FLAG_CLOSE_INIT)
|
||||
|| after(ntohl(th->seq),
|
||||
conntrack->proto.tcp.seen[dir].td_end)) {
|
||||
/* Attempt to reopen a closed connection.
|
||||
* Delete this connection and look up again. */
|
||||
write_unlock_bh(&tcp_lock);
|
||||
if (del_timer(&conntrack->timeout))
|
||||
conntrack->timeout.function((unsigned long)
|
||||
conntrack);
|
||||
return -NF_REPEAT;
|
||||
conntrack->proto.tcp.seen[dir].td_end)) {
|
||||
/* Attempt to reopen a closed connection.
|
||||
* Delete this connection and look up again. */
|
||||
write_unlock_bh(&tcp_lock);
|
||||
if (del_timer(&conntrack->timeout))
|
||||
conntrack->timeout.function((unsigned long)
|
||||
conntrack);
|
||||
return -NF_REPEAT;
|
||||
} else {
|
||||
write_unlock_bh(&tcp_lock);
|
||||
if (LOG_INVALID(IPPROTO_TCP))
|
||||
|
@ -979,9 +979,9 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
case TCP_CONNTRACK_CLOSE:
|
||||
if (index == TCP_RST_SET
|
||||
&& ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
|
||||
&& conntrack->proto.tcp.last_index == TCP_SYN_SET)
|
||||
|| (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
|
||||
&& conntrack->proto.tcp.last_index == TCP_ACK_SET))
|
||||
&& conntrack->proto.tcp.last_index == TCP_SYN_SET)
|
||||
|| (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
|
||||
&& conntrack->proto.tcp.last_index == TCP_ACK_SET))
|
||||
&& ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
|
||||
/* RST sent to invalid SYN or ACK we had let through
|
||||
* at a) and c) above:
|
||||
|
@ -1000,13 +1000,13 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
break;
|
||||
}
|
||||
|
||||
if (!tcp_in_window(&conntrack->proto.tcp, dir, index,
|
||||
if (!tcp_in_window(&conntrack->proto.tcp, dir, index,
|
||||
skb, iph, th)) {
|
||||
write_unlock_bh(&tcp_lock);
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
in_window:
|
||||
/* From now on we have got in-window packets */
|
||||
/* From now on we have got in-window packets */
|
||||
conntrack->proto.tcp.last_index = index;
|
||||
|
||||
DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
|
||||
|
@ -1018,9 +1018,9 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
old_state, new_state);
|
||||
|
||||
conntrack->proto.tcp.state = new_state;
|
||||
if (old_state != new_state
|
||||
if (old_state != new_state
|
||||
&& (new_state == TCP_CONNTRACK_FIN_WAIT
|
||||
|| new_state == TCP_CONNTRACK_CLOSE))
|
||||
|| new_state == TCP_CONNTRACK_CLOSE))
|
||||
conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
|
||||
timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans
|
||||
&& *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans
|
||||
|
@ -1046,8 +1046,8 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
&& (old_state == TCP_CONNTRACK_SYN_RECV
|
||||
|| old_state == TCP_CONNTRACK_ESTABLISHED)
|
||||
&& new_state == TCP_CONNTRACK_ESTABLISHED) {
|
||||
/* Set ASSURED if we see see valid ack in ESTABLISHED
|
||||
after SYN_RECV or a valid answer for a picked up
|
||||
/* Set ASSURED if we see see valid ack in ESTABLISHED
|
||||
after SYN_RECV or a valid answer for a picked up
|
||||
connection. */
|
||||
set_bit(IPS_ASSURED_BIT, &conntrack->status);
|
||||
ip_conntrack_event_cache(IPCT_STATUS, skb);
|
||||
|
@ -1056,7 +1056,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|
|||
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int tcp_new(struct ip_conntrack *conntrack,
|
||||
const struct sk_buff *skb)
|
||||
|
@ -1072,7 +1072,7 @@ static int tcp_new(struct ip_conntrack *conntrack,
|
|||
th = skb_header_pointer(skb, iph->ihl * 4,
|
||||
sizeof(_tcph), &_tcph);
|
||||
BUG_ON(th == NULL);
|
||||
|
||||
|
||||
/* Don't need lock here: this conntrack not in circulation yet */
|
||||
new_state
|
||||
= tcp_conntracks[0][get_conntrack_index(th)]
|
||||
|
@ -1113,7 +1113,7 @@ static int tcp_new(struct ip_conntrack *conntrack,
|
|||
if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
|
||||
conntrack->proto.tcp.seen[0].td_maxwin = 1;
|
||||
conntrack->proto.tcp.seen[0].td_maxend =
|
||||
conntrack->proto.tcp.seen[0].td_end +
|
||||
conntrack->proto.tcp.seen[0].td_end +
|
||||
conntrack->proto.tcp.seen[0].td_maxwin;
|
||||
conntrack->proto.tcp.seen[0].td_scale = 0;
|
||||
|
||||
|
@ -1123,25 +1123,25 @@ static int tcp_new(struct ip_conntrack *conntrack,
|
|||
conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
|
||||
IP_CT_TCP_FLAG_BE_LIBERAL;
|
||||
}
|
||||
|
||||
|
||||
conntrack->proto.tcp.seen[1].td_end = 0;
|
||||
conntrack->proto.tcp.seen[1].td_maxend = 0;
|
||||
conntrack->proto.tcp.seen[1].td_maxwin = 1;
|
||||
conntrack->proto.tcp.seen[1].td_scale = 0;
|
||||
conntrack->proto.tcp.seen[1].td_scale = 0;
|
||||
|
||||
/* tcp_packet will set them */
|
||||
conntrack->proto.tcp.state = TCP_CONNTRACK_NONE;
|
||||
conntrack->proto.tcp.last_index = TCP_NONE_SET;
|
||||
|
||||
|
||||
DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
|
||||
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
|
||||
sender->td_end, sender->td_maxend, sender->td_maxwin,
|
||||
sender->td_scale,
|
||||
sender->td_scale,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
|
||||
receiver->td_scale);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
struct ip_conntrack_protocol ip_conntrack_protocol_tcp =
|
||||
{
|
||||
.proto = IPPROTO_TCP,
|
||||
|
|
|
@ -70,7 +70,7 @@ static int udp_packet(struct ip_conntrack *conntrack,
|
|||
/* If we've seen traffic both ways, this is some kind of UDP
|
||||
stream. Extend timeout. */
|
||||
if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
|
||||
ip_ct_refresh_acct(conntrack, ctinfo, skb,
|
||||
ip_ct_refresh_acct(conntrack, ctinfo, skb,
|
||||
ip_ct_udp_timeout_stream);
|
||||
/* Also, more likely to be important, and not a probe */
|
||||
if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
|
||||
|
@ -102,7 +102,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
|
|||
"ip_ct_udp: short packet ");
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
||||
/* Truncated/malformed packets */
|
||||
if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
|
||||
if (LOG_INVALID(IPPROTO_UDP))
|
||||
|
@ -110,7 +110,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
|
|||
"ip_ct_udp: truncated/malformed packet ");
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
||||
/* Packet with no checksum */
|
||||
if (!hdr->check)
|
||||
return NF_ACCEPT;
|
||||
|
@ -126,7 +126,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
|
|||
"ip_ct_udp: bad UDP checksum ");
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
|
|
@ -321,7 +321,7 @@ int ct_sip_get_info(const char *dptr, size_t dlen,
|
|||
continue;
|
||||
}
|
||||
aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen,
|
||||
ct_sip_lnlen(dptr, limit),
|
||||
ct_sip_lnlen(dptr, limit),
|
||||
hnfo->case_sensitive);
|
||||
if (!aux) {
|
||||
DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str,
|
||||
|
@ -406,7 +406,7 @@ static int sip_help(struct sk_buff **pskb,
|
|||
if (dataoff >= (*pskb)->len) {
|
||||
DEBUGP("skb->len = %u\n", (*pskb)->len);
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
}
|
||||
|
||||
ip_ct_refresh(ct, *pskb, sip_timeout * HZ);
|
||||
|
||||
|
@ -439,16 +439,16 @@ static int sip_help(struct sk_buff **pskb,
|
|||
}
|
||||
/* Get ip and port address from SDP packet. */
|
||||
if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
|
||||
POS_CONNECTION) > 0) {
|
||||
POS_CONNECTION) > 0) {
|
||||
|
||||
/* We'll drop only if there are parse problems. */
|
||||
if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr,
|
||||
dptr + datalen) < 0) {
|
||||
dptr + datalen) < 0) {
|
||||
ret = NF_DROP;
|
||||
goto out;
|
||||
}
|
||||
if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
|
||||
POS_MEDIA) > 0) {
|
||||
POS_MEDIA) > 0) {
|
||||
|
||||
port = simple_strtoul(dptr + matchoff, NULL, 10);
|
||||
if (port < 1024) {
|
||||
|
|
|
@ -46,7 +46,7 @@ DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
|
|||
|
||||
static int kill_proto(struct ip_conntrack *i, void *data)
|
||||
{
|
||||
return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
|
||||
return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
|
||||
*((u_int8_t *) data));
|
||||
}
|
||||
|
||||
|
@ -124,12 +124,12 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
|
|||
(*pos)++;
|
||||
return ct_get_next(s, v);
|
||||
}
|
||||
|
||||
|
||||
static void ct_seq_stop(struct seq_file *s, void *v)
|
||||
{
|
||||
read_unlock_bh(&ip_conntrack_lock);
|
||||
}
|
||||
|
||||
|
||||
static int ct_seq_show(struct seq_file *s, void *v)
|
||||
{
|
||||
const struct ip_conntrack_tuple_hash *hash = v;
|
||||
|
@ -155,12 +155,12 @@ static int ct_seq_show(struct seq_file *s, void *v)
|
|||
|
||||
if (proto->print_conntrack(s, conntrack))
|
||||
return -ENOSPC;
|
||||
|
||||
|
||||
if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
|
||||
proto))
|
||||
return -ENOSPC;
|
||||
|
||||
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
|
||||
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
|
||||
return -ENOSPC;
|
||||
|
||||
if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
|
||||
|
@ -171,7 +171,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
|
|||
proto))
|
||||
return -ENOSPC;
|
||||
|
||||
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
|
||||
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
|
||||
return -ENOSPC;
|
||||
|
||||
if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
|
||||
|
@ -200,7 +200,7 @@ static struct seq_operations ct_seq_ops = {
|
|||
.stop = ct_seq_stop,
|
||||
.show = ct_seq_show
|
||||
};
|
||||
|
||||
|
||||
static int ct_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct seq_file *seq;
|
||||
|
@ -229,7 +229,7 @@ static struct file_operations ct_file_ops = {
|
|||
.llseek = seq_lseek,
|
||||
.release = seq_release_private,
|
||||
};
|
||||
|
||||
|
||||
/* expects */
|
||||
static void *exp_seq_start(struct seq_file *s, loff_t *pos)
|
||||
{
|
||||
|
@ -253,7 +253,7 @@ static void *exp_seq_start(struct seq_file *s, loff_t *pos)
|
|||
|
||||
static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
|
||||
{
|
||||
struct list_head *e = v;
|
||||
struct list_head *e = v;
|
||||
|
||||
++*pos;
|
||||
e = e->next;
|
||||
|
@ -297,7 +297,7 @@ static int exp_open(struct inode *inode, struct file *file)
|
|||
{
|
||||
return seq_open(file, &exp_seq_ops);
|
||||
}
|
||||
|
||||
|
||||
static struct file_operations exp_file_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = exp_open,
|
||||
|
@ -426,14 +426,14 @@ static unsigned int ip_conntrack_help(unsigned int hooknum,
|
|||
}
|
||||
|
||||
static unsigned int ip_conntrack_defrag(unsigned int hooknum,
|
||||
struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
|
||||
/* Previously seen (loopback)? Ignore. Do this before
|
||||
fragment check. */
|
||||
fragment check. */
|
||||
if ((*pskb)->nfct)
|
||||
return NF_ACCEPT;
|
||||
#endif
|
||||
|
@ -441,7 +441,7 @@ static unsigned int ip_conntrack_defrag(unsigned int hooknum,
|
|||
/* Gather fragments. */
|
||||
if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
|
||||
*pskb = ip_ct_gather_frags(*pskb,
|
||||
hooknum == NF_IP_PRE_ROUTING ?
|
||||
hooknum == NF_IP_PRE_ROUTING ?
|
||||
IP_DEFRAG_CONNTRACK_IN :
|
||||
IP_DEFRAG_CONNTRACK_OUT);
|
||||
if (!*pskb)
|
||||
|
@ -776,7 +776,7 @@ static ctl_table ip_ct_net_table[] = {
|
|||
{
|
||||
.ctl_name = CTL_NET,
|
||||
.procname = "net",
|
||||
.mode = 0555,
|
||||
.mode = 0555,
|
||||
.child = ip_ct_ipv4_table,
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
|
|
|
@ -33,7 +33,7 @@ MODULE_PARM_DESC(ports, "port numbers of tftp servers");
|
|||
|
||||
#if 0
|
||||
#define DEBUGP(format, args...) printk("%s:%s:" format, \
|
||||
__FILE__, __FUNCTION__ , ## args)
|
||||
__FILE__, __FUNCTION__ , ## args)
|
||||
#else
|
||||
#define DEBUGP(format, args...)
|
||||
#endif
|
||||
|
@ -113,7 +113,7 @@ static void ip_conntrack_tftp_fini(void)
|
|||
DEBUGP("unregistering helper for port %d\n",
|
||||
ports[i]);
|
||||
ip_conntrack_helper_unregister(&tftp[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int __init ip_conntrack_tftp_init(void)
|
||||
|
|
|
@ -120,7 +120,7 @@ static int
|
|||
in_range(const struct ip_conntrack_tuple *tuple,
|
||||
const struct ip_nat_range *range)
|
||||
{
|
||||
struct ip_nat_protocol *proto =
|
||||
struct ip_nat_protocol *proto =
|
||||
__ip_nat_proto_find(tuple->dst.protonum);
|
||||
|
||||
/* If we are supposed to map IPs, then we must be in the
|
||||
|
@ -443,8 +443,8 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
|
|||
(*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
|
||||
|
||||
/* Redirects on non-null nats must be dropped, else they'll
|
||||
start talking to each other without our translation, and be
|
||||
confused... --RR */
|
||||
start talking to each other without our translation, and be
|
||||
confused... --RR */
|
||||
if (inside->icmp.type == ICMP_REDIRECT) {
|
||||
/* If NAT isn't finished, assume it and drop. */
|
||||
if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
|
||||
|
@ -458,8 +458,8 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
|
|||
*pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
|
||||
|
||||
if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 +
|
||||
sizeof(struct icmphdr) + inside->ip.ihl*4,
|
||||
&inner,
|
||||
sizeof(struct icmphdr) + inside->ip.ihl*4,
|
||||
&inner,
|
||||
__ip_conntrack_proto_find(inside->ip.protocol)))
|
||||
return 0;
|
||||
|
||||
|
@ -537,7 +537,7 @@ EXPORT_SYMBOL(ip_nat_protocol_unregister);
|
|||
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
|
||||
defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
|
||||
int
|
||||
ip_nat_port_range_to_nfattr(struct sk_buff *skb,
|
||||
ip_nat_port_range_to_nfattr(struct sk_buff *skb,
|
||||
const struct ip_nat_range *range)
|
||||
{
|
||||
NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
|
||||
|
@ -555,21 +555,21 @@ int
|
|||
ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
|
||||
/* we have to return whether we actually parsed something or not */
|
||||
|
||||
if (tb[CTA_PROTONAT_PORT_MIN-1]) {
|
||||
ret = 1;
|
||||
range->min.tcp.port =
|
||||
range->min.tcp.port =
|
||||
*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
|
||||
}
|
||||
|
||||
|
||||
if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
|
||||
if (ret)
|
||||
if (ret)
|
||||
range->max.tcp.port = range->min.tcp.port;
|
||||
} else {
|
||||
ret = 1;
|
||||
range->max.tcp.port =
|
||||
range->max.tcp.port =
|
||||
*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
|
||||
}
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ mangle_rfc959_packet(struct sk_buff **pskb,
|
|||
DEBUGP("calling ip_nat_mangle_tcp_packet\n");
|
||||
|
||||
*seq += strlen(buffer) - matchlen;
|
||||
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
|
||||
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
|
||||
matchlen, buffer, strlen(buffer));
|
||||
}
|
||||
|
||||
|
@ -72,7 +72,7 @@ mangle_eprt_packet(struct sk_buff **pskb,
|
|||
DEBUGP("calling ip_nat_mangle_tcp_packet\n");
|
||||
|
||||
*seq += strlen(buffer) - matchlen;
|
||||
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
|
||||
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
|
||||
matchlen, buffer, strlen(buffer));
|
||||
}
|
||||
|
||||
|
@ -94,7 +94,7 @@ mangle_epsv_packet(struct sk_buff **pskb,
|
|||
DEBUGP("calling ip_nat_mangle_tcp_packet\n");
|
||||
|
||||
*seq += strlen(buffer) - matchlen;
|
||||
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
|
||||
return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
|
||||
matchlen, buffer, strlen(buffer));
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* ip_nat_helper.c - generic support functions for NAT helpers
|
||||
/* ip_nat_helper.c - generic support functions for NAT helpers
|
||||
*
|
||||
* (C) 2000-2002 Harald Welte <laforge@netfilter.org>
|
||||
* (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org>
|
||||
|
@ -8,7 +8,7 @@
|
|||
* published by the Free Software Foundation.
|
||||
*
|
||||
* 14 Jan 2002 Harald Welte <laforge@gnumonks.org>:
|
||||
* - add support for SACK adjustment
|
||||
* - add support for SACK adjustment
|
||||
* 14 Mar 2002 Harald Welte <laforge@gnumonks.org>:
|
||||
* - merge SACK support into newnat API
|
||||
* 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>:
|
||||
|
@ -45,10 +45,10 @@
|
|||
static DEFINE_SPINLOCK(ip_nat_seqofs_lock);
|
||||
|
||||
/* Setup TCP sequence correction given this change at this sequence */
|
||||
static inline void
|
||||
static inline void
|
||||
adjust_tcp_sequence(u32 seq,
|
||||
int sizediff,
|
||||
struct ip_conntrack *ct,
|
||||
struct ip_conntrack *ct,
|
||||
enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
int dir;
|
||||
|
@ -150,7 +150,7 @@ static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
|
|||
* skb enlargement, ...
|
||||
*
|
||||
* */
|
||||
int
|
||||
int
|
||||
ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
|
||||
struct ip_conntrack *ct,
|
||||
enum ip_conntrack_info ctinfo,
|
||||
|
@ -186,7 +186,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
|
|||
tcph->check = tcp_v4_check(datalen,
|
||||
iph->saddr, iph->daddr,
|
||||
csum_partial((char *)tcph,
|
||||
datalen, 0));
|
||||
datalen, 0));
|
||||
} else
|
||||
nf_proto_csum_replace2(&tcph->check, *pskb,
|
||||
htons(oldlen), htons(datalen), 1);
|
||||
|
@ -202,7 +202,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
|
|||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
|
||||
|
||||
|
||||
/* Generic function for mangling variable-length address changes inside
|
||||
* NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
|
||||
* command in the Amanda protocol)
|
||||
|
@ -213,7 +213,7 @@ EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
|
|||
* XXX - This function could be merged with ip_nat_mangle_tcp_packet which
|
||||
* should be fairly easy to do.
|
||||
*/
|
||||
int
|
||||
int
|
||||
ip_nat_mangle_udp_packet(struct sk_buff **pskb,
|
||||
struct ip_conntrack *ct,
|
||||
enum ip_conntrack_info ctinfo,
|
||||
|
@ -228,8 +228,8 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
|
|||
|
||||
/* UDP helpers might accidentally mangle the wrong packet */
|
||||
iph = (*pskb)->nh.iph;
|
||||
if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
|
||||
match_offset + match_len)
|
||||
if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
|
||||
match_offset + match_len)
|
||||
return 0;
|
||||
|
||||
if (!skb_make_writable(pskb, (*pskb)->len))
|
||||
|
@ -258,9 +258,9 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
|
|||
if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
|
||||
udph->check = 0;
|
||||
udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
|
||||
datalen, IPPROTO_UDP,
|
||||
csum_partial((char *)udph,
|
||||
datalen, 0));
|
||||
datalen, IPPROTO_UDP,
|
||||
csum_partial((char *)udph,
|
||||
datalen, 0));
|
||||
if (!udph->check)
|
||||
udph->check = CSUM_MANGLED_0;
|
||||
} else
|
||||
|
@ -273,7 +273,7 @@ EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
|
|||
/* Adjust one found SACK option including checksum correction */
|
||||
static void
|
||||
sack_adjust(struct sk_buff *skb,
|
||||
struct tcphdr *tcph,
|
||||
struct tcphdr *tcph,
|
||||
unsigned int sackoff,
|
||||
unsigned int sackend,
|
||||
struct ip_nat_seq *natseq)
|
||||
|
@ -360,14 +360,14 @@ ip_nat_sack_adjust(struct sk_buff **pskb,
|
|||
|
||||
/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
|
||||
int
|
||||
ip_nat_seq_adjust(struct sk_buff **pskb,
|
||||
struct ip_conntrack *ct,
|
||||
ip_nat_seq_adjust(struct sk_buff **pskb,
|
||||
struct ip_conntrack *ct,
|
||||
enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
struct tcphdr *tcph;
|
||||
int dir;
|
||||
__be32 newseq, newack;
|
||||
struct ip_nat_seq *this_way, *other_way;
|
||||
struct ip_nat_seq *this_way, *other_way;
|
||||
|
||||
dir = CTINFO2DIR(ctinfo);
|
||||
|
||||
|
|
|
@ -202,10 +202,10 @@ pptp_outbound_pkt(struct sk_buff **pskb,
|
|||
|
||||
/* mangle packet */
|
||||
if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
|
||||
cid_off + sizeof(struct pptp_pkt_hdr) +
|
||||
sizeof(struct PptpControlHeader),
|
||||
sizeof(new_callid), (char *)&new_callid,
|
||||
sizeof(new_callid)) == 0)
|
||||
cid_off + sizeof(struct pptp_pkt_hdr) +
|
||||
sizeof(struct PptpControlHeader),
|
||||
sizeof(new_callid), (char *)&new_callid,
|
||||
sizeof(new_callid)) == 0)
|
||||
return NF_DROP;
|
||||
|
||||
return NF_ACCEPT;
|
||||
|
@ -293,7 +293,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
|
|||
ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
|
||||
|
||||
if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
|
||||
pcid_off + sizeof(struct pptp_pkt_hdr) +
|
||||
pcid_off + sizeof(struct pptp_pkt_hdr) +
|
||||
sizeof(struct PptpControlHeader),
|
||||
sizeof(new_pcid), (char *)&new_pcid,
|
||||
sizeof(new_pcid)) == 0)
|
||||
|
|
|
@ -88,8 +88,8 @@ static unsigned int help(struct sk_buff **pskb,
|
|||
DEBUGP("ip_nat_irc: Inserting '%s' == %u.%u.%u.%u, port %u\n",
|
||||
buffer, NIPQUAD(exp->tuple.src.ip), port);
|
||||
|
||||
ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo,
|
||||
matchoff, matchlen, buffer,
|
||||
ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo,
|
||||
matchoff, matchlen, buffer,
|
||||
strlen(buffer));
|
||||
if (ret != NF_ACCEPT)
|
||||
ip_conntrack_unexpect_related(exp);
|
||||
|
|
|
@ -45,7 +45,7 @@ icmp_unique_tuple(struct ip_conntrack_tuple *tuple,
|
|||
|
||||
for (i = 0; i < range_size; i++, id++) {
|
||||
tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
|
||||
(id % range_size));
|
||||
(id % range_size));
|
||||
if (!ip_nat_used_tuple(tuple, conntrack))
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -112,7 +112,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
|
|||
|
||||
/* Connection must be valid and new. */
|
||||
IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
|
||||
|| ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
|
||||
|| ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
|
||||
IP_NF_ASSERT(out);
|
||||
|
||||
return ip_nat_setup_info(ct, &mr->range[0], hooknum);
|
||||
|
@ -223,8 +223,8 @@ alloc_null_binding(struct ip_conntrack *conntrack,
|
|||
|
||||
unsigned int
|
||||
alloc_null_binding_confirmed(struct ip_conntrack *conntrack,
|
||||
struct ip_nat_info *info,
|
||||
unsigned int hooknum)
|
||||
struct ip_nat_info *info,
|
||||
unsigned int hooknum)
|
||||
{
|
||||
__be32 ip
|
||||
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
|
||||
|
|
|
@ -88,7 +88,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
|
|||
return 1;
|
||||
|
||||
if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
|
||||
matchoff, matchlen, addr, addrlen))
|
||||
matchoff, matchlen, addr, addrlen))
|
||||
return 0;
|
||||
*dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
|
||||
return 1;
|
||||
|
@ -149,7 +149,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb,
|
|||
return 0;
|
||||
|
||||
if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
|
||||
matchoff, matchlen, buffer, bufflen))
|
||||
matchoff, matchlen, buffer, bufflen))
|
||||
return 0;
|
||||
|
||||
/* We need to reload this. Thanks Patrick. */
|
||||
|
@ -170,7 +170,7 @@ static int mangle_content_len(struct sk_buff **pskb,
|
|||
|
||||
/* Get actual SDP lenght */
|
||||
if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
|
||||
&matchlen, POS_SDP_HEADER) > 0) {
|
||||
&matchlen, POS_SDP_HEADER) > 0) {
|
||||
|
||||
/* since ct_sip_get_info() give us a pointer passing 'v='
|
||||
we need to add 2 bytes in this count. */
|
||||
|
@ -178,7 +178,7 @@ static int mangle_content_len(struct sk_buff **pskb,
|
|||
|
||||
/* Now, update SDP lenght */
|
||||
if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
|
||||
&matchlen, POS_CONTENT) > 0) {
|
||||
&matchlen, POS_CONTENT) > 0) {
|
||||
|
||||
bufflen = sprintf(buffer, "%u", c_len);
|
||||
|
||||
|
@ -204,17 +204,17 @@ static unsigned int mangle_sdp(struct sk_buff **pskb,
|
|||
/* Mangle owner and contact info. */
|
||||
bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip));
|
||||
if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
|
||||
buffer, bufflen, POS_OWNER))
|
||||
buffer, bufflen, POS_OWNER))
|
||||
return 0;
|
||||
|
||||
if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
|
||||
buffer, bufflen, POS_CONNECTION))
|
||||
buffer, bufflen, POS_CONNECTION))
|
||||
return 0;
|
||||
|
||||
/* Mangle media port. */
|
||||
bufflen = sprintf(buffer, "%u", port);
|
||||
if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
|
||||
buffer, bufflen, POS_MEDIA))
|
||||
buffer, bufflen, POS_MEDIA))
|
||||
return 0;
|
||||
|
||||
return mangle_content_len(pskb, ctinfo, ct, dptr);
|
||||
|
|
|
@ -3,11 +3,11 @@
|
|||
*
|
||||
* Basic SNMP Application Layer Gateway
|
||||
*
|
||||
* This IP NAT module is intended for use with SNMP network
|
||||
* discovery and monitoring applications where target networks use
|
||||
* This IP NAT module is intended for use with SNMP network
|
||||
* discovery and monitoring applications where target networks use
|
||||
* conflicting private address realms.
|
||||
*
|
||||
* Static NAT is used to remap the networks from the view of the network
|
||||
* Static NAT is used to remap the networks from the view of the network
|
||||
* management system at the IP layer, and this module remaps some application
|
||||
* layer addresses to match.
|
||||
*
|
||||
|
@ -20,7 +20,7 @@
|
|||
* More information on ALG and associated issues can be found in
|
||||
* RFC 2962
|
||||
*
|
||||
* The ASB.1/BER parsing code is derived from the gxsnmp package by Gregory
|
||||
* The ASB.1/BER parsing code is derived from the gxsnmp package by Gregory
|
||||
* McLean & Jochen Friedrich, stripped down for use in the kernel.
|
||||
*
|
||||
* Copyright (c) 2000 RP Internet (www.rpi.net.au).
|
||||
|
@ -69,8 +69,8 @@ MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
|
|||
static int debug;
|
||||
static DEFINE_SPINLOCK(snmp_lock);
|
||||
|
||||
/*
 * Application layer address mapping mimics the NAT mapping, but
/*
 * Application layer address mapping mimics the NAT mapping, but
 * only for the first octet in this case (a more flexible system
 * can be implemented if needed).
 */

@ -80,7 +80,7 @@ struct oct1_map
|
|||
u_int8_t to;
|
||||
};
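/*
 * Illustrative sketch only (not part of this patch): applying the one-octet
 * translation that struct oct1_map describes is a single-byte rewrite of the
 * address; the UDP checksum fixup the real module performs is omitted here.
 * map_first_octet() and its types are invented for the example.
 */
#include <stdint.h>

struct oct1_map_sketch {
	uint8_t from;
	uint8_t to;
};

/* addr is a network-order IPv4 address as 4 bytes; returns 1 on rewrite. */
static int map_first_octet(uint8_t addr[4], const struct oct1_map_sketch *map)
{
	if (addr[0] != map->from)
		return 0;
	addr[0] = map->to;
	return 1;
}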
|
||||
|
||||
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
|
||||
|
@ -129,7 +129,7 @@ struct oct1_map
|
|||
#define ASN1_ERR_DEC_LENGTH_MISMATCH 4
|
||||
#define ASN1_ERR_DEC_BADVALUE 5
|
||||
|
||||
/*
|
||||
/*
|
||||
* ASN.1 context.
|
||||
*/
|
||||
struct asn1_ctx
|
||||
|
@ -148,10 +148,10 @@ struct asn1_octstr
|
|||
unsigned char *data;
|
||||
unsigned int len;
|
||||
};
|
||||
|
||||
|
||||
static void asn1_open(struct asn1_ctx *ctx,
|
||||
unsigned char *buf,
|
||||
unsigned int len)
|
||||
unsigned char *buf,
|
||||
unsigned int len)
|
||||
{
|
||||
ctx->begin = buf;
|
||||
ctx->end = buf + len;
|
||||
|
@ -172,9 +172,9 @@ static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
|
|||
static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
|
||||
{
|
||||
unsigned char ch;
|
||||
|
||||
|
||||
*tag = 0;
|
||||
|
||||
|
||||
do
|
||||
{
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
|
@ -185,20 +185,20 @@ static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
|
|||
return 1;
|
||||
}
|
||||
|
||||
static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
|
||||
unsigned int *cls,
|
||||
unsigned int *con,
|
||||
unsigned int *tag)
|
||||
static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
|
||||
unsigned int *cls,
|
||||
unsigned int *con,
|
||||
unsigned int *tag)
|
||||
{
|
||||
unsigned char ch;
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*cls = (ch & 0xC0) >> 6;
|
||||
*con = (ch & 0x20) >> 5;
|
||||
*tag = (ch & 0x1F);
|
||||
|
||||
|
||||
if (*tag == 0x1F) {
|
||||
if (!asn1_tag_decode(ctx, tag))
|
||||
return 0;
|
||||
|
@ -207,25 +207,25 @@ static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
|
||||
unsigned int *def,
|
||||
unsigned int *len)
|
||||
unsigned int *def,
|
||||
unsigned int *len)
|
||||
{
|
||||
unsigned char ch, cnt;
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
if (ch == 0x80)
|
||||
*def = 0;
|
||||
else {
|
||||
*def = 1;
|
||||
|
||||
|
||||
if (ch < 0x80)
|
||||
*len = ch;
|
||||
else {
|
||||
cnt = (unsigned char) (ch & 0x7F);
|
||||
*len = 0;
|
||||
|
||||
|
||||
while (cnt > 0) {
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
@ -239,20 +239,20 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
|
||||
unsigned char **eoc,
|
||||
unsigned int *cls,
|
||||
unsigned int *con,
|
||||
unsigned int *tag)
|
||||
unsigned char **eoc,
|
||||
unsigned int *cls,
|
||||
unsigned int *con,
|
||||
unsigned int *tag)
|
||||
{
|
||||
unsigned int def, len;
|
||||
|
||||
|
||||
if (!asn1_id_decode(ctx, cls, con, tag))
|
||||
return 0;
|
||||
|
||||
|
||||
def = len = 0;
|
||||
if (!asn1_length_decode(ctx, &def, &len))
|
||||
return 0;
|
||||
|
||||
|
||||
if (def)
|
||||
*eoc = ctx->pointer + len;
|
||||
else
|
||||
|
@ -263,19 +263,19 @@ static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
|
|||
static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
|
||||
{
|
||||
unsigned char ch;
|
||||
|
||||
|
||||
if (eoc == 0) {
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
if (ch != 0x00) {
|
||||
ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
if (ch != 0x00) {
|
||||
ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
|
||||
return 0;
|
||||
|
@ -297,27 +297,27 @@ static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
|
|||
}
|
||||
|
||||
static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
|
||||
unsigned char *eoc,
|
||||
long *integer)
|
||||
unsigned char *eoc,
|
||||
long *integer)
|
||||
{
|
||||
unsigned char ch;
|
||||
unsigned int len;
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*integer = (signed char) ch;
|
||||
len = 1;
|
||||
|
||||
|
||||
while (ctx->pointer < eoc) {
|
||||
if (++len > sizeof (long)) {
|
||||
ctx->error = ASN1_ERR_DEC_BADVALUE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*integer <<= 8;
|
||||
*integer |= ch;
|
||||
}
|
||||
|
@ -325,28 +325,28 @@ static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
|
||||
unsigned char *eoc,
|
||||
unsigned int *integer)
|
||||
unsigned char *eoc,
|
||||
unsigned int *integer)
|
||||
{
|
||||
unsigned char ch;
|
||||
unsigned int len;
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*integer = ch;
|
||||
if (ch == 0) len = 0;
|
||||
else len = 1;
|
||||
|
||||
|
||||
while (ctx->pointer < eoc) {
|
||||
if (++len > sizeof (unsigned int)) {
|
||||
ctx->error = ASN1_ERR_DEC_BADVALUE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*integer <<= 8;
|
||||
*integer |= ch;
|
||||
}
|
||||
|
@ -354,28 +354,28 @@ static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
|
||||
unsigned char *eoc,
|
||||
unsigned long *integer)
|
||||
unsigned char *eoc,
|
||||
unsigned long *integer)
|
||||
{
|
||||
unsigned char ch;
|
||||
unsigned int len;
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*integer = ch;
|
||||
if (ch == 0) len = 0;
|
||||
else len = 1;
|
||||
|
||||
|
||||
while (ctx->pointer < eoc) {
|
||||
if (++len > sizeof (unsigned long)) {
|
||||
ctx->error = ASN1_ERR_DEC_BADVALUE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*integer <<= 8;
|
||||
*integer |= ch;
|
||||
}
|
||||
|
@ -383,21 +383,21 @@ static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
|
||||
unsigned char *eoc,
|
||||
unsigned char **octets,
|
||||
unsigned int *len)
|
||||
unsigned char *eoc,
|
||||
unsigned char **octets,
|
||||
unsigned int *len)
|
||||
{
|
||||
unsigned char *ptr;
|
||||
|
||||
|
||||
*len = 0;
|
||||
|
||||
|
||||
*octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
|
||||
if (*octets == NULL) {
|
||||
if (net_ratelimit())
|
||||
printk("OOM in bsalg (%d)\n", __LINE__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
ptr = *octets;
|
||||
while (ctx->pointer < eoc) {
|
||||
if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) {
|
||||
|
@ -411,16 +411,16 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
|
||||
unsigned long *subid)
|
||||
unsigned long *subid)
|
||||
{
|
||||
unsigned char ch;
|
||||
|
||||
|
||||
*subid = 0;
|
||||
|
||||
|
||||
do {
|
||||
if (!asn1_octet_decode(ctx, &ch))
|
||||
return 0;
|
||||
|
||||
|
||||
*subid <<= 7;
|
||||
*subid |= ch & 0x7F;
|
||||
} while ((ch & 0x80) == 0x80);
|
||||
|
@ -428,14 +428,14 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
|
||||
unsigned char *eoc,
|
||||
unsigned long **oid,
|
||||
unsigned int *len)
|
||||
unsigned char *eoc,
|
||||
unsigned long **oid,
|
||||
unsigned int *len)
|
||||
{
|
||||
unsigned long subid;
|
||||
unsigned int size;
|
||||
unsigned long *optr;
|
||||
|
||||
|
||||
size = eoc - ctx->pointer + 1;
|
||||
*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
|
||||
if (*oid == NULL) {
|
||||
|
@ -443,15 +443,15 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
|
|||
printk("OOM in bsalg (%d)\n", __LINE__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
optr = *oid;
|
||||
|
||||
|
||||
if (!asn1_subid_decode(ctx, &subid)) {
|
||||
kfree(*oid);
|
||||
*oid = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
if (subid < 40) {
|
||||
optr [0] = 0;
|
||||
optr [1] = subid;
|
||||
|
@ -462,10 +462,10 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
|
|||
optr [0] = 2;
|
||||
optr [1] = subid - 80;
|
||||
}
|
||||
|
||||
|
||||
*len = 2;
|
||||
optr += 2;
|
||||
|
||||
|
||||
while (ctx->pointer < eoc) {
|
||||
if (++(*len) > size) {
|
||||
ctx->error = ASN1_ERR_DEC_BADVALUE;
|
||||
|
@ -473,7 +473,7 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
|
|||
*oid = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
if (!asn1_subid_decode(ctx, optr++)) {
|
||||
kfree(*oid);
|
||||
*oid = NULL;
|
||||
|
@ -611,9 +611,9 @@ struct snmp_v1_trap
|
|||
#define SERR_EOM 2
|
||||
|
||||
static inline void mangle_address(unsigned char *begin,
|
||||
unsigned char *addr,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check);
|
||||
unsigned char *addr,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check);
|
||||
struct snmp_cnv
|
||||
{
|
||||
unsigned int class;
|
||||
|
@ -633,7 +633,7 @@ static struct snmp_cnv snmp_conv [] =
|
|||
{ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */
|
||||
{ASN1_APL, SNMP_TIT, SNMP_TIMETICKS},
|
||||
{ASN1_APL, SNMP_OPQ, SNMP_OPAQUE},
|
||||
|
||||
|
||||
/* SNMPv2 data types and errors */
|
||||
{ASN1_UNI, ASN1_BTS, SNMP_BITSTR},
|
||||
{ASN1_APL, SNMP_C64, SNMP_COUNTER64},
|
||||
|
@ -644,13 +644,13 @@ static struct snmp_cnv snmp_conv [] =
|
|||
};
|
||||
|
||||
static unsigned char snmp_tag_cls2syntax(unsigned int tag,
|
||||
unsigned int cls,
|
||||
unsigned short *syntax)
|
||||
unsigned int cls,
|
||||
unsigned short *syntax)
|
||||
{
|
||||
struct snmp_cnv *cnv;
|
||||
|
||||
|
||||
cnv = snmp_conv;
|
||||
|
||||
|
||||
while (cnv->syntax != -1) {
|
||||
if (cnv->tag == tag && cnv->class == cls) {
|
||||
*syntax = cnv->syntax;
|
||||
|
@ -662,7 +662,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag,
|
|||
}
|
||||
|
||||
static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
|
||||
struct snmp_object **obj)
|
||||
struct snmp_object **obj)
|
||||
{
|
||||
unsigned int cls, con, tag, len, idlen;
|
||||
unsigned short type;
|
||||
|
@ -670,41 +670,41 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
|
|||
unsigned long *lp, *id;
|
||||
unsigned long ul;
|
||||
long l;
|
||||
|
||||
|
||||
*obj = NULL;
|
||||
id = NULL;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag))
|
||||
return 0;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
return 0;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_oid_decode(ctx, end, &id, &idlen))
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) {
|
||||
kfree(id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
if (con != ASN1_PRI) {
|
||||
kfree(id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
type = 0;
|
||||
if (!snmp_tag_cls2syntax(tag, cls, &type)) {
|
||||
kfree(id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
l = 0;
|
||||
switch (type) {
|
||||
case SNMP_INTEGER:
|
||||
|
@ -714,7 +714,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
|
|||
return 0;
|
||||
}
|
||||
*obj = kmalloc(sizeof(struct snmp_object) + len,
|
||||
GFP_ATOMIC);
|
||||
GFP_ATOMIC);
|
||||
if (*obj == NULL) {
|
||||
kfree(id);
|
||||
if (net_ratelimit())
|
||||
|
@ -730,7 +730,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
|
|||
return 0;
|
||||
}
|
||||
*obj = kmalloc(sizeof(struct snmp_object) + len,
|
||||
GFP_ATOMIC);
|
||||
GFP_ATOMIC);
|
||||
if (*obj == NULL) {
|
||||
kfree(id);
|
||||
if (net_ratelimit())
|
||||
|
@ -818,12 +818,12 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
|
|||
kfree(id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
(*obj)->syntax_len = len;
|
||||
(*obj)->type = type;
|
||||
(*obj)->id = id;
|
||||
(*obj)->id_len = idlen;
|
||||
|
||||
|
||||
if (!asn1_eoc_decode(ctx, eoc)) {
|
||||
kfree(id);
|
||||
kfree(*obj);
|
||||
|
@ -834,49 +834,49 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
|
|||
}
|
||||
|
||||
static unsigned char snmp_request_decode(struct asn1_ctx *ctx,
|
||||
struct snmp_request *request)
|
||||
struct snmp_request *request)
|
||||
{
|
||||
unsigned int cls, con, tag;
|
||||
unsigned char *end;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
return 0;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_ulong_decode(ctx, end, &request->id))
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
return 0;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_uint_decode(ctx, end, &request->error_status))
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
return 0;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_uint_decode(ctx, end, &request->error_index))
|
||||
return 0;
|
||||
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Fast checksum update for possibly oddly-aligned UDP byte, from the
|
||||
* code example in the draft.
|
||||
*/
|
||||
static void fast_csum(__sum16 *csum,
|
||||
const unsigned char *optr,
|
||||
const unsigned char *nptr,
|
||||
int offset)
|
||||
const unsigned char *optr,
|
||||
const unsigned char *nptr,
|
||||
int offset)
|
||||
{
|
||||
unsigned char s[4];
|
||||
|
||||
|
@ -893,30 +893,30 @@ static void fast_csum(__sum16 *csum,
|
|||
*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Mangle IP address.
|
||||
* - begin points to the start of the snmp messgae
|
||||
* - addr points to the start of the address
|
||||
*/
|
||||
static inline void mangle_address(unsigned char *begin,
|
||||
unsigned char *addr,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check)
|
||||
unsigned char *addr,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check)
|
||||
{
|
||||
if (map->from == NOCT1(addr)) {
|
||||
u_int32_t old;
|
||||
|
||||
|
||||
if (debug)
|
||||
memcpy(&old, (unsigned char *)addr, sizeof(old));
|
||||
|
||||
|
||||
*addr = map->to;
|
||||
|
||||
|
||||
/* Update UDP checksum if being used */
|
||||
if (*check) {
|
||||
fast_csum(check,
|
||||
&map->from, &map->to, addr - begin);
|
||||
&map->from, &map->to, addr - begin);
|
||||
}
|
||||
|
||||
|
||||
if (debug)
|
||||
printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
|
||||
"%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
|
||||
|
@ -924,66 +924,66 @@ static inline void mangle_address(unsigned char *begin,
|
|||
}
|
||||
|
||||
static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
|
||||
struct snmp_v1_trap *trap,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check)
|
||||
struct snmp_v1_trap *trap,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check)
|
||||
{
|
||||
unsigned int cls, con, tag, len;
|
||||
unsigned char *end;
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
return 0;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len))
|
||||
return 0;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
goto err_id_free;
|
||||
|
||||
if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) ||
|
||||
(cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS)))
|
||||
goto err_id_free;
|
||||
|
||||
|
||||
if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len))
|
||||
goto err_id_free;
|
||||
|
||||
|
||||
/* IPv4 only */
|
||||
if (len != 4)
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
mangle_address(ctx->begin, ctx->pointer - 4, map, check);
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (!asn1_uint_decode(ctx, end, &trap->general))
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (!asn1_uint_decode(ctx, end, &trap->specific))
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) ||
|
||||
(cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT)))
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
if (!asn1_ulong_decode(ctx, end, &trap->time))
|
||||
goto err_addr_free;
|
||||
|
||||
|
||||
return 1;
|
||||
|
||||
err_addr_free:
|
||||
|
@ -1004,7 +1004,7 @@ static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
|
|||
static void hex_dump(unsigned char *buf, size_t len)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
|
||||
for (i = 0; i < len; i++) {
|
||||
if (i && !(i % 16))
|
||||
printk("\n");
|
||||
|
@ -1018,30 +1018,30 @@ static void hex_dump(unsigned char *buf, size_t len)
|
|||
* (And this is the fucking 'basic' method).
|
||||
*/
|
||||
static int snmp_parse_mangle(unsigned char *msg,
|
||||
u_int16_t len,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check)
|
||||
u_int16_t len,
|
||||
const struct oct1_map *map,
|
||||
__sum16 *check)
|
||||
{
|
||||
unsigned char *eoc, *end;
|
||||
unsigned int cls, con, tag, vers, pdutype;
|
||||
struct asn1_ctx ctx;
|
||||
struct asn1_octstr comm;
|
||||
struct snmp_object **obj;
|
||||
|
||||
|
||||
if (debug > 1)
|
||||
hex_dump(msg, len);
|
||||
|
||||
asn1_open(&ctx, msg, len);
|
||||
|
||||
/*
|
||||
|
||||
/*
|
||||
* Start of SNMP message.
|
||||
*/
|
||||
if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
|
||||
return 0;
|
||||
if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
|
||||
/*
|
||||
* Version 1 or 2 handled.
|
||||
*/
|
||||
if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag))
|
||||
|
@ -1054,7 +1054,7 @@ static int snmp_parse_mangle(unsigned char *msg,
|
|||
printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1);
|
||||
if (vers > 1)
|
||||
return 1;
|
||||
|
||||
|
||||
/*
|
||||
* Community.
|
||||
*/
|
||||
|
@ -1066,14 +1066,14 @@ static int snmp_parse_mangle(unsigned char *msg,
|
|||
return 0;
|
||||
if (debug > 1) {
|
||||
unsigned int i;
|
||||
|
||||
|
||||
printk(KERN_DEBUG "bsalg: community: ");
|
||||
for (i = 0; i < comm.len; i++)
|
||||
printk("%c", comm.data[i]);
|
||||
printk("\n");
|
||||
}
|
||||
kfree(comm.data);
|
||||
|
||||
|
||||
/*
|
||||
* PDU type
|
||||
*/
|
||||
|
@ -1092,7 +1092,7 @@ static int snmp_parse_mangle(unsigned char *msg,
|
|||
[SNMP_PDU_INFORM] = "inform",
|
||||
[SNMP_PDU_TRAP2] = "trapv2"
|
||||
};
|
||||
|
||||
|
||||
if (pdutype > SNMP_PDU_TRAP2)
|
||||
printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype);
|
||||
else
|
||||
|
@ -1101,56 +1101,56 @@ static int snmp_parse_mangle(unsigned char *msg,
|
|||
if (pdutype != SNMP_PDU_RESPONSE &&
|
||||
pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2)
|
||||
return 1;
|
||||
|
||||
|
||||
/*
|
||||
* Request header or v1 trap
|
||||
*/
|
||||
if (pdutype == SNMP_PDU_TRAP1) {
|
||||
struct snmp_v1_trap trap;
|
||||
unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
|
||||
|
||||
|
||||
if (ret) {
|
||||
kfree(trap.id);
|
||||
kfree((unsigned long *)trap.ip_address);
|
||||
} else
|
||||
} else
|
||||
return ret;
|
||||
|
||||
|
||||
} else {
|
||||
struct snmp_request req;
|
||||
|
||||
|
||||
if (!snmp_request_decode(&ctx, &req))
|
||||
return 0;
|
||||
|
||||
|
||||
if (debug > 1)
|
||||
printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u "
|
||||
"error_index=%u\n", req.id, req.error_status,
|
||||
req.error_index);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Loop through objects, look for IP addresses to mangle.
|
||||
*/
|
||||
if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
|
||||
return 0;
|
||||
|
||||
|
||||
if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
|
||||
return 0;
|
||||
|
||||
|
||||
obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
|
||||
if (obj == NULL) {
|
||||
if (net_ratelimit())
|
||||
printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (!asn1_eoc_decode(&ctx, eoc)) {
|
||||
unsigned int i;
|
||||
|
||||
|
||||
if (!snmp_object_decode(&ctx, obj)) {
|
||||
if (*obj) {
|
||||
kfree((*obj)->id);
|
||||
kfree(*obj);
|
||||
}
|
||||
}
|
||||
kfree(obj);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1163,20 +1163,20 @@ static int snmp_parse_mangle(unsigned char *msg,
|
|||
printk("%lu", (*obj)->id[i]);
|
||||
}
|
||||
printk(": type=%u\n", (*obj)->type);
|
||||
|
||||
|
||||
}
|
||||
|
||||
if ((*obj)->type == SNMP_IPADDR)
|
||||
mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
|
||||
|
||||
|
||||
kfree((*obj)->id);
|
||||
kfree(*obj);
|
||||
}
|
||||
kfree(obj);
|
||||
|
||||
|
||||
if (!asn1_eoc_decode(&ctx, eoc))
|
||||
return 0;
|
||||
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -1186,12 +1186,12 @@ static int snmp_parse_mangle(unsigned char *msg,
|
|||
*
|
||||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
/*
|
||||
* SNMP translation routine.
|
||||
*/
|
||||
static int snmp_translate(struct ip_conntrack *ct,
|
||||
enum ip_conntrack_info ctinfo,
|
||||
struct sk_buff **pskb)
|
||||
enum ip_conntrack_info ctinfo,
|
||||
struct sk_buff **pskb)
|
||||
{
|
||||
struct iphdr *iph = (*pskb)->nh.iph;
|
||||
struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
|
||||
|
@ -1213,12 +1213,12 @@ static int snmp_translate(struct ip_conntrack *ct,
|
|||
map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
|
||||
map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip);
|
||||
}
|
||||
|
||||
|
||||
if (map.from == map.to)
|
||||
return NF_ACCEPT;
|
||||
|
||||
|
||||
if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
|
||||
paylen, &map, &udph->check)) {
|
||||
paylen, &map, &udph->check)) {
|
||||
if (net_ratelimit())
|
||||
printk(KERN_WARNING "bsalg: parser failed\n");
|
||||
return NF_DROP;
|
||||
|
@ -1247,7 +1247,7 @@ static int help(struct sk_buff **pskb,
|
|||
if (!(ct->status & IPS_NAT_MASK))
|
||||
return NF_ACCEPT;
|
||||
|
||||
/*
|
||||
/*
|
||||
* Make sure the packet length is ok. So far, we were only guaranteed
|
||||
* to have a valid length IP header plus 8 bytes, which means we have
|
||||
* enough room for a UDP header. Just verify the UDP length field so we
|
||||
|
@ -1305,7 +1305,7 @@ static struct ip_conntrack_helper snmp_trap_helper = {
|
|||
* Module stuff.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
|
||||
static int __init ip_nat_snmp_basic_init(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
|
|
@ -81,7 +81,7 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
|
|||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
static unsigned int
|
||||
ip_nat_fn(unsigned int hooknum,
|
||||
struct sk_buff **pskb,
|
||||
|
@ -107,8 +107,8 @@ ip_nat_fn(unsigned int hooknum,
|
|||
protocol. 8) --RR */
|
||||
if (!ct) {
|
||||
/* Exception: ICMP redirect to new connection (not in
|
||||
hash table yet). We must not let this through, in
|
||||
case we're doing NAT to the same network. */
|
||||
hash table yet). We must not let this through, in
|
||||
case we're doing NAT to the same network. */
|
||||
if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
|
||||
struct icmphdr _hdr, *hp;
|
||||
|
||||
|
@ -148,7 +148,7 @@ ip_nat_fn(unsigned int hooknum,
|
|||
if (unlikely(is_confirmed(ct)))
|
||||
/* NAT module was loaded late */
|
||||
ret = alloc_null_binding_confirmed(ct, info,
|
||||
hooknum);
|
||||
hooknum);
|
||||
else if (hooknum == NF_IP_LOCAL_IN)
|
||||
/* LOCAL_IN hook doesn't have a chain! */
|
||||
ret = alloc_null_binding(ct, info, hooknum);
|
||||
|
@ -179,10 +179,10 @@ ip_nat_fn(unsigned int hooknum,
|
|||
|
||||
static unsigned int
|
||||
ip_nat_in(unsigned int hooknum,
|
||||
struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
unsigned int ret;
|
||||
__be32 daddr = (*pskb)->nh.iph->daddr;
|
||||
|
@ -277,9 +277,9 @@ ip_nat_adjust(unsigned int hooknum,
|
|||
|
||||
ct = ip_conntrack_get(*pskb, &ctinfo);
|
||||
if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
|
||||
DEBUGP("ip_nat_standalone: adjusting sequence number\n");
|
||||
if (!ip_nat_seq_adjust(pskb, ct, ctinfo))
|
||||
return NF_DROP;
|
||||
DEBUGP("ip_nat_standalone: adjusting sequence number\n");
|
||||
if (!ip_nat_seq_adjust(pskb, ct, ctinfo))
|
||||
return NF_DROP;
|
||||
}
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
|
|
@ -11,13 +11,13 @@
|
|||
*
|
||||
* 2000-03-27: Simplified code (thanks to Andi Kleen for clues).
|
||||
* 2000-05-20: Fixed notifier problems (following Miguel Freitas' report).
|
||||
* 2000-06-19: Fixed so nfmark is copied to metadata (reported by Sebastian
|
||||
* 2000-06-19: Fixed so nfmark is copied to metadata (reported by Sebastian
|
||||
* Zander).
|
||||
* 2000-08-01: Added Nick Williams' MAC support.
|
||||
* 2002-06-25: Code cleanup.
|
||||
* 2005-01-10: Added /proc counter for dropped packets; fixed so
|
||||
* packets aren't delivered to user space if they're going
|
||||
* to be dropped.
|
||||
* packets aren't delivered to user space if they're going
|
||||
* to be dropped.
|
||||
* 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte)
|
||||
*
|
||||
*/
|
||||
|
@ -97,7 +97,7 @@ __ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data)
|
|||
|
||||
list_for_each_prev(p, &queue_list) {
|
||||
struct ipq_queue_entry *entry = (struct ipq_queue_entry *)p;
|
||||
|
||||
|
||||
if (!cmpfn || cmpfn(entry, data))
|
||||
return entry;
|
||||
}
|
||||
|
@ -129,7 +129,7 @@ static inline void
|
|||
__ipq_flush(int verdict)
|
||||
{
|
||||
struct ipq_queue_entry *entry;
|
||||
|
||||
|
||||
while ((entry = __ipq_find_dequeue_entry(NULL, 0)))
|
||||
ipq_issue_verdict(entry, verdict);
|
||||
}
|
||||
|
@ -138,21 +138,21 @@ static inline int
|
|||
__ipq_set_mode(unsigned char mode, unsigned int range)
|
||||
{
|
||||
int status = 0;
|
||||
|
||||
|
||||
switch(mode) {
|
||||
case IPQ_COPY_NONE:
|
||||
case IPQ_COPY_META:
|
||||
copy_mode = mode;
|
||||
copy_range = 0;
|
||||
break;
|
||||
|
||||
|
||||
case IPQ_COPY_PACKET:
|
||||
copy_mode = mode;
|
||||
copy_range = range;
|
||||
if (copy_range > 0xFFFF)
|
||||
copy_range = 0xFFFF;
|
||||
break;
|
||||
|
||||
|
||||
default:
|
||||
status = -EINVAL;
|
||||
|
||||
|
@ -173,7 +173,7 @@ static struct ipq_queue_entry *
|
|||
ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
|
||||
{
|
||||
struct ipq_queue_entry *entry;
|
||||
|
||||
|
||||
write_lock_bh(&queue_lock);
|
||||
entry = __ipq_find_dequeue_entry(cmpfn, data);
|
||||
write_unlock_bh(&queue_lock);
|
||||
|
@ -199,14 +199,14 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
|
|||
struct nlmsghdr *nlh;
|
||||
|
||||
read_lock_bh(&queue_lock);
|
||||
|
||||
|
||||
switch (copy_mode) {
|
||||
case IPQ_COPY_META:
|
||||
case IPQ_COPY_NONE:
|
||||
size = NLMSG_SPACE(sizeof(*pmsg));
|
||||
data_len = 0;
|
||||
break;
|
||||
|
||||
|
||||
case IPQ_COPY_PACKET:
|
||||
if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
|
||||
entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
|
||||
|
@ -218,10 +218,10 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
|
|||
data_len = entry->skb->len;
|
||||
else
|
||||
data_len = copy_range;
|
||||
|
||||
|
||||
size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
|
||||
break;
|
||||
|
||||
|
||||
default:
|
||||
*errp = -EINVAL;
|
||||
read_unlock_bh(&queue_lock);
|
||||
|
@ -233,7 +233,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
|
|||
skb = alloc_skb(size, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
goto nlmsg_failure;
|
||||
|
||||
|
||||
old_tail= skb->tail;
|
||||
nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
|
||||
pmsg = NLMSG_DATA(nlh);
|
||||
|
@ -246,29 +246,29 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
|
|||
pmsg->mark = entry->skb->mark;
|
||||
pmsg->hook = entry->info->hook;
|
||||
pmsg->hw_protocol = entry->skb->protocol;
|
||||
|
||||
|
||||
if (entry->info->indev)
|
||||
strcpy(pmsg->indev_name, entry->info->indev->name);
|
||||
else
|
||||
pmsg->indev_name[0] = '\0';
|
||||
|
||||
|
||||
if (entry->info->outdev)
|
||||
strcpy(pmsg->outdev_name, entry->info->outdev->name);
|
||||
else
|
||||
pmsg->outdev_name[0] = '\0';
|
||||
|
||||
|
||||
if (entry->info->indev && entry->skb->dev) {
|
||||
pmsg->hw_type = entry->skb->dev->type;
|
||||
if (entry->skb->dev->hard_header_parse)
|
||||
pmsg->hw_addrlen =
|
||||
entry->skb->dev->hard_header_parse(entry->skb,
|
||||
pmsg->hw_addr);
|
||||
pmsg->hw_addr);
|
||||
}
|
||||
|
||||
|
||||
if (data_len)
|
||||
if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
|
||||
BUG();
|
||||
|
||||
|
||||
nlh->nlmsg_len = skb->tail - old_tail;
|
||||
return skb;
|
||||
|
||||
|
@ -303,26 +303,26 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
|
|||
nskb = ipq_build_packet_message(entry, &status);
|
||||
if (nskb == NULL)
|
||||
goto err_out_free;
|
||||
|
||||
|
||||
write_lock_bh(&queue_lock);
|
||||
|
||||
|
||||
if (!peer_pid)
|
||||
goto err_out_free_nskb;
|
||||
goto err_out_free_nskb;
|
||||
|
||||
if (queue_total >= queue_maxlen) {
|
||||
queue_dropped++;
|
||||
queue_dropped++;
|
||||
status = -ENOSPC;
|
||||
if (net_ratelimit())
|
||||
printk (KERN_WARNING "ip_queue: full at %d entries, "
|
||||
printk (KERN_WARNING "ip_queue: full at %d entries, "
|
||||
"dropping packets(s). Dropped: %d\n", queue_total,
|
||||
queue_dropped);
|
||||
goto err_out_free_nskb;
|
||||
}
|
||||
|
||||
/* netlink_unicast will either free the nskb or attach it to a socket */
|
||||
/* netlink_unicast will either free the nskb or attach it to a socket */
|
||||
status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
|
||||
if (status < 0) {
|
||||
queue_user_dropped++;
|
||||
queue_user_dropped++;
|
||||
goto err_out_unlock;
|
||||
}
|
||||
|
||||
|
@ -332,8 +332,8 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
|
|||
return status;
|
||||
|
||||
err_out_free_nskb:
|
||||
kfree_skb(nskb);
|
||||
|
||||
kfree_skb(nskb);
|
||||
|
||||
err_out_unlock:
|
||||
write_unlock_bh(&queue_lock);
|
||||
|
||||
|
@ -359,11 +359,11 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
|
|||
return -EINVAL;
|
||||
if (diff > skb_tailroom(e->skb)) {
|
||||
struct sk_buff *newskb;
|
||||
|
||||
|
||||
newskb = skb_copy_expand(e->skb,
|
||||
skb_headroom(e->skb),
|
||||
diff,
|
||||
GFP_ATOMIC);
|
||||
skb_headroom(e->skb),
|
||||
diff,
|
||||
GFP_ATOMIC);
|
||||
if (newskb == NULL) {
|
||||
printk(KERN_WARNING "ip_queue: OOM "
|
||||
"in mangle, dropping packet\n");
|
||||
|
@ -403,11 +403,11 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
|
|||
return -ENOENT;
|
||||
else {
|
||||
int verdict = vmsg->value;
|
||||
|
||||
|
||||
if (vmsg->data_len && vmsg->data_len == len)
|
||||
if (ipq_mangle_ipv4(vmsg, entry) < 0)
|
||||
verdict = NF_DROP;
|
||||
|
||||
|
||||
ipq_issue_verdict(entry, verdict);
|
||||
return 0;
|
||||
}
|
||||
|
@ -426,7 +426,7 @@ ipq_set_mode(unsigned char mode, unsigned int range)
|
|||
|
||||
static int
|
||||
ipq_receive_peer(struct ipq_peer_msg *pmsg,
|
||||
unsigned char type, unsigned int len)
|
||||
unsigned char type, unsigned int len)
|
||||
{
|
||||
int status = 0;
|
||||
|
||||
|
@ -436,15 +436,15 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
|
|||
switch (type) {
|
||||
case IPQM_MODE:
|
||||
status = ipq_set_mode(pmsg->msg.mode.value,
|
||||
pmsg->msg.mode.range);
|
||||
pmsg->msg.mode.range);
|
||||
break;
|
||||
|
||||
|
||||
case IPQM_VERDICT:
|
||||
if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
|
||||
status = -EINVAL;
|
||||
else
|
||||
status = ipq_set_verdict(&pmsg->msg.verdict,
|
||||
len - sizeof(*pmsg));
|
||||
len - sizeof(*pmsg));
|
||||
break;
|
||||
default:
|
||||
status = -EINVAL;
|
||||
|
@ -468,7 +468,7 @@ dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
|
|||
return 1;
|
||||
if (entry->skb->nf_bridge->physoutdev &&
|
||||
entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
|
||||
return 1;
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
|
@ -478,7 +478,7 @@ static void
|
|||
ipq_dev_drop(int ifindex)
|
||||
{
|
||||
struct ipq_queue_entry *entry;
|
||||
|
||||
|
||||
while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL)
|
||||
ipq_issue_verdict(entry, NF_DROP);
|
||||
}
|
||||
|
@ -502,25 +502,25 @@ ipq_rcv_skb(struct sk_buff *skb)
|
|||
|
||||
pid = nlh->nlmsg_pid;
|
||||
flags = nlh->nlmsg_flags;
|
||||
|
||||
|
||||
if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
|
||||
RCV_SKB_FAIL(-EINVAL);
|
||||
|
||||
|
||||
if (flags & MSG_TRUNC)
|
||||
RCV_SKB_FAIL(-ECOMM);
|
||||
|
||||
|
||||
type = nlh->nlmsg_type;
|
||||
if (type < NLMSG_NOOP || type >= IPQM_MAX)
|
||||
RCV_SKB_FAIL(-EINVAL);
|
||||
|
||||
|
||||
if (type <= IPQM_BASE)
|
||||
return;
|
||||
|
||||
|
||||
if (security_netlink_recv(skb, CAP_NET_ADMIN))
|
||||
RCV_SKB_FAIL(-EPERM);
|
||||
|
||||
|
||||
write_lock_bh(&queue_lock);
|
||||
|
||||
|
||||
if (peer_pid) {
|
||||
if (peer_pid != pid) {
|
||||
write_unlock_bh(&queue_lock);
|
||||
|
@ -530,17 +530,17 @@ ipq_rcv_skb(struct sk_buff *skb)
|
|||
net_enable_timestamp();
|
||||
peer_pid = pid;
|
||||
}
|
||||
|
||||
|
||||
write_unlock_bh(&queue_lock);
|
||||
|
||||
|
||||
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
|
||||
nlmsglen - NLMSG_LENGTH(0));
|
||||
nlmsglen - NLMSG_LENGTH(0));
|
||||
if (status < 0)
|
||||
RCV_SKB_FAIL(status);
|
||||
|
||||
|
||||
if (flags & NLM_F_ACK)
|
||||
netlink_ack(skb, nlh, 0);
|
||||
return;
|
||||
return;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -550,19 +550,19 @@ ipq_rcv_sk(struct sock *sk, int len)
|
|||
unsigned int qlen;
|
||||
|
||||
mutex_lock(&ipqnl_mutex);
|
||||
|
||||
|
||||
for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
|
||||
skb = skb_dequeue(&sk->sk_receive_queue);
|
||||
ipq_rcv_skb(skb);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
|
||||
mutex_unlock(&ipqnl_mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
ipq_rcv_dev_event(struct notifier_block *this,
|
||||
unsigned long event, void *ptr)
|
||||
unsigned long event, void *ptr)
|
||||
{
|
||||
struct net_device *dev = ptr;
|
||||
|
||||
|
@ -578,7 +578,7 @@ static struct notifier_block ipq_dev_notifier = {
|
|||
|
||||
static int
|
||||
ipq_rcv_nl_event(struct notifier_block *this,
|
||||
unsigned long event, void *ptr)
|
||||
unsigned long event, void *ptr)
|
||||
{
|
||||
struct netlink_notify *n = ptr;
|
||||
|
||||
|
@ -607,7 +607,7 @@ static ctl_table ipq_table[] = {
|
|||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
{ .ctl_name = 0 }
|
||||
{ .ctl_name = 0 }
|
||||
};
|
||||
|
||||
static ctl_table ipq_dir_table[] = {
|
||||
|
@ -637,25 +637,25 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
|
|||
int len;
|
||||
|
||||
read_lock_bh(&queue_lock);
|
||||
|
||||
|
||||
len = sprintf(buffer,
|
||||
"Peer PID : %d\n"
|
||||
"Copy mode : %hu\n"
|
||||
"Copy range : %u\n"
|
||||
"Queue length : %u\n"
|
||||
"Queue max. length : %u\n"
|
||||
"Peer PID : %d\n"
|
||||
"Copy mode : %hu\n"
|
||||
"Copy range : %u\n"
|
||||
"Queue length : %u\n"
|
||||
"Queue max. length : %u\n"
|
||||
"Queue dropped : %u\n"
|
||||
"Netlink dropped : %u\n",
|
||||
peer_pid,
|
||||
copy_mode,
|
||||
copy_range,
|
||||
queue_total,
|
||||
queue_maxlen,
|
||||
peer_pid,
|
||||
copy_mode,
|
||||
copy_range,
|
||||
queue_total,
|
||||
queue_maxlen,
|
||||
queue_dropped,
|
||||
queue_user_dropped);
|
||||
|
||||
read_unlock_bh(&queue_lock);
|
||||
|
||||
|
||||
*start = buffer + offset;
|
||||
len -= offset;
|
||||
if (len > length)
|
||||
|
@ -675,7 +675,7 @@ static int __init ip_queue_init(void)
|
|||
{
|
||||
int status = -ENOMEM;
|
||||
struct proc_dir_entry *proc;
|
||||
|
||||
|
||||
netlink_register_notifier(&ipq_nl_notifier);
|
||||
ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk,
|
||||
THIS_MODULE);
|
||||
|
@ -691,10 +691,10 @@ static int __init ip_queue_init(void)
|
|||
printk(KERN_ERR "ip_queue: failed to create proc entry\n");
|
||||
goto cleanup_ipqnl;
|
||||
}
|
||||
|
||||
|
||||
register_netdevice_notifier(&ipq_dev_notifier);
|
||||
ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0);
|
||||
|
||||
|
||||
status = nf_register_queue_handler(PF_INET, &nfqh);
|
||||
if (status < 0) {
|
||||
printk(KERN_ERR "ip_queue: failed to register queue handler\n");
|
||||
|
@ -706,12 +706,12 @@ static int __init ip_queue_init(void)
|
|||
unregister_sysctl_table(ipq_sysctl_header);
|
||||
unregister_netdevice_notifier(&ipq_dev_notifier);
|
||||
proc_net_remove(IPQ_PROC_FS_NAME);
|
||||
|
||||
|
||||
cleanup_ipqnl:
|
||||
sock_release(ipqnl->sk_socket);
|
||||
mutex_lock(&ipqnl_mutex);
|
||||
mutex_unlock(&ipqnl_mutex);
|
||||
|
||||
|
||||
cleanup_netlink_notifier:
|
||||
netlink_unregister_notifier(&ipq_nl_notifier);
|
||||
return status;
|
||||
|
|
|
@ -297,7 +297,7 @@ ipt_do_table(struct sk_buff **pskb,
|
|||
e = get_entry(table_base, v);
|
||||
} else {
|
||||
/* Targets which reenter must return
|
||||
abs. verdicts */
|
||||
abs. verdicts */
|
||||
#ifdef CONFIG_NETFILTER_DEBUG
|
||||
((struct ipt_entry *)table_base)->comefrom
|
||||
= 0xeeeeeeec;
|
||||
|
@ -556,9 +556,9 @@ find_check_match(struct ipt_entry_match *m,
|
|||
|
||||
static inline int check_target(struct ipt_entry *e, const char *name)
|
||||
{
|
||||
struct ipt_entry_target *t;
|
||||
struct ipt_entry_target *t;
|
||||
struct xt_target *target;
|
||||
int ret;
|
||||
int ret;
|
||||
|
||||
t = ipt_get_target(e);
|
||||
target = t->u.kernel.target;
|
||||
|
@ -652,7 +652,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
|
|||
}
|
||||
|
||||
/* FIXME: underflows must be unconditional, standard verdicts
|
||||
< 0 (not IPT_RETURN). --RR */
|
||||
< 0 (not IPT_RETURN). --RR */
|
||||
|
||||
/* Clear counters and comefrom */
|
||||
e->counters = ((struct xt_counters) { 0, 0 });
|
||||
|
@ -2057,7 +2057,7 @@ void ipt_unregister_table(struct xt_table *table)
|
|||
struct xt_table_info *private;
|
||||
void *loc_cpu_entry;
|
||||
|
||||
private = xt_unregister_table(table);
|
||||
private = xt_unregister_table(table);
|
||||
|
||||
/* Decrease module usage counts and free resources */
|
||||
loc_cpu_entry = private->entries[raw_smp_processor_id()];
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* Cluster IP hashmark target
|
||||
/* Cluster IP hashmark target
|
||||
* (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
|
||||
* based on ideas of Fabio Olive Leite <olive@unixforge.org>
|
||||
*
|
||||
|
@ -123,7 +123,7 @@ __clusterip_config_find(__be32 clusterip)
|
|||
struct list_head *pos;
|
||||
|
||||
list_for_each(pos, &clusterip_configs) {
|
||||
struct clusterip_config *c = list_entry(pos,
|
||||
struct clusterip_config *c = list_entry(pos,
|
||||
struct clusterip_config, list);
|
||||
if (c->clusterip == clusterip) {
|
||||
return c;
|
||||
|
@ -229,7 +229,7 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum)
|
|||
if (nodenum == 0 ||
|
||||
nodenum > c->num_total_nodes)
|
||||
return 1;
|
||||
|
||||
|
||||
if (test_and_clear_bit(nodenum - 1, &c->local_nodes))
|
||||
return 0;
|
||||
|
||||
|
@ -270,7 +270,7 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
|
|||
config->hash_initval);
|
||||
break;
|
||||
case CLUSTERIP_HASHMODE_SIP_SPT:
|
||||
hashval = jhash_2words(ntohl(iph->saddr), sport,
|
||||
hashval = jhash_2words(ntohl(iph->saddr), sport,
|
||||
config->hash_initval);
|
||||
break;
|
||||
case CLUSTERIP_HASHMODE_SIP_SPT_DPT:
|
||||
|
@ -297,8 +297,8 @@ clusterip_responsible(struct clusterip_config *config, u_int32_t hash)
|
|||
return test_bit(hash - 1, &config->local_nodes);
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
* IPTABLES TARGET
|
||||
/***********************************************************************
|
||||
* IPTABLES TARGET
|
||||
***********************************************************************/
|
||||
|
||||
static unsigned int
|
||||
|
@ -321,7 +321,7 @@ target(struct sk_buff **pskb,
|
|||
if (mark == NULL) {
|
||||
printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
|
||||
/* FIXME: need to drop invalid ones, since replies
|
||||
* to outgoing connections of other nodes will be
|
||||
* to outgoing connections of other nodes will be
|
||||
* marked as INVALID */
|
||||
return NF_DROP;
|
||||
}
|
||||
|
@ -329,11 +329,11 @@ target(struct sk_buff **pskb,
|
|||
/* special case: ICMP error handling. conntrack distinguishes between
|
||||
* error messages (RELATED) and information requests (see below) */
|
||||
if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
|
||||
&& (ctinfo == IP_CT_RELATED
|
||||
&& (ctinfo == IP_CT_RELATED
|
||||
|| ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
|
||||
return XT_CONTINUE;
|
||||
|
||||
/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
|
||||
/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
|
||||
* TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
|
||||
* on, which all have an ID field [relevant for hashing]. */
|
||||
|
||||
|
@ -376,8 +376,8 @@ static int
|
|||
checkentry(const char *tablename,
|
||||
const void *e_void,
|
||||
const struct xt_target *target,
|
||||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
{
|
||||
struct ipt_clusterip_tgt_info *cipinfo = targinfo;
|
||||
const struct ipt_entry *e = e_void;
|
||||
|
@ -437,7 +437,7 @@ checkentry(const char *tablename,
|
|||
return 0;
|
||||
}
|
||||
|
||||
config = clusterip_config_init(cipinfo,
|
||||
config = clusterip_config_init(cipinfo,
|
||||
e->ip.dst.s_addr, dev);
|
||||
if (!config) {
|
||||
printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n");
|
||||
|
@ -483,8 +483,8 @@ static struct xt_target clusterip_tgt = {
|
|||
};
|
||||
|
||||
|
||||
/***********************************************************************
|
||||
* ARP MANGLING CODE
|
||||
/***********************************************************************
|
||||
* ARP MANGLING CODE
|
||||
***********************************************************************/
|
||||
|
||||
/* hardcoded for 48bit ethernet and 32bit ipv4 addresses */
|
||||
|
@ -496,7 +496,7 @@ struct arp_payload {
|
|||
} __attribute__ ((packed));
|
||||
|
||||
#ifdef CLUSTERIP_DEBUG
|
||||
static void arp_print(struct arp_payload *payload)
|
||||
static void arp_print(struct arp_payload *payload)
|
||||
{
|
||||
#define HBUFFERLEN 30
|
||||
char hbuffer[HBUFFERLEN];
|
||||
|
@ -510,7 +510,7 @@ static void arp_print(struct arp_payload *payload)
|
|||
}
|
||||
hbuffer[--k]='\0';
|
||||
|
||||
printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n",
|
||||
printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n",
|
||||
NIPQUAD(payload->src_ip), hbuffer,
|
||||
NIPQUAD(payload->dst_ip));
|
||||
}
|
||||
|
@ -540,13 +540,13 @@ arp_mangle(unsigned int hook,
|
|||
|
||||
payload = (void *)(arp+1);
|
||||
|
||||
/* if there is no clusterip configuration for the arp reply's
|
||||
/* if there is no clusterip configuration for the arp reply's
|
||||
* source ip, we don't want to mangle it */
|
||||
c = clusterip_config_find_get(payload->src_ip, 0);
|
||||
if (!c)
|
||||
return NF_ACCEPT;
|
||||
|
||||
/* normally the linux kernel always replies to arp queries of
|
||||
/* normally the linux kernel always replies to arp queries of
|
||||
* addresses on different interfacs. However, in the CLUSTERIP case
|
||||
* this wouldn't work, since we didn't subscribe the mcast group on
|
||||
* other interfaces */
|
||||
|
@ -577,8 +577,8 @@ static struct nf_hook_ops cip_arp_ops = {
|
|||
.priority = -1
|
||||
};
|
||||
|
||||
/***********************************************************************
|
||||
* PROC DIR HANDLING
|
||||
/***********************************************************************
|
||||
* PROC DIR HANDLING
|
||||
***********************************************************************/
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
@ -640,7 +640,7 @@ static int clusterip_seq_show(struct seq_file *s, void *v)
|
|||
{
|
||||
struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
|
||||
|
||||
if (idx->pos != 0)
|
||||
if (idx->pos != 0)
|
||||
seq_putc(s, ',');
|
||||
|
||||
seq_printf(s, "%u", idx->bit);
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
/* iptables module for the IPv4 and TCP ECN bits, Version 1.5
|
||||
*
|
||||
* (C) 2002 by Harald Welte <laforge@netfilter.org>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp
|
||||
|
@ -40,7 +40,7 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
|
|||
iph->tos &= ~IPT_ECN_IP_MASK;
|
||||
iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
|
||||
nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -104,8 +104,8 @@ static int
|
|||
checkentry(const char *tablename,
|
||||
const void *e_void,
|
||||
const struct xt_target *target,
|
||||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
{
|
||||
const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo;
|
||||
const struct ipt_entry *e = e_void;
|
||||
|
|
|
@ -289,7 +289,7 @@ static void dump_packet(const struct nf_loginfo *info,
|
|||
|
||||
if (ntohs(ih->frag_off) & IP_OFFSET)
|
||||
break;
|
||||
|
||||
|
||||
/* Max length: 9 "PROTO=AH " */
|
||||
printk("PROTO=AH ");
|
||||
|
||||
|
@ -334,10 +334,10 @@ static void dump_packet(const struct nf_loginfo *info,
|
|||
}
|
||||
|
||||
/* Max length: 15 "UID=4294967295 " */
|
||||
if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
|
||||
if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
|
||||
read_lock_bh(&skb->sk->sk_callback_lock);
|
||||
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
|
||||
printk("UID=%u ", skb->sk->sk_socket->file->f_uid);
|
||||
printk("UID=%u ", skb->sk->sk_socket->file->f_uid);
|
||||
read_unlock_bh(&skb->sk->sk_callback_lock);
|
||||
}
|
||||
|
||||
|
@ -431,7 +431,7 @@ ipt_log_target(struct sk_buff **pskb,
|
|||
li.u.log.logflags = loginfo->logflags;
|
||||
|
||||
ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
|
||||
loginfo->prefix);
|
||||
loginfo->prefix);
|
||||
return XT_CONTINUE;
|
||||
}
|
||||
|
||||
|
@ -483,7 +483,7 @@ static int __init ipt_log_init(void)
|
|||
/* we cannot make module load fail here, since otherwise
|
||||
* iptables userspace would abort */
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ masquerade_target(struct sk_buff **pskb,
|
|||
nat = nfct_nat(ct);
|
||||
#endif
|
||||
IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
|
||||
|| ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
|
||||
|| ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
|
||||
|
||||
/* Source address is 0.0.0.0 - locally generated packet that is
|
||||
* probably not supposed to be masqueraded.
|
||||
|
@ -221,7 +221,7 @@ static void __exit ipt_masquerade_fini(void)
|
|||
{
|
||||
xt_unregister_target(&masquerade);
|
||||
unregister_netdevice_notifier(&masq_dev_notifier);
|
||||
unregister_inetaddr_notifier(&masq_inet_notifier);
|
||||
unregister_inetaddr_notifier(&masq_inet_notifier);
|
||||
}
|
||||
|
||||
module_init(ipt_masquerade_init);
|
||||
|
|
|
@ -92,13 +92,13 @@ target(struct sk_buff **pskb,
|
|||
static struct xt_target target_module = {
|
||||
.name = MODULENAME,
|
||||
.family = AF_INET,
|
||||
.target = target,
|
||||
.target = target,
|
||||
.targetsize = sizeof(struct ip_nat_multi_range_compat),
|
||||
.table = "nat",
|
||||
.hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING) |
|
||||
(1 << NF_IP_LOCAL_OUT),
|
||||
.checkentry = check,
|
||||
.me = THIS_MODULE
|
||||
.me = THIS_MODULE
|
||||
};
|
||||
|
||||
static int __init ipt_netmap_init(void)
|
||||
|
|
|
@ -84,7 +84,7 @@ redirect_target(struct sk_buff **pskb,
|
|||
struct in_ifaddr *ifa;
|
||||
|
||||
newdst = 0;
|
||||
|
||||
|
||||
rcu_read_lock();
|
||||
indev = __in_dev_get_rcu((*pskb)->dev);
|
||||
if (indev && (ifa = indev->ifa_list))
|
||||
|
|
|
@ -57,7 +57,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
|
|||
oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4,
|
||||
sizeof(_otcph), &_otcph);
|
||||
if (oth == NULL)
|
||||
return;
|
||||
return;
|
||||
|
||||
/* No RST for RST. */
|
||||
if (oth->rst)
|
||||
|
@ -145,7 +145,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
|
|||
|
||||
/* Adjust IP checksum */
|
||||
nskb->nh.iph->check = 0;
|
||||
nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
|
||||
nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
|
||||
nskb->nh.iph->ihl);
|
||||
|
||||
/* "Never happens" */
|
||||
|
@ -165,7 +165,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
|
|||
static inline void send_unreach(struct sk_buff *skb_in, int code)
|
||||
{
|
||||
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned int reject(struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
|
@ -177,33 +177,33 @@ static unsigned int reject(struct sk_buff **pskb,
|
|||
const struct ipt_reject_info *reject = targinfo;
|
||||
|
||||
/* Our naive response construction doesn't deal with IP
|
||||
options, and probably shouldn't try. */
|
||||
options, and probably shouldn't try. */
|
||||
if ((*pskb)->nh.iph->ihl<<2 != sizeof(struct iphdr))
|
||||
return NF_DROP;
|
||||
|
||||
/* WARNING: This code causes reentry within iptables.
|
||||
This means that the iptables jump stack is now crap. We
|
||||
must return an absolute verdict. --RR */
|
||||
switch (reject->with) {
|
||||
case IPT_ICMP_NET_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_NET_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_HOST_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_HOST_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_PROT_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_PROT_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_PORT_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_PORT_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_NET_PROHIBITED:
|
||||
send_unreach(*pskb, ICMP_NET_ANO);
|
||||
break;
|
||||
switch (reject->with) {
|
||||
case IPT_ICMP_NET_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_NET_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_HOST_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_HOST_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_PROT_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_PROT_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_PORT_UNREACHABLE:
|
||||
send_unreach(*pskb, ICMP_PORT_UNREACH);
|
||||
break;
|
||||
case IPT_ICMP_NET_PROHIBITED:
|
||||
send_unreach(*pskb, ICMP_NET_ANO);
|
||||
break;
|
||||
case IPT_ICMP_HOST_PROHIBITED:
|
||||
send_unreach(*pskb, ICMP_HOST_ANO);
|
||||
break;
|
||||
case IPT_ICMP_ADMIN_PROHIBITED:
|
||||
send_unreach(*pskb, ICMP_HOST_ANO);
|
||||
break;
|
||||
case IPT_ICMP_ADMIN_PROHIBITED:
|
||||
send_unreach(*pskb, ICMP_PKT_FILTERED);
|
||||
break;
|
||||
case IPT_TCP_RESET:
|
||||
|
@ -222,7 +222,7 @@ static int check(const char *tablename,
|
|||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
{
|
||||
const struct ipt_reject_info *rejinfo = targinfo;
|
||||
const struct ipt_reject_info *rejinfo = targinfo;
|
||||
const struct ipt_entry *e = e_void;
|
||||
|
||||
if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
|
||||
|
|
|
@ -87,24 +87,24 @@ same_check(const char *tablename,
|
|||
DEBUGP("same_check: bad MAP_IPS.\n");
|
||||
return 0;
|
||||
}
|
||||
rangeip = (ntohl(mr->range[count].max_ip) -
|
||||
rangeip = (ntohl(mr->range[count].max_ip) -
|
||||
ntohl(mr->range[count].min_ip) + 1);
|
||||
mr->ipnum += rangeip;
|
||||
|
||||
|
||||
DEBUGP("same_check: range %u, ipnum = %u\n", count, rangeip);
|
||||
}
|
||||
DEBUGP("same_check: total ipaddresses = %u\n", mr->ipnum);
|
||||
|
||||
|
||||
mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL);
|
||||
if (!mr->iparray) {
|
||||
DEBUGP("same_check: Couldn't allocate %u bytes "
|
||||
"for %u ipaddresses!\n",
|
||||
"for %u ipaddresses!\n",
|
||||
(sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
|
||||
return 0;
|
||||
}
|
||||
DEBUGP("same_check: Allocated %u bytes for %u ipaddresses.\n",
|
||||
(sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
|
||||
|
||||
|
||||
for (count = 0; count < mr->rangesize; count++) {
|
||||
for (countess = ntohl(mr->range[count].min_ip);
|
||||
countess <= ntohl(mr->range[count].max_ip);
|
||||
|
@ -119,13 +119,13 @@ same_check(const char *tablename,
|
|||
return 1;
|
||||
}
|
||||
|
||||
static void
|
||||
static void
|
||||
same_destroy(const struct xt_target *target, void *targinfo)
|
||||
{
|
||||
struct ipt_same_info *mr = targinfo;
|
||||
|
||||
kfree(mr->iparray);
|
||||
|
||||
|
||||
DEBUGP("same_destroy: Deallocated %u bytes for %u ipaddresses.\n",
|
||||
(sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ same_target(struct sk_buff **pskb,
|
|||
giving some hope for consistency across reboots.
|
||||
Here we calculate the index in same->iparray which
|
||||
holds the ipaddress we should use */
|
||||
|
||||
|
||||
#ifdef CONFIG_NF_NAT_NEEDED
|
||||
tmpip = ntohl(t->src.u3.ip);
|
||||
|
||||
|
|
|
@ -47,8 +47,8 @@ static int
|
|||
checkentry(const char *tablename,
|
||||
const void *e_void,
|
||||
const struct xt_target *target,
|
||||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
{
|
||||
const u_int8_t tos = ((struct ipt_tos_target_info *)targinfo)->tos;
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
|
|||
MODULE_DESCRIPTION("IP tables TTL modification module");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
static unsigned int
|
||||
static unsigned int
|
||||
ipt_ttl_target(struct sk_buff **pskb,
|
||||
const struct net_device *in, const struct net_device *out,
|
||||
unsigned int hooknum, const struct xt_target *target,
|
||||
|
@ -71,7 +71,7 @@ static int ipt_ttl_checkentry(const char *tablename,
|
|||
struct ipt_TTL_info *info = targinfo;
|
||||
|
||||
if (info->mode > IPT_TTL_MAXMODE) {
|
||||
printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
|
||||
printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
|
||||
info->mode);
|
||||
return 0;
|
||||
}
|
||||
|
@ -83,10 +83,10 @@ static int ipt_ttl_checkentry(const char *tablename,
|
|||
static struct xt_target ipt_TTL = {
|
||||
.name = "TTL",
|
||||
.family = AF_INET,
|
||||
.target = ipt_ttl_target,
|
||||
.target = ipt_ttl_target,
|
||||
.targetsize = sizeof(struct ipt_TTL_info),
|
||||
.table = "mangle",
|
||||
.checkentry = ipt_ttl_checkentry,
|
||||
.checkentry = ipt_ttl_checkentry,
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
|
|
@ -4,9 +4,9 @@
|
|||
* (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
|
||||
*
|
||||
* 2000/09/22 ulog-cprange feature added
|
||||
* 2001/01/04 in-kernel queue as proposed by Sebastian Zander
|
||||
* 2001/01/04 in-kernel queue as proposed by Sebastian Zander
|
||||
* <zander@fokus.gmd.de>
|
||||
* 2001/01/30 per-rule nlgroup conflicts with global queue.
|
||||
* 2001/01/30 per-rule nlgroup conflicts with global queue.
|
||||
* nlgroup now global (sysctl)
|
||||
* 2001/04/19 ulog-queue reworked, now fixed buffer size specified at
|
||||
* module loadtime -HW
|
||||
|
@ -23,8 +23,8 @@
|
|||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This module accepts two parameters:
|
||||
*
|
||||
* This module accepts two parameters:
|
||||
*
|
||||
* nlbufsiz:
|
||||
* The parameter specifies how big the buffer for each netlink multicast
|
||||
* group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
|
||||
|
@ -72,7 +72,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
|
|||
|
||||
#if 0
|
||||
#define DEBUGP(format, args...) printk("%s:%s:" format, \
|
||||
__FILE__, __FUNCTION__ , ## args)
|
||||
__FILE__, __FUNCTION__ , ## args)
|
||||
#else
|
||||
#define DEBUGP(format, args...)
|
||||
#endif
|
||||
|
@ -162,7 +162,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
|
|||
PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
|
||||
|
||||
if (n > size) {
|
||||
/* try to allocate only as much as we need for
|
||||
/* try to allocate only as much as we need for
|
||||
* current packet */
|
||||
|
||||
skb = alloc_skb(size, GFP_ATOMIC);
|
||||
|
@ -203,7 +203,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
|
|||
size = NLMSG_SPACE(sizeof(*pm) + copy_len);
|
||||
|
||||
ub = &ulog_buffers[groupnum];
|
||||
|
||||
|
||||
spin_lock_bh(&ulog_lock);
|
||||
|
||||
if (!ub->skb) {
|
||||
|
@ -211,7 +211,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
|
|||
goto alloc_failure;
|
||||
} else if (ub->qlen >= loginfo->qthreshold ||
|
||||
size > skb_tailroom(ub->skb)) {
|
||||
/* either the queue len is too high or we don't have
|
||||
/* either the queue len is too high or we don't have
|
||||
* enough room in nlskb left. send it to userspace. */
|
||||
|
||||
ulog_send(groupnum);
|
||||
|
@ -220,11 +220,11 @@ static void ipt_ulog_packet(unsigned int hooknum,
|
|||
goto alloc_failure;
|
||||
}
|
||||
|
||||
DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen,
|
||||
DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen,
|
||||
loginfo->qthreshold);
|
||||
|
||||
/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
|
||||
nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
|
||||
nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
|
||||
sizeof(*pm)+copy_len);
|
||||
ub->qlen++;
|
||||
|
||||
|
@ -268,7 +268,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
|
|||
/* copy_len <= skb->len, so can't fail. */
|
||||
if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
|
||||
BUG();
|
||||
|
||||
|
||||
/* check if we are building multi-part messages */
|
||||
if (ub->qlen > 1) {
|
||||
ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
|
||||
|
@ -312,10 +312,10 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
|
|||
struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;
|
||||
|
||||
ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL);
|
||||
|
||||
|
||||
return XT_CONTINUE;
|
||||
}
|
||||
|
||||
|
||||
static void ipt_logfn(unsigned int pf,
|
||||
unsigned int hooknum,
|
||||
const struct sk_buff *skb,
|
||||
|
@ -396,7 +396,7 @@ static int __init ipt_ulog_init(void)
|
|||
}
|
||||
|
||||
nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL,
|
||||
THIS_MODULE);
|
||||
THIS_MODULE);
|
||||
if (!nflognl)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -407,7 +407,7 @@ static int __init ipt_ulog_init(void)
|
|||
}
|
||||
if (nflog)
|
||||
nf_log_register(PF_INET, &ipt_ulog_logger);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ static int match(const struct sk_buff *skb,
|
|||
ret &= match_type(iph->saddr, info->source)^info->invert_source;
|
||||
if (info->dest)
|
||||
ret &= match_type(iph->daddr, info->dest)^info->invert_dest;
|
||||
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -29,8 +29,8 @@ static inline int
|
|||
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
|
||||
{
|
||||
int r=0;
|
||||
duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
|
||||
min,spi,max);
|
||||
duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
|
||||
min,spi,max);
|
||||
r=(spi >= min && spi <= max) ^ invert;
|
||||
duprintf(" result %s\n",r? "PASS" : "FAILED");
|
||||
return r;
|
||||
|
|
|
@ -41,7 +41,7 @@ match(const struct sk_buff *skb,
|
|||
DEBUGP("src IP %u.%u.%u.%u NOT in range %s"
|
||||
"%u.%u.%u.%u-%u.%u.%u.%u\n",
|
||||
NIPQUAD(iph->saddr),
|
||||
info->flags & IPRANGE_SRC_INV ? "(INV) " : "",
|
||||
info->flags & IPRANGE_SRC_INV ? "(INV) " : "",
|
||||
NIPQUAD(info->src.min_ip),
|
||||
NIPQUAD(info->src.max_ip));
|
||||
return 0;
|
||||
|
@ -54,7 +54,7 @@ match(const struct sk_buff *skb,
|
|||
DEBUGP("dst IP %u.%u.%u.%u NOT in range %s"
|
||||
"%u.%u.%u.%u-%u.%u.%u.%u\n",
|
||||
NIPQUAD(iph->daddr),
|
||||
info->flags & IPRANGE_DST_INV ? "(INV) " : "",
|
||||
info->flags & IPRANGE_DST_INV ? "(INV) " : "",
|
||||
NIPQUAD(info->dst.min_ip),
|
||||
NIPQUAD(info->dst.max_ip));
|
||||
return 0;
|
||||
|
|
|
@ -53,10 +53,10 @@ match(const struct sk_buff *skb,
|
|||
|
||||
static int
|
||||
checkentry(const char *tablename,
|
||||
const void *ip,
|
||||
const void *ip,
|
||||
const struct xt_match *match,
|
||||
void *matchinfo,
|
||||
unsigned int hook_mask)
|
||||
void *matchinfo,
|
||||
unsigned int hook_mask)
|
||||
{
|
||||
const struct ipt_owner_info *info = matchinfo;
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* IP tables module for matching the value of the TTL
|
||||
/* IP tables module for matching the value of the TTL
|
||||
*
|
||||
* ipt_ttl.c,v 1.5 2000/11/13 11:16:08 laforge Exp
|
||||
*
|
||||
|
@ -41,7 +41,7 @@ static int match(const struct sk_buff *skb,
|
|||
return (skb->nh.iph->ttl > info->ttl);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
|
||||
printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
|
||||
info->mode);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ static struct
|
|||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[3];
|
||||
struct ipt_error term;
|
||||
} initial_table __initdata
|
||||
} initial_table __initdata
|
||||
= { { "filter", FILTER_VALID_HOOKS, 4,
|
||||
sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
{ [NF_IP_LOCAL_IN] = 0,
|
||||
|
|
|
@ -58,7 +58,7 @@ static struct
|
|||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* LOCAL_IN */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
|
@ -66,7 +66,7 @@ static struct
|
|||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* FORWARD */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
|
@ -166,7 +166,7 @@ static struct nf_hook_ops ipt_ops[] = {
|
|||
.hook = ipt_route_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = PF_INET,
|
||||
.hooknum = NF_IP_PRE_ROUTING,
|
||||
.hooknum = NF_IP_PRE_ROUTING,
|
||||
.priority = NF_IP_PRI_MANGLE,
|
||||
},
|
||||
{
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/*
|
||||
/*
|
||||
* 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT .
|
||||
*
|
||||
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
|
||||
|
@ -15,26 +15,26 @@ static struct
|
|||
struct ipt_error term;
|
||||
} initial_table __initdata = {
|
||||
.repl = {
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
.num_entries = 3,
|
||||
.size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
.hook_entry = {
|
||||
[NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) },
|
||||
.underflow = {
|
||||
.underflow = {
|
||||
[NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) },
|
||||
},
|
||||
.entries = {
|
||||
/* PRE_ROUTING */
|
||||
{
|
||||
.entry = {
|
||||
{
|
||||
.entry = {
|
||||
.target_offset = sizeof(struct ipt_entry),
|
||||
.next_offset = sizeof(struct ipt_standard),
|
||||
},
|
||||
.target = {
|
||||
.target = {
|
||||
.target = {
|
||||
.target = {
|
||||
.u = {
|
||||
.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
|
||||
},
|
||||
|
@ -69,7 +69,7 @@ static struct
|
|||
.target = {
|
||||
.u = {
|
||||
.user = {
|
||||
.target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
|
||||
.target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
|
||||
.name = IPT_ERROR_TARGET,
|
||||
},
|
||||
},
|
||||
|
@ -80,9 +80,9 @@ static struct
|
|||
};
|
||||
|
||||
static struct xt_table packet_raw = {
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
.lock = RW_LOCK_UNLOCKED,
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
.lock = RW_LOCK_UNLOCKED,
|
||||
.me = THIS_MODULE,
|
||||
.af = AF_INET,
|
||||
};
|
||||
|
|
|
@ -66,7 +66,7 @@ static int ipv4_print_tuple(struct seq_file *s,
|
|||
const struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
|
||||
NIPQUAD(tuple->src.u3.ip),
|
||||
NIPQUAD(tuple->src.u3.ip),
|
||||
NIPQUAD(tuple->dst.u3.ip));
|
||||
}
|
||||
|
||||
|
@ -82,14 +82,14 @@ nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
|
|||
{
|
||||
skb_orphan(skb);
|
||||
|
||||
local_bh_disable();
|
||||
skb = ip_defrag(skb, user);
|
||||
local_bh_enable();
|
||||
local_bh_disable();
|
||||
skb = ip_defrag(skb, user);
|
||||
local_bh_enable();
|
||||
|
||||
if (skb)
|
||||
if (skb)
|
||||
ip_send_check(skb->nh.iph);
|
||||
|
||||
return skb;
|
||||
return skb;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -192,10 +192,10 @@ static unsigned int ipv4_conntrack_in(unsigned int hooknum,
|
|||
}
|
||||
|
||||
static unsigned int ipv4_conntrack_local(unsigned int hooknum,
|
||||
struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
/* root is playing with raw sockets. */
|
||||
if ((*pskb)->len < sizeof(struct iphdr)
|
||||
|
@ -332,7 +332,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
|
|||
struct inet_sock *inet = inet_sk(sk);
|
||||
struct nf_conntrack_tuple_hash *h;
|
||||
struct nf_conntrack_tuple tuple;
|
||||
|
||||
|
||||
NF_CT_TUPLE_U_BLANK(&tuple);
|
||||
tuple.src.u3.ip = inet->rcv_saddr;
|
||||
tuple.src.u.tcp.port = inet->sport;
|
||||
|
@ -501,7 +501,7 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
|
|||
return ret;
|
||||
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
|
||||
cleanup_hooks:
|
||||
nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
|
||||
nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
|
||||
#endif
|
||||
cleanup_ipv4:
|
||||
nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
|
||||
|
|
|
@ -135,7 +135,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
|
|||
l3proto, l4proto))
|
||||
return -ENOSPC;
|
||||
|
||||
if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
|
||||
if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
|
||||
return -ENOSPC;
|
||||
|
||||
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
|
||||
|
@ -146,7 +146,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
|
|||
l3proto, l4proto))
|
||||
return -ENOSPC;
|
||||
|
||||
if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
|
||||
if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
|
||||
return -ENOSPC;
|
||||
|
||||
if (test_bit(IPS_ASSURED_BIT, &ct->status))
|
||||
|
@ -228,7 +228,7 @@ static void *exp_seq_start(struct seq_file *s, loff_t *pos)
|
|||
|
||||
static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
|
||||
{
|
||||
struct list_head *e = v;
|
||||
struct list_head *e = v;
|
||||
|
||||
++*pos;
|
||||
e = e->next;
|
||||
|
@ -262,7 +262,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
|
|||
print_tuple(s, &exp->tuple,
|
||||
__nf_ct_l3proto_find(exp->tuple.src.l3num),
|
||||
__nf_ct_l4proto_find(exp->tuple.src.l3num,
|
||||
exp->tuple.dst.protonum));
|
||||
exp->tuple.dst.protonum));
|
||||
return seq_putc(s, '\n');
|
||||
}
|
||||
|
||||
|
|
|
@ -101,9 +101,9 @@ static int icmp_packet(struct nf_conn *ct,
|
|||
unsigned int hooknum)
|
||||
{
|
||||
/* Try to delete connection immediately after all replies:
|
||||
won't actually vanish as we still have skb, and del_timer
|
||||
means this will only run once even if count hits zero twice
|
||||
(theoretically possible with SMP) */
|
||||
won't actually vanish as we still have skb, and del_timer
|
||||
means this will only run once even if count hits zero twice
|
||||
(theoretically possible with SMP) */
|
||||
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
|
||||
if (atomic_dec_and_test(&ct->proto.icmp.count)
|
||||
&& del_timer(&ct->timeout))
|
||||
|
@ -144,8 +144,8 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
|
|||
/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
|
||||
static int
|
||||
icmp_error_message(struct sk_buff *skb,
|
||||
enum ip_conntrack_info *ctinfo,
|
||||
unsigned int hooknum)
|
||||
enum ip_conntrack_info *ctinfo,
|
||||
unsigned int hooknum)
|
||||
{
|
||||
struct nf_conntrack_tuple innertuple, origtuple;
|
||||
struct {
|
||||
|
@ -181,9 +181,9 @@ icmp_error_message(struct sk_buff *skb,
|
|||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
/* Ordinarily, we'd expect the inverted tupleproto, but it's
|
||||
been preserved inside the ICMP. */
|
||||
if (!nf_ct_invert_tuple(&innertuple, &origtuple,
|
||||
/* Ordinarily, we'd expect the inverted tupleproto, but it's
|
||||
been preserved inside the ICMP. */
|
||||
if (!nf_ct_invert_tuple(&innertuple, &origtuple,
|
||||
&nf_conntrack_l3proto_ipv4, innerproto)) {
|
||||
DEBUGP("icmp_error_message: no match\n");
|
||||
return -NF_ACCEPT;
|
||||
|
@ -212,10 +212,10 @@ icmp_error_message(struct sk_buff *skb,
|
|||
*ctinfo += IP_CT_IS_REPLY;
|
||||
}
|
||||
|
||||
/* Update skb to refer to this connection */
|
||||
skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
|
||||
skb->nfctinfo = *ctinfo;
|
||||
return -NF_ACCEPT;
|
||||
/* Update skb to refer to this connection */
|
||||
skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
|
||||
skb->nfctinfo = *ctinfo;
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
/* Small and modified version of icmp_rcv */
|
||||
|
@ -306,7 +306,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
|
|||
if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
|
||||
return -EINVAL;
|
||||
|
||||
tuple->dst.u.icmp.type =
|
||||
tuple->dst.u.icmp.type =
|
||||
*(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]);
|
||||
tuple->dst.u.icmp.code =
|
||||
*(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
|
||||
|
@ -332,7 +332,7 @@ static struct ctl_table icmp_sysctl_table[] = {
|
|||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
{
|
||||
.ctl_name = 0
|
||||
}
|
||||
};
|
||||
|
@ -346,7 +346,7 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
|
|||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
{
|
||||
.ctl_name = 0
|
||||
}
|
||||
};
|
||||
|
|
|
@ -452,8 +452,8 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
|
|||
(*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
|
||||
|
||||
/* Redirects on non-null nats must be dropped, else they'll
|
||||
start talking to each other without our translation, and be
|
||||
confused... --RR */
|
||||
start talking to each other without our translation, and be
|
||||
confused... --RR */
|
||||
if (inside->icmp.type == ICMP_REDIRECT) {
|
||||
/* If NAT isn't finished, assume it and drop. */
|
||||
if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
|
||||
|
@ -469,13 +469,13 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
|
|||
if (!nf_ct_get_tuple(*pskb,
|
||||
(*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
|
||||
(*pskb)->nh.iph->ihl*4 +
|
||||
sizeof(struct icmphdr) + inside->ip.ihl*4,
|
||||
(u_int16_t)AF_INET,
|
||||
inside->ip.protocol,
|
||||
&inner,
|
||||
l3proto,
|
||||
sizeof(struct icmphdr) + inside->ip.ihl*4,
|
||||
(u_int16_t)AF_INET,
|
||||
inside->ip.protocol,
|
||||
&inner,
|
||||
l3proto,
|
||||
__nf_ct_l4proto_find((u_int16_t)PF_INET,
|
||||
inside->ip.protocol)))
|
||||
inside->ip.protocol)))
|
||||
return 0;
|
||||
|
||||
/* Change inner back to look like incoming packet. We do the
|
||||
|
|
|
@ -256,7 +256,7 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
|
|||
if (set_h245_addr(pskb, data, dataoff, taddr,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
htons((port & htons(1)) ? nated_port + 1 :
|
||||
nated_port)) == 0) {
|
||||
nated_port)) == 0) {
|
||||
/* Save ports */
|
||||
info->rtp_port[i][dir] = rtp_port;
|
||||
info->rtp_port[i][!dir] = htons(nated_port);
|
||||
|
|
|
@@ -179,7 +179,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
@@ -223,7 +223,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
@@ -252,9 +252,9 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
@@ -184,10 +184,10 @@ pptp_outbound_pkt(struct sk_buff **pskb,
@@ -276,7 +276,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
@@ -44,7 +44,7 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
@@ -56,8 +56,8 @@ static struct
@@ -71,8 +71,8 @@ static struct
@@ -86,8 +86,8 @@ static struct
@@ -145,7 +145,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
@@ -256,8 +256,8 @@ alloc_null_binding(struct nf_conn *ct,
@@ -90,7 +90,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
@@ -151,7 +151,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb,
@@ -172,7 +172,7 @@ static int mangle_content_len(struct sk_buff **pskb,
@@ -180,7 +180,7 @@ static int mangle_content_len(struct sk_buff **pskb,
@@ -205,17 +205,17 @@ static unsigned int mangle_sdp(struct sk_buff **pskb,
@@ -150,8 +150,8 @@ struct asn1_octstr
@@ -186,9 +186,9 @@ static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
@@ -207,8 +207,8 @@ static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
@@ -239,10 +239,10 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
@@ -297,8 +297,8 @@ static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
@@ -325,8 +325,8 @@ static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
@@ -354,8 +354,8 @@ static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
@@ -383,9 +383,9 @@ static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
@@ -411,7 +411,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
@@ -428,9 +428,9 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
@@ -611,9 +611,9 @@ struct snmp_v1_trap
@@ -644,8 +644,8 @@ static struct snmp_cnv snmp_conv [] =
@@ -662,7 +662,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag,
@@ -714,7 +714,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
@@ -730,7 +730,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
@@ -834,7 +834,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
@@ -874,9 +874,9 @@ static unsigned char snmp_request_decode(struct asn1_ctx *ctx,
@@ -899,9 +899,9 @@ static void fast_csum(__sum16 *csum,
@@ -914,7 +914,7 @@ static inline void mangle_address(unsigned char *begin,
@@ -925,9 +925,9 @@ static inline void mangle_address(unsigned char *begin,
@@ -1019,9 +1019,9 @@ static void hex_dump(unsigned char *buf, size_t len)
@@ -1191,8 +1191,8 @@ static int snmp_parse_mangle(unsigned char *msg,
@@ -1219,7 +1219,7 @@ static int snmp_translate(struct nf_conn *ct,
@@ -96,8 +96,8 @@ nf_nat_fn(unsigned int hooknum,
@@ -141,7 +141,7 @@ nf_nat_fn(unsigned int hooknum,
@@ -171,10 +171,10 @@ nf_nat_fn(unsigned int hooknum,
@@ -269,9 +269,9 @@ nf_nat_adjust(unsigned int hooknum,
@@ -266,7 +266,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
@@ -276,7 +276,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
@@ -288,7 +288,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
@@ -303,7 +303,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
@@ -348,7 +348,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
@@ -74,7 +74,7 @@ int inet_add_protocol(struct net_protocol *prot, unsigned char protocol)
@@ -95,7 +95,7 @@ static void raw_v4_hash(struct sock *sk)
@@ -238,7 +238,7 @@ void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
@@ -263,7 +263,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
@@ -285,7 +285,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
@@ -326,7 +326,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
@@ -399,9 +399,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
@@ -426,7 +426,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
@@ -480,7 +480,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
@@ -503,9 +503,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
@@ -538,7 +538,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
@@ -861,7 +861,7 @@ static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
net/ipv4/route.c
@@ -20,7 +20,7 @@
@@ -38,7 +38,7 @@
@@ -361,8 +361,8 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -445,7 +445,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
@@ -459,7 +459,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
@@ -493,7 +493,7 @@ static struct file_operations rt_cpu_seq_fops = {
@@ -672,8 +672,8 @@ static void rt_check_expire(unsigned long dummy)
@@ -739,7 +739,7 @@ void rt_cache_flush(int delay)
@@ -1104,7 +1104,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
@@ -1190,7 +1190,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
@@ -1225,11 +1225,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
@@ -1343,7 +1343,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
@@ -1379,7 +1379,7 @@ static int ip_error(struct sk_buff *skb)
@@ -1392,7 +1392,7 @@ static const unsigned short mtu_plateau[] =
@@ -1436,7 +1436,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
@@ -1600,7 +1600,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
@@ -1714,11 +1714,11 @@ static void ip_handle_martian_source(struct net_device *dev,
@@ -1738,12 +1738,12 @@ static inline int __mkroute_input(struct sk_buff *skb,
@@ -1811,10 +1811,10 @@ static inline int __mkroute_input(struct sk_buff *skb,
@@ -1835,11 +1835,11 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
@@ -1859,7 +1859,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
@@ -1988,7 +1988,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2071,8 +2071,8 @@ out: return err;
@@ -2153,11 +2153,11 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2190,7 +2190,7 @@ static inline int __mkroute_output(struct rtable **result,
@@ -2208,7 +2208,7 @@ static inline int __mkroute_output(struct rtable **result,
@@ -2232,7 +2232,7 @@ static inline int __mkroute_output(struct rtable **result,
@@ -2250,7 +2250,7 @@ static inline int __mkroute_output(struct rtable **result,
@@ -2292,7 +2292,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
@@ -2830,7 +2830,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -2863,7 +2863,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
@@ -2880,13 +2880,13 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
@@ -2931,7 +2931,7 @@ ctl_table ipv4_route_table[] = {
@@ -3180,8 +3180,8 @@ int __init ip_rt_init(void)
Some files were not shown because too many files have changed in this diff.