mirror of https://gitee.com/openkylin/linux.git
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.26
commit 334f8b2afd
@@ -6,11 +6,13 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#endif
#include <linux/compiler.h>

@@ -76,7 +78,6 @@ extern void netfilter_init(void);
#define NF_MAX_HOOKS 8

struct sk_buff;
struct net_device;

typedef unsigned int nf_hookfn(unsigned int hooknum,
struct sk_buff *skb,

@@ -233,6 +234,11 @@ struct nf_afinfo {
unsigned short family;
__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
__sum16 (*checksum_partial)(struct sk_buff *skb,
unsigned int hook,
unsigned int dataoff,
unsigned int len,
u_int8_t protocol);
int (*route)(struct dst_entry **dst, struct flowi *fl);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);

@@ -262,6 +268,23 @@ nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
return csum;
}

static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol, unsigned short family)
{
const struct nf_afinfo *afinfo;
__sum16 csum = 0;

rcu_read_lock();
afinfo = nf_get_afinfo(family);
if (afinfo)
csum = afinfo->checksum_partial(skb, hook, dataoff, len,
protocol);
rcu_read_unlock();
return csum;
}

extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);

@@ -320,5 +343,56 @@ extern void (*nf_ct_destroy)(struct nf_conntrack *);
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif

static inline struct net *nf_pre_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}

static inline struct net *nf_local_in_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}

static inline struct net *nf_forward_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
BUG_ON(in->nd_net != out->nd_net);
return in->nd_net;
#else
return &init_net;
#endif
}

static inline struct net *nf_local_out_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}

static inline struct net *nf_post_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}

#endif /*__KERNEL__*/
#endif /*__LINUX_NETFILTER_H*/
@@ -0,0 +1,40 @@
#ifndef _NF_CONNTRACK_DCCP_H
#define _NF_CONNTRACK_DCCP_H

/* Exposed to userspace over nfnetlink */
enum ct_dccp_states {
CT_DCCP_NONE,
CT_DCCP_REQUEST,
CT_DCCP_RESPOND,
CT_DCCP_PARTOPEN,
CT_DCCP_OPEN,
CT_DCCP_CLOSEREQ,
CT_DCCP_CLOSING,
CT_DCCP_TIMEWAIT,
CT_DCCP_IGNORE,
CT_DCCP_INVALID,
__CT_DCCP_MAX
};
#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)

enum ct_dccp_roles {
CT_DCCP_ROLE_CLIENT,
CT_DCCP_ROLE_SERVER,
__CT_DCCP_ROLE_MAX
};
#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)

#ifdef __KERNEL__
#include <net/netfilter/nf_conntrack_tuple.h>

struct nf_ct_dccp {
u_int8_t role[IP_CT_DIR_MAX];
u_int8_t state;
u_int8_t last_pkt;
u_int8_t last_dir;
u_int64_t handshake_seq;
};

#endif /* __KERNEL__ */

#endif /* _NF_CONNTRACK_DCCP_H */
@@ -80,6 +80,7 @@ enum ctattr_l4proto {
enum ctattr_protoinfo {
CTA_PROTOINFO_UNSPEC,
CTA_PROTOINFO_TCP,
CTA_PROTOINFO_DCCP,
__CTA_PROTOINFO_MAX
};
#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)

@@ -95,6 +96,13 @@ enum ctattr_protoinfo_tcp {
};
#define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1)

enum ctattr_protoinfo_dccp {
CTA_PROTOINFO_DCCP_UNSPEC,
CTA_PROTOINFO_DCCP_STATE,
__CTA_PROTOINFO_DCCP_MAX,
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)

enum ctattr_counters {
CTA_COUNTERS_UNSPEC,
CTA_COUNTERS_PACKETS, /* old 64bit counters */
@@ -430,13 +430,13 @@ extern int xt_compat_add_offset(int af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(int af);
extern short xt_compat_calc_jump(int af, unsigned int offset);

extern int xt_compat_match_offset(struct xt_match *match);
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);

extern int xt_compat_target_offset(struct xt_target *target);
extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(struct xt_entry_target *t,
@@ -37,68 +37,54 @@ struct xt_sctp_info {

#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
chunkmap[type / bytes(u_int32_t)] |= \
(chunkmap)[type / bytes(u_int32_t)] |= \
1 << (type % bytes(u_int32_t)); \
} while (0)

#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
chunkmap[type / bytes(u_int32_t)] &= \
(chunkmap)[type / bytes(u_int32_t)] &= \
~(1 << (type % bytes(u_int32_t))); \
} while (0)

#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
(chunkmap[type / bytes (u_int32_t)] & \
((chunkmap)[type / bytes (u_int32_t)] & \
(1 << (type % bytes (u_int32_t)))) ? 1: 0; \
})

#define SCTP_CHUNKMAP_RESET(chunkmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = 0; \
} while (0)
#define SCTP_CHUNKMAP_RESET(chunkmap) \
memset((chunkmap), 0, sizeof(chunkmap))

#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = ~0; \
} while (0)
#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
memset((chunkmap), ~0U, sizeof(chunkmap))

#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(srcmap); i++) \
destmap[i] = srcmap[i]; \
} while (0)
#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
memcpy((destmap), (srcmap), sizeof(srcmap))

#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
({ \
int i; \
int flag = 1; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i]) { \
flag = 0; \
break; \
} \
} \
flag; \
})
#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
__sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
__sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
if (chunkmap[i])
return false;
return true;
}

#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
({ \
int i; \
int flag = 1; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i] != ~0) { \
flag = 0; \
break; \
} \
} \
flag; \
})
#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
__sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
__sctp_chunkmap_is_all_set(const u_int32_t *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
if (chunkmap[i] != ~0U)
return false;
return true;
}

#endif /* _XT_SCTP_H_ */
@@ -23,8 +23,6 @@

#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
#define arpt_target xt_target
#define arpt_table xt_table

#define ARPT_DEV_ADDR_LEN_MAX 16

@@ -266,20 +264,15 @@ struct arpt_error
.target.errorname = "ERROR", \
}

#define arpt_register_target(tgt) \
({ (tgt)->family = NF_ARP; \
xt_register_target(tgt); })
#define arpt_unregister_target(tgt) xt_unregister_target(tgt)

extern struct arpt_table *arpt_register_table(struct net *net,
struct arpt_table *table,
const struct arpt_replace *repl);
extern void arpt_unregister_table(struct arpt_table *table);
extern struct xt_table *arpt_register_table(struct net *net,
struct xt_table *table,
const struct arpt_replace *repl);
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
struct arpt_table *table);
struct xt_table *table);

#define ARPT_ALIGN(s) XT_ALIGN(s)
@@ -0,0 +1,21 @@
#ifndef __LINUX_BRIDGE_EBT_NFLOG_H
#define __LINUX_BRIDGE_EBT_NFLOG_H

#define EBT_NFLOG_MASK 0x0

#define EBT_NFLOG_PREFIX_SIZE 64
#define EBT_NFLOG_WATCHER "nflog"

#define EBT_NFLOG_DEFAULT_GROUP 0x1
#define EBT_NFLOG_DEFAULT_THRESHOLD 1

struct ebt_nflog_info {
u_int32_t len;
u_int16_t group;
u_int16_t threshold;
u_int16_t flags;
u_int16_t pad;
char prefix[EBT_NFLOG_PREFIX_SIZE];
};

#endif /* __LINUX_BRIDGE_EBT_NFLOG_H */
@@ -62,8 +62,6 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_FILTER = 0,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_HELPER = INT_MAX - 2,
NF_IP_PRI_NAT_SEQ_ADJUST = INT_MAX - 1,
NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
NF_IP_PRI_LAST = INT_MAX,
};
@@ -20,6 +20,7 @@
#include <asm/atomic.h>

#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <net/netfilter/ipv4/nf_conntrack_icmp.h>

@@ -30,6 +31,7 @@
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
struct ip_ct_icmp icmp;

@@ -63,14 +65,7 @@ union nf_conntrack_help {
#include <linux/timer.h>

#ifdef CONFIG_NETFILTER_DEBUG
#define NF_CT_ASSERT(x) \
do { \
if (!(x)) \
/* Wooah! I'm tripping my conntrack in a frenzy of \
netplay... */ \
printk("NF_CT_ASSERT: %s:%i(%s)\n", \
__FILE__, __LINE__, __FUNCTION__); \
} while(0)
#define NF_CT_ASSERT(x) WARN_ON(!(x))
#else
#define NF_CT_ASSERT(x)
#endif

@@ -145,6 +140,16 @@ nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
tuplehash[hash->tuple.dst.dir]);
}

static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
{
return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
}

static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
{
return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
}

/* get master conntrack via master expectation */
#define master_ct(conntr) (conntr->master)

@@ -189,12 +194,11 @@ extern void nf_conntrack_hash_insert(struct nf_conn *ct);

extern void nf_conntrack_flush(void);

extern int nf_ct_get_tuplepr(const struct sk_buff *skb,
unsigned int nhoff,
u_int16_t l3num,
struct nf_conntrack_tuple *tuple);
extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
unsigned int nhoff, u_int16_t l3num,
struct nf_conntrack_tuple *tuple);
extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);

extern void __nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -30,7 +30,7 @@ extern void nf_conntrack_cleanup(void);
extern int nf_conntrack_proto_init(void);
extern void nf_conntrack_proto_fini(void);

extern int
extern bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
unsigned int dataoff,

@@ -40,7 +40,7 @@ nf_ct_get_tuple(const struct sk_buff *skb,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);

extern int
extern bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_l3proto *l3proto,
@@ -28,31 +28,20 @@ struct nf_conntrack_l3proto
* Try to fill in the third arg: nhoff is offset of l3 proto
* hdr. Return true if possible.
*/
int (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple);
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple);

/*
* Invert the per-proto part of the tuple: ie. turn xmit into reply.
* Some packets can't be inverted: return 0 in that case.
*/
int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);

/* Print out the per-protocol part of the tuple. */
int (*print_tuple)(struct seq_file *s,
const struct nf_conntrack_tuple *);

/* Returns verdict for packet, or -1 for invalid. */
int (*packet)(struct nf_conn *ct,
const struct sk_buff *skb,
enum ip_conntrack_info ctinfo);

/*
* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next.
*/
int (*new)(struct nf_conn *ct, const struct sk_buff *skb);

/*
* Called before tracking.
* *dataoff: offset of protocol header (TCP, UDP,...) in skb
@@ -25,15 +25,14 @@ struct nf_conntrack_l4proto

/* Try to fill in the third arg: dataoff is offset past network protocol
hdr. Return true if possible. */
int (*pkt_to_tuple)(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple);
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple);

/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
* Some packets can't be inverted: return 0 in that case.
*/
int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);

/* Returns verdict for packet, or -1 for invalid. */
int (*packet)(struct nf_conn *ct,

@@ -45,8 +44,8 @@ struct nf_conntrack_l4proto

/* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next. */
int (*new)(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff);
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff);

/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
@@ -39,6 +39,9 @@ union nf_conntrack_man_proto
struct {
__be16 id;
} icmp;
struct {
__be16 port;
} dccp;
struct {
__be16 port;
} sctp;

@@ -77,6 +80,9 @@ struct nf_conntrack_tuple
struct {
u_int8_t type, code;
} icmp;
struct {
__be16 port;
} dccp;
struct {
__be16 port;
} sctp;

@@ -145,8 +151,6 @@ static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
}
}

#define NF_CT_DUMP_TUPLE(tp) nf_ct_dump_tuple(tp)

/* If we're the first tuple, it's the original dir. */
#define NF_CT_DIRECTION(h) \
((enum ip_conntrack_dir)(h)->tuple.dst.dir)

@@ -160,61 +164,64 @@ struct nf_conntrack_tuple_hash

#endif /* __KERNEL__ */

static inline int __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) &&
t1->src.u.all == t2->src.u.all &&
t1->src.l3num == t2->src.l3num);
}

static inline int __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
static inline bool __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return (nf_inet_addr_cmp(&t1->dst.u3, &t2->dst.u3) &&
t1->dst.u.all == t2->dst.u.all &&
t1->dst.protonum == t2->dst.protonum);
}

static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
static inline bool nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return __nf_ct_tuple_src_equal(t1, t2) &&
__nf_ct_tuple_dst_equal(t1, t2);
}

static inline int nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
const struct nf_conntrack_tuple_mask *m2)
static inline bool
nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
const struct nf_conntrack_tuple_mask *m2)
{
return (nf_inet_addr_cmp(&m1->src.u3, &m2->src.u3) &&
m1->src.u.all == m2->src.u.all);
}

static inline int nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2,
const struct nf_conntrack_tuple_mask *mask)
static inline bool
nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2,
const struct nf_conntrack_tuple_mask *mask)
{
int count;

for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) &
mask->src.u3.all[count])
return 0;
return false;
}

if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all)
return 0;
return false;

if (t1->src.l3num != t2->src.l3num ||
t1->dst.protonum != t2->dst.protonum)
return 0;
return false;

return 1;
return true;
}

static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple_mask *mask)
static inline bool
nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple_mask *mask)
{
return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&
__nf_ct_tuple_dst_equal(t, tuple);
@@ -24,6 +24,9 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
extern int nf_nat_seq_adjust(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);

/* Setup NAT on this expected conntrack so it follows master, but goes
* to port ct->master->saved_proto. */
@@ -8,9 +8,6 @@ struct nf_nat_range;

struct nf_nat_protocol
{
/* Protocol name */
const char *name;

/* Protocol number. */
unsigned int protonum;

@@ -18,25 +15,25 @@ struct nf_nat_protocol

/* Translate a packet to the target according to manip type.
Return true if succeeded. */
int (*manip_pkt)(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype);
bool (*manip_pkt)(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype);

/* Is the manipable part of the tuple between min and max incl? */
int (*in_range)(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max);
bool (*in_range)(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max);

/* Alter the per-proto part of the tuple (depending on
maniptype), to give a unique tuple in the given range if
possible; return false if not. Per-protocol part of tuple
is initialized to the incoming packet. */
int (*unique_tuple)(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct);
bool (*unique_tuple)(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct);

int (*range_to_nlattr)(struct sk_buff *skb,
const struct nf_nat_range *range);

@@ -62,9 +59,20 @@ extern int init_protocols(void) __init;
extern void cleanup_protocols(void);
extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum);

extern int nf_nat_port_range_to_nlattr(struct sk_buff *skb,
const struct nf_nat_range *range);
extern int nf_nat_port_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range);
extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max);

extern bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct,
u_int16_t *rover);

extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
const struct nf_nat_range *range);
extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range);

#endif /*_NF_NAT_PROTO_H*/
@@ -14,7 +14,4 @@ extern int nf_nat_rule_find(struct sk_buff *skb,

extern unsigned int
alloc_null_binding(struct nf_conn *ct, unsigned int hooknum);

extern unsigned int
alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum);
#endif /* _NF_NAT_RULE_H */
@@ -212,4 +212,18 @@ config BRIDGE_EBT_ULOG

To compile it as a module, choose M here. If unsure, say N.

config BRIDGE_EBT_NFLOG
tristate "ebt: nflog support"
depends on BRIDGE_NF_EBTABLES
help
This option enables the nflog watcher, which allows to LOG
messages through the netfilter logging API, which can use
either the old LOG target, the old ULOG target or nfnetlink_log
as backend.

This option adds the ulog watcher, that you can use in any rule
in any ebtables table.

To compile it as a module, choose M here. If unsure, say N.

endmenu
@@ -30,3 +30,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
# watchers
obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o
@@ -0,0 +1,74 @@
/*
* ebt_nflog
*
* Author:
* Peter Warasin <peter@endian.com>
*
* February, 2008
*
* Based on:
* xt_NFLOG.c, (C) 2006 by Patrick McHardy <kaber@trash.net>
* ebt_ulog.c, (C) 2004 by Bart De Schuymer <bdschuym@pandora.be>
*
*/

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nflog.h>
#include <net/netfilter/nf_log.h>

static void ebt_nflog(const struct sk_buff *skb,
unsigned int hooknr,
const struct net_device *in,
const struct net_device *out,
const void *data, unsigned int datalen)
{
struct ebt_nflog_info *info = (struct ebt_nflog_info *)data;
struct nf_loginfo li;

li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;

nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, "%s", info->prefix);
}

static int ebt_nflog_check(const char *tablename,
unsigned int hookmask,
const struct ebt_entry *e,
void *data, unsigned int datalen)
{
struct ebt_nflog_info *info = (struct ebt_nflog_info *)data;

if (datalen != EBT_ALIGN(sizeof(struct ebt_nflog_info)))
return -EINVAL;
if (info->flags & ~EBT_NFLOG_MASK)
return -EINVAL;
info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
return 0;
}

static struct ebt_watcher nflog __read_mostly = {
.name = EBT_NFLOG_WATCHER,
.watcher = ebt_nflog,
.check = ebt_nflog_check,
.me = THIS_MODULE,
};

static int __init ebt_nflog_init(void)
{
return ebt_register_watcher(&nflog);
}

static void __exit ebt_nflog_fini(void)
{
ebt_unregister_watcher(&nflog);
}

module_init(ebt_nflog_init);
module_exit(ebt_nflog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Warasin <peter@endian.com>");
MODULE_DESCRIPTION("ebtables NFLOG netfilter logging module");
@@ -46,7 +46,7 @@ static struct ebt_table broute_table =
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << NF_BR_BROUTING,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(broute_table.lock),
.check = check,
.me = THIS_MODULE,
};

@@ -55,7 +55,7 @@ static struct ebt_table frame_filter =
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(frame_filter.lock),
.check = check,
.me = THIS_MODULE,
};

@@ -55,7 +55,7 @@ static struct ebt_table frame_nat =
.name = "nat",
.table = &initial_table,
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(frame_nat.lock),
.check = check,
.me = THIS_MODULE,
};
@@ -182,21 +182,44 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
}
return csum;
}

EXPORT_SYMBOL(nf_ip_checksum);

static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol)
{
const struct iphdr *iph = ip_hdr(skb);
__sum16 csum = 0;

switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (len == skb->len - dataoff)
return nf_ip_checksum(skb, hook, dataoff, protocol);
/* fall through */
case CHECKSUM_NONE:
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
skb->len - dataoff, 0);
skb->ip_summed = CHECKSUM_NONE;
csum = __skb_checksum_complete_head(skb, dataoff + len);
if (!csum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return csum;
}

static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
{
return ip_route_output_key(&init_net, (struct rtable **)dst, fl);
}

static const struct nf_afinfo nf_ip_afinfo = {
.family = AF_INET,
.checksum = nf_ip_checksum,
.route = nf_ip_route,
.saveroute = nf_ip_saveroute,
.reroute = nf_ip_reroute,
.route_key_size = sizeof(struct ip_rt_info),
.family = AF_INET,
.checksum = nf_ip_checksum,
.checksum_partial = nf_ip_checksum_partial,
.route = nf_ip_route,
.saveroute = nf_ip_saveroute,
.reroute = nf_ip_reroute,
.route_key_size = sizeof(struct ip_rt_info),
};

static int ipv4_netfilter_init(void)
@@ -241,10 +241,25 @@ config NF_NAT_SNMP_BASIC
# <expr> '&&' <expr> (6)
#
# (6) Returns the result of min(/expr/, /expr/).
config NF_NAT_PROTO_DCCP
tristate
depends on NF_NAT && NF_CT_PROTO_DCCP
default NF_NAT && NF_CT_PROTO_DCCP

config NF_NAT_PROTO_GRE
tristate
depends on NF_NAT && NF_CT_PROTO_GRE

config NF_NAT_PROTO_UDPLITE
tristate
depends on NF_NAT && NF_CT_PROTO_UDPLITE
default NF_NAT && NF_CT_PROTO_UDPLITE

config NF_NAT_PROTO_SCTP
tristate
default NF_NAT && NF_CT_PROTO_SCTP
depends on NF_NAT && NF_CT_PROTO_SCTP

config NF_NAT_FTP
tristate
depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
@@ -10,7 +10,7 @@ nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
endif
endif

nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o

# connection tracking

@@ -29,7 +29,10 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o

# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o

# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
@@ -59,7 +59,7 @@ do { \
#endif

static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
char *hdr_addr, int len)
const char *hdr_addr, int len)
{
int i, ret;

@@ -80,8 +80,8 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
const char *outdev,
const struct arpt_arp *arpinfo)
{
char *arpptr = (char *)(arphdr + 1);
char *src_devaddr, *tgt_devaddr;
const char *arpptr = (char *)(arphdr + 1);
const char *src_devaddr, *tgt_devaddr;
__be32 src_ipaddr, tgt_ipaddr;
int i, ret;

@@ -222,16 +222,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
struct arpt_table *table)
struct xt_table *table)
{
static const char nulldevname[IFNAMSIZ];
unsigned int verdict = NF_DROP;
struct arphdr *arp;
const struct arphdr *arp;
bool hotdrop = false;
struct arpt_entry *e, *back;
const char *indev, *outdev;
void *table_base;
struct xt_table_info *private;
const struct xt_table_info *private;

if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
return NF_DROP;

@@ -352,7 +352,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
e->counters.pcnt = pos;

for (;;) {
struct arpt_standard_target *t
const struct arpt_standard_target *t
= (void *)arpt_get_target(e);
int visited = e->comefrom & (1 << hook);

@@ -437,7 +437,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,

static inline int check_entry(struct arpt_entry *e, const char *name)
{
struct arpt_entry_target *t;
const struct arpt_entry_target *t;

if (!arp_checkentry(&e->arp)) {
duprintf("arp_tables: arp check failed %p %s.\n", e, name);

@@ -457,7 +457,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
static inline int check_target(struct arpt_entry *e, const char *name)
{
struct arpt_entry_target *t;
struct arpt_target *target;
struct xt_target *target;
int ret;

t = arpt_get_target(e);

@@ -480,7 +480,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
unsigned int *i)
{
struct arpt_entry_target *t;
struct arpt_target *target;
struct xt_target *target;
int ret;

ret = check_entry(e, name);

@@ -706,11 +706,11 @@ static void get_counters(const struct xt_table_info *t,
}
}

static inline struct xt_counters *alloc_counters(struct arpt_table *table)
static inline struct xt_counters *alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;

/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care

@@ -731,7 +731,7 @@ static inline struct xt_counters *alloc_counters(struct arpt_table *table)
}

static int copy_entries_to_user(unsigned int total_size,
struct arpt_table *table,
struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;

@@ -851,7 +851,7 @@ static int compat_table_info(const struct xt_table_info *info,
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
char name[ARPT_TABLE_MAXNAMELEN];
struct arpt_table *t;
struct xt_table *t;
int ret;

if (*len != sizeof(struct arpt_getinfo)) {

@@ -872,7 +872,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
"arptable_%s", name);
if (t && !IS_ERR(t)) {
struct arpt_getinfo info;
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
if (compat) {

@@ -911,7 +911,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
{
int ret;
struct arpt_get_entries get;
struct arpt_table *t;
struct xt_table *t;

if (*len < sizeof(get)) {
duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));

@@ -927,7 +927,8 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,

t = xt_find_table_lock(net, NF_ARP, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;

duprintf("t->private->number = %u\n",
private->number);
if (get.size == private->size)

@@ -936,7 +937,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
else {
duprintf("get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
module_put(t->me);
xt_table_unlock(t);

@@ -953,7 +954,7 @@ static int __do_replace(struct net *net, const char *name,
void __user *counters_ptr)
{
int ret;
struct arpt_table *t;
struct xt_table *t;
struct xt_table_info *oldinfo;
struct xt_counters *counters;
void *loc_cpu_old_entry;

@@ -1087,11 +1088,11 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
struct xt_counters_info tmp;
struct xt_counters *paddc;
unsigned int num_counters;
char *name;
const char *name;
int size;
void *ptmp;
struct arpt_table *t;
struct xt_table_info *private;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
#ifdef CONFIG_COMPAT

@@ -1554,11 +1555,11 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
}

static int compat_copy_entries_to_user(unsigned int total_size,
struct arpt_table *table,
struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;

@@ -1592,7 +1593,7 @@ static int compat_get_entries(struct net *net,
{
int ret;
struct compat_arpt_get_entries get;
struct arpt_table *t;
struct xt_table *t;

if (*len < sizeof(get)) {
duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));

@@ -1609,7 +1610,7 @@ static int compat_get_entries(struct net *net,
xt_compat_lock(NF_ARP);
t = xt_find_table_lock(net, NF_ARP, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
struct xt_table_info info;

duprintf("t->private->number = %u\n", private->number);

@@ -1620,7 +1621,7 @@ static int compat_get_entries(struct net *net,
} else if (!ret) {
duprintf("compat_get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
xt_compat_flush_offsets(NF_ARP);
module_put(t->me);

@@ -1722,9 +1723,8 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
return ret;
}

struct arpt_table *arpt_register_table(struct net *net,
struct arpt_table *table,
const struct arpt_replace *repl)
struct xt_table *arpt_register_table(struct net *net, struct xt_table *table,
const struct arpt_replace *repl)
{
int ret;
struct xt_table_info *newinfo;

@@ -1766,7 +1766,7 @@ struct arpt_table *arpt_register_table(struct net *net,
return ERR_PTR(ret);
}

void arpt_unregister_table(struct arpt_table *table)
void arpt_unregister_table(struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;

@@ -1784,7 +1784,7 @@ void arpt_unregister_table(struct arpt_table *table)
}

/* The built-in targets: standard (NULL) and error. */
static struct arpt_target arpt_standard_target __read_mostly = {
static struct xt_target arpt_standard_target __read_mostly = {
.name = ARPT_STANDARD_TARGET,
.targetsize = sizeof(int),
.family = NF_ARP,

@@ -1795,7 +1795,7 @@ static struct arpt_target arpt_standard_target __read_mostly = {
#endif
};

static struct arpt_target arpt_error_target __read_mostly = {
static struct xt_target arpt_error_target __read_mostly = {
.name = ARPT_ERROR_TARGET,
.target = arpt_error,
.targetsize = ARPT_FUNCTION_MAXNAMELEN,
@@ -15,7 +15,7 @@ target(struct sk_buff *skb,
const void *targinfo)
{
const struct arpt_mangle *mangle = targinfo;
struct arphdr *arp;
const struct arphdr *arp;
unsigned char *arpptr;
int pln, hln;

@@ -73,8 +73,9 @@ checkentry(const char *tablename, const void *e, const struct xt_target *target,
return true;
}

static struct arpt_target arpt_mangle_reg __read_mostly = {
static struct xt_target arpt_mangle_reg __read_mostly = {
.name = "mangle",
.family = NF_ARP,
.target = target,
.targetsize = sizeof(struct arpt_mangle),
.checkentry = checkentry,

@@ -83,15 +84,12 @@ static struct arpt_target arpt_mangle_reg __read_mostly = {

static int __init arpt_mangle_init(void)
{
if (arpt_register_target(&arpt_mangle_reg))
return -EINVAL;

return 0;
return xt_register_target(&arpt_mangle_reg);
}

static void __exit arpt_mangle_fini(void)
{
arpt_unregister_target(&arpt_mangle_reg);
xt_unregister_target(&arpt_mangle_reg);
}

module_init(arpt_mangle_init);
@@ -45,10 +45,10 @@ static struct
.term = ARPT_ERROR_INIT,
};

static struct arpt_table packet_filter = {
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
.private = NULL,
.me = THIS_MODULE,
.af = NF_ARP,

@@ -70,18 +70,21 @@ static struct nf_hook_ops arpt_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = NF_ARP,
.hooknum = NF_ARP_IN,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = arpt_hook,
.owner = THIS_MODULE,
.pf = NF_ARP,
.hooknum = NF_ARP_OUT,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = arpt_hook,
.owner = THIS_MODULE,
.pf = NF_ARP,
.hooknum = NF_ARP_FORWARD,
.priority = NF_IP_PRI_FILTER,
},
};
@@ -296,7 +296,7 @@ static void trace_packet(struct sk_buff *skb,
struct ipt_entry *e)
{
void *table_base;
struct ipt_entry *root;
const struct ipt_entry *root;
char *hookname, *chainname, *comment;
unsigned int rulenum = 0;

@@ -327,7 +327,7 @@ ipt_do_table(struct sk_buff *skb,
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
u_int16_t offset;
struct iphdr *ip;
const struct iphdr *ip;
u_int16_t datalen;
bool hotdrop = false;
/* Initializing verdict to NF_DROP keeps gcc happy. */

@@ -926,7 +926,7 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;

/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care

@@ -953,9 +953,9 @@ copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
struct ipt_entry *e;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
const void *loc_cpu_entry;

counters = alloc_counters(table);
if (IS_ERR(counters))

@@ -975,8 +975,8 @@ copy_entries_to_user(unsigned int total_size,
/* ... then go back and fix counters and names */
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
unsigned int i;
struct ipt_entry_match *m;
struct ipt_entry_target *t;
const struct ipt_entry_match *m;
const struct ipt_entry_target *t;

e = (struct ipt_entry *)(loc_cpu_entry + off);
if (copy_to_user(userptr + off

@@ -1116,7 +1116,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
"iptable_%s", name);
if (t && !IS_ERR(t)) {
struct ipt_getinfo info;
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
if (compat) {

@@ -1172,7 +1172,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)

t = xt_find_table_lock(net, AF_INET, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
duprintf("t->private->number = %u\n", private->number);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,

@@ -1180,7 +1180,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
else {
duprintf("get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
module_put(t->me);
xt_table_unlock(t);

@@ -1337,11 +1337,11 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
struct xt_counters_info tmp;
struct xt_counters *paddc;
unsigned int num_counters;
char *name;
const char *name;
int size;
void *ptmp;
struct xt_table *t;
struct xt_table_info *private;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
#ifdef CONFIG_COMPAT

@@ -1878,11 +1878,11 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
void *loc_cpu_entry;
const void *loc_cpu_entry;
unsigned int i = 0;

counters = alloc_counters(table);

@@ -1929,7 +1929,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
struct xt_table_info info;
duprintf("t->private->number = %u\n", private->number);
ret = compat_table_info(private, &info);

@@ -1939,7 +1939,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
} else if (!ret) {
duprintf("compat_get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
xt_compat_flush_offsets(AF_INET);
module_put(t->me);

@@ -2130,7 +2130,8 @@ icmp_match(const struct sk_buff *skb,
unsigned int protoff,
bool *hotdrop)
{
struct icmphdr _icmph, *ic;
const struct icmphdr *ic;
struct icmphdr _icmph;
const struct ipt_icmp *icmpinfo = matchinfo;

/* Must not be a fragment. */
@@ -144,7 +144,7 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
}

static struct clusterip_config *
clusterip_config_init(struct ipt_clusterip_tgt_info *i, __be32 ip,
clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
struct net_device *dev)
{
struct clusterip_config *c;

@@ -333,7 +333,7 @@ clusterip_tg(struct sk_buff *skb, const struct net_device *in,
}

#ifdef DEBUG
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif
pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
if (!clusterip_responsible(cipinfo->config, hash)) {

@@ -418,7 +418,7 @@ clusterip_tg_check(const char *tablename, const void *e_void,
/* drop reference count of cluster config when rule is deleted */
static void clusterip_tg_destroy(const struct xt_target *target, void *targinfo)
{
struct ipt_clusterip_tgt_info *cipinfo = targinfo;
const struct ipt_clusterip_tgt_info *cipinfo = targinfo;

/* if no more entries are referencing the config, remove it
* from the list and destroy the proc entry */

@@ -567,7 +567,7 @@ struct clusterip_seq_position {

static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
{
struct proc_dir_entry *pde = s->private;
const struct proc_dir_entry *pde = s->private;
struct clusterip_config *c = pde->data;
unsigned int weight;
u_int32_t local_nodes;

@@ -594,7 +594,7 @@ static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)

static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
struct clusterip_seq_position *idx = v;

*pos = ++idx->pos;
if (*pos >= idx->weight) {

@@ -613,7 +613,7 @@ static void clusterip_seq_stop(struct seq_file *s, void *v)

static int clusterip_seq_show(struct seq_file *s, void *v)
{
struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
struct clusterip_seq_position *idx = v;

if (idx->pos != 0)
seq_putc(s, ',');

@@ -669,7 +669,7 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
{
#define PROC_WRITELEN 10
char buffer[PROC_WRITELEN+1];
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
struct clusterip_config *c = pde->data;
unsigned long nodenum;
@@ -100,7 +100,7 @@ ecn_tg_check(const char *tablename, const void *e_void,
const struct xt_target *target, void *targinfo,
unsigned int hook_mask)
{
const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo;
const struct ipt_ECN_info *einfo = targinfo;
const struct ipt_entry *e = e_void;

if (einfo->operation & IPT_ECN_OP_MASK) {
@@ -76,7 +76,8 @@ static void dump_packet(const struct nf_loginfo *info,

if ((logflags & IPT_LOG_IPOPT)
&& ih->ihl * 4 > sizeof(struct iphdr)) {
unsigned char _opt[4 * 15 - sizeof(struct iphdr)], *op;
const unsigned char *op;
unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
unsigned int i, optsize;

optsize = ih->ihl * 4 - sizeof(struct iphdr);

@@ -338,12 +339,16 @@ static void dump_packet(const struct nf_loginfo *info,
if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
printk("UID=%u GID=%u",
printk("UID=%u GID=%u ",
skb->sk->sk_socket->file->f_uid,
skb->sk->sk_socket->file->f_gid);
read_unlock_bh(&skb->sk->sk_callback_lock);
}

/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark)
printk("MARK=0x%x ", skb->mark);

/* Proto Max log string length */
/* IP: 40+46+6+11+127 = 230 */
/* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
@@ -35,8 +35,10 @@ MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv4");
static void send_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
struct iphdr *oiph, *niph;
struct tcphdr _otcph, *oth, *tcph;
const struct iphdr *oiph;
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _otcph, *tcph;
unsigned int addr_type;

/* IP header checks: fragment. */
@@ -340,7 +340,7 @@ static void *recent_seq_start(struct seq_file *seq, loff_t *pos)
static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct recent_iter_state *st = seq->private;
struct recent_table *t = st->table;
const struct recent_table *t = st->table;
struct recent_entry *e = v;
struct list_head *head = e->list.next;

@@ -361,7 +361,7 @@ static void recent_seq_stop(struct seq_file *s, void *v)

static int recent_seq_show(struct seq_file *seq, void *v)
{
struct recent_entry *e = v;
const struct recent_entry *e = v;
unsigned int i;

i = (e->index - 1) % ip_pkt_list_tot;

@@ -396,7 +396,7 @@ static int recent_seq_open(struct inode *inode, struct file *file)
static ssize_t recent_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *loff)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
struct recent_table *t = pde->data;
struct recent_entry *e;
char buf[sizeof("+255.255.255.255")], *c = buf;
@@ -56,12 +56,23 @@ static struct
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
.me = THIS_MODULE,
.af = AF_INET,
};

/* The work comes in here from netfilter.c. */
static unsigned int
ipt_local_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_local_in_net(in, out)->ipv4.iptable_filter);
}

static unsigned int
ipt_hook(unsigned int hook,
struct sk_buff *skb,

@@ -69,7 +80,8 @@ ipt_hook(unsigned int hook,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter);
return ipt_do_table(skb, hook, in, out,
nf_forward_net(in, out)->ipv4.iptable_filter);
}

static unsigned int

@@ -88,12 +100,13 @@ ipt_local_out_hook(unsigned int hook,
return NF_ACCEPT;
}

return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter);
return ipt_do_table(skb, hook, in, out,
nf_local_out_net(in, out)->ipv4.iptable_filter);
}

static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_hook,
.hook = ipt_local_in_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
@@ -67,20 +67,54 @@ static struct
static struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
.me = THIS_MODULE,
.af = AF_INET,
};

/* The work comes in here from netfilter.c. */
static unsigned int
ipt_route_hook(unsigned int hook,
ipt_pre_routing_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_pre_routing_net(in, out)->ipv4.iptable_mangle);
}

static unsigned int
ipt_post_routing_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_post_routing_net(in, out)->ipv4.iptable_mangle);
}

static unsigned int
ipt_local_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_local_in_net(in, out)->ipv4.iptable_mangle);
}

static unsigned int
ipt_forward_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle);
return ipt_do_table(skb, hook, in, out,
nf_forward_net(in, out)->ipv4.iptable_mangle);
}

static unsigned int

@@ -112,7 +146,8 @@ ipt_local_hook(unsigned int hook,
daddr = iph->daddr;
tos = iph->tos;

ret = ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle);
ret = ipt_do_table(skb, hook, in, out,
nf_local_out_net(in, out)->ipv4.iptable_mangle);
/* Reroute for ANY change. */
if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
iph = ip_hdr(skb);

@@ -130,21 +165,21 @@ ipt_local_hook(unsigned int hook,

static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_route_hook,
.hook = ipt_pre_routing_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.hook = ipt_local_in_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.hook = ipt_forward_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_FORWARD,

@@ -158,7 +193,7 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.hook = ipt_post_routing_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_POST_ROUTING,
@@ -39,7 +39,7 @@ static struct
static struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
.me = THIS_MODULE,
.af = AF_INET,
};

@@ -52,7 +52,8 @@ ipt_hook(unsigned int hook,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw);
return ipt_do_table(skb, hook, in, out,
nf_pre_routing_net(in, out)->ipv4.iptable_raw);
}

static unsigned int

@@ -70,7 +71,8 @@ ipt_local_hook(unsigned int hook,
"packet.\n");
return NF_ACCEPT;
}
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw);
return ipt_do_table(skb, hook, in, out,
nf_local_out_net(in, out)->ipv4.iptable_raw);
}

/* 'raw' is the very first table. */
@@ -23,30 +23,36 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>

static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple)
int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);

static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple)
{
const __be32 *ap;
__be32 _addrs[2];
ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
sizeof(u_int32_t) * 2, _addrs);
if (ap == NULL)
return 0;
return false;

tuple->src.u3.ip = ap[0];
tuple->dst.u3.ip = ap[1];

return 1;
return true;
}

static int ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
tuple->src.u3.ip = orig->dst.u3.ip;
tuple->dst.u3.ip = orig->src.u3.ip;

return 1;
return true;
}

static int ipv4_print_tuple(struct seq_file *s,

@@ -100,36 +106,42 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}

static unsigned int ipv4_conntrack_help(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
const struct nf_conn_help *help;
const struct nf_conntrack_helper *helper;
unsigned int ret;

/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
return NF_ACCEPT;
goto out;

help = nfct_help(ct);
if (!help)
return NF_ACCEPT;
goto out;

/* rcu_read_lock()ed by nf_hook_slow */
helper = rcu_dereference(help->helper);
if (!helper)
return NF_ACCEPT;
return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
ct, ctinfo);
goto out;

ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
ct, ctinfo);
if (ret != NF_ACCEPT)
return ret;

if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
typeof(nf_nat_seq_adjust_hook) seq_adjust;

seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
if (!seq_adjust || !seq_adjust(skb, ct, ctinfo))
return NF_DROP;
}
out:
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}

static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,

@@ -210,20 +222,6 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_conntrack_help,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_HELPER,
},
{
.hook = ipv4_conntrack_help,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_HELPER,
},
{
.hook = ipv4_confirm,
.owner = THIS_MODULE,
@@ -106,21 +106,16 @@ static int ct_seq_show(struct seq_file *s, void *v)
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
return 0;
if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num != AF_INET)
if (nf_ct_l3num(ct) != AF_INET)
return 0;

l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.src.l3num);
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
NF_CT_ASSERT(l3proto);
l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.src.l3num,
ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.dst.protonum);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
NF_CT_ASSERT(l4proto);

if (seq_printf(s, "%-8s %u %ld ",
l4proto->name,
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
l4proto->name, nf_ct_protonum(ct),
timer_pending(&ct->timeout)
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
return -ENOSPC;
@@ -22,22 +22,21 @@

static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;

static int icmp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
const struct icmphdr *hp;
struct icmphdr _hdr;

hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return 0;
return false;

tuple->dst.u.icmp.type = hp->type;
tuple->src.u.icmp.id = hp->un.echo.id;
tuple->dst.u.icmp.code = hp->code;

return 1;
return true;
}

/* Add 1; spaces filled with 0. */

@@ -52,17 +51,17 @@ static const u_int8_t invmap[] = {
[ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
};

static int icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
if (orig->dst.u.icmp.type >= sizeof(invmap)
|| !invmap[orig->dst.u.icmp.type])
return 0;
return false;

tuple->src.u.icmp.id = orig->src.u.icmp.id;
tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
return 1;
return true;
}

/* Print out the per-protocol part of the tuple. */

@@ -101,8 +100,8 @@ static int icmp_packet(struct nf_conn *ct,
}

/* Called when a new connection for this protocol found. */
static int icmp_new(struct nf_conn *ct,
const struct sk_buff *skb, unsigned int dataoff)
static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
{
static const u_int8_t valid_new[] = {
[ICMP_ECHO] = 1,

@@ -116,11 +115,11 @@ static int icmp_new(struct nf_conn *ct,
/* Can't create a new ICMP `conn' with this. */
pr_debug("icmp: can't create new conn with type %u\n",
ct->tuplehash[0].tuple.dst.u.icmp.type);
NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple);
return 0;
nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple);
return false;
}
atomic_set(&ct->proto.icmp.count, 0);
return 1;
return true;
}

/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
@@ -150,9 +150,9 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range)
{
unsigned int h = hash_by_src(tuple);
struct nf_conn_nat *nat;
struct nf_conn *ct;
struct hlist_node *n;
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
const struct hlist_node *n;

rcu_read_lock();
hlist_for_each_entry_rcu(nat, n, &bysource[h], bysource) {

@@ -349,7 +349,7 @@ nf_nat_setup_info(struct nf_conn *ct,
EXPORT_SYMBOL(nf_nat_setup_info);

/* Returns true if succeeded. */
static int
static bool
manip_pkt(u_int16_t proto,
struct sk_buff *skb,
unsigned int iphdroff,

@@ -360,7 +360,7 @@ manip_pkt(u_int16_t proto,
const struct nf_nat_protocol *p;

if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
return 0;
return false;

iph = (void *)skb->data + iphdroff;

@@ -369,7 +369,7 @@ manip_pkt(u_int16_t proto,
/* rcu_read_lock()ed by nf_hook_slow */
p = __nf_nat_proto_find(proto);
if (!p->manip_pkt(skb, iphdroff, target, maniptype))
return 0;
return false;

iph = (void *)skb->data + iphdroff;

@@ -380,7 +380,7 @@ manip_pkt(u_int16_t proto,
csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
iph->daddr = target->dst.u3.ip;
}
return 1;
return true;
}

/* Do packet manipulations according to nf_nat_setup_info. */

@@ -426,7 +426,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
struct icmphdr icmp;
struct iphdr ip;
} *inside;
struct nf_conntrack_l4proto *l4proto;
const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple inner, target;
int hdrlen = ip_hdrlen(skb);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

@@ -544,46 +544,6 @@ void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nlattr(struct sk_buff *skb,
const struct nf_nat_range *range)
{
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.tcp.port);
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.tcp.port);

return 0;

nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nlattr_to_range);

int
nf_nat_port_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range *range)
{
int ret = 0;

/* we have to return whether we actually parsed something or not */

if (tb[CTA_PROTONAT_PORT_MIN]) {
ret = 1;
range->min.tcp.port = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
}

if (!tb[CTA_PROTONAT_PORT_MAX]) {
if (ret)
range->max.tcp.port = range->min.tcp.port;
} else {
ret = 1;
range->max.tcp.port = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
}

return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nlattr);
#endif

/* Noone using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{

@@ -660,6 +620,9 @@ static int __init nf_nat_init(void)
nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

BUG_ON(nf_nat_seq_adjust_hook != NULL);
rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
return 0;

cleanup_extend:

@@ -686,6 +649,8 @@ static void __exit nf_nat_cleanup(void)
nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
nf_ct_l3proto_put(l3proto);
nf_ct_extend_unregister(&nat_extend);
rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
synchronize_net();
}

MODULE_LICENSE("GPL");
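
The hunks above reach TCP sequence-number adjustment indirectly: nf_nat_init() publishes nf_nat_seq_adjust() through the RCU-managed pointer nf_nat_seq_adjust_hook (declared in the conntrack hunks earlier in this commit), and nf_nat_cleanup() clears it before synchronize_net(). A minimal, illustrative sketch of that pointer-hook pattern follows; my_adjust_hook and my_adjust_impl are placeholder names, not part of this commit.

/* Sketch only: RCU-published hook pointer with placeholder names. */
int (*my_adjust_hook)(struct sk_buff *skb) __read_mostly;

/* provider (e.g. module init): publish the implementation */
rcu_assign_pointer(my_adjust_hook, my_adjust_impl);

/* consumer: sample the pointer under RCU protection before calling it */
int (*adjust)(struct sk_buff *skb);
rcu_read_lock();
adjust = rcu_dereference(my_adjust_hook);
if (adjust && !adjust(skb))
	ret = NF_DROP;
rcu_read_unlock();

/* provider teardown: unpublish, then wait for in-flight readers */
rcu_assign_pointer(my_adjust_hook, NULL);
synchronize_net();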
@@ -416,7 +416,6 @@ nf_nat_seq_adjust(struct sk_buff *skb,

return 1;
}
EXPORT_SYMBOL(nf_nat_seq_adjust);

/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
@@ -72,7 +72,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
}

pr_debug("trying to unexpect other dir: ");
NF_CT_DUMP_TUPLE(&t);
nf_ct_dump_tuple_ip(&t);
other_exp = nf_ct_expect_find_get(&t);
if (other_exp) {
nf_ct_unexpect_related(other_exp);
@@ -0,0 +1,120 @@
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/random.h>
#include <linux/ip.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>

bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 port;

if (maniptype == IP_NAT_MANIP_SRC)
port = tuple->src.u.all;
else
port = tuple->dst.u.all;

return ntohs(port) >= ntohs(min->all) &&
ntohs(port) <= ntohs(max->all);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);

bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct,
u_int16_t *rover)
{
unsigned int range_size, min, i;
__be16 *portptr;
u_int16_t off;

if (maniptype == IP_NAT_MANIP_SRC)
portptr = &tuple->src.u.all;
else
portptr = &tuple->dst.u.all;

/* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
if (maniptype == IP_NAT_MANIP_DST)
return false;

if (ntohs(*portptr) < 1024) {
/* Loose convention: >> 512 is credential passing */
if (ntohs(*portptr) < 512) {
min = 1;
range_size = 511 - min + 1;
} else {
min = 600;
range_size = 1023 - min + 1;
}
} else {
min = 1024;
range_size = 65535 - 1024 + 1;
}
} else {
min = ntohs(range->min.all);
range_size = ntohs(range->max.all) - min + 1;
}

off = *rover;
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
off = net_random();

for (i = 0; i < range_size; i++, off++) {
*portptr = htons(min + off % range_size);
if (nf_nat_used_tuple(tuple, ct))
continue;
if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
*rover = off;
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
const struct nf_nat_range *range)
{
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all);
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all);
return 0;

nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);

int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range)
{
if (tb[CTA_PROTONAT_PORT_MIN]) {
range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
range->max.all = range->min.tcp.port;
range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
}
if (tb[CTA_PROTONAT_PORT_MAX]) {
range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr);
#endif
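
The new common file above factors the port-range check and unique-tuple search out of the per-protocol NAT helpers: a helper now keeps only its own rover and its header rewrite, and delegates the rest. A minimal sketch of that shape is below; "foo", foo_manip_pkt and the IPPROTO_RAW protocol number are placeholders, not part of this commit. The DCCP, SCTP and UDP-Lite helpers added later in this commit, and the converted GRE, ICMP, TCP and UDP ones, all follow the same pattern.

/* Sketch only: hypothetical protocol helper built on nf_nat_proto_*. */
static u_int16_t foo_port_rover;

static bool
foo_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_nat_range *range,
		 enum nf_nat_manip_type maniptype,
		 const struct nf_conn *ct)
{
	/* common search over the port range, remembering the rover */
	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
					 &foo_port_rover);
}

static const struct nf_nat_protocol nf_nat_protocol_foo = {
	.protonum	= IPPROTO_RAW,		/* placeholder */
	.me		= THIS_MODULE,
	.manip_pkt	= foo_manip_pkt,	/* protocol-specific rewrite */
	.in_range	= nf_nat_proto_in_range,
	.unique_tuple	= foo_unique_tuple,
};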
@@ -0,0 +1,108 @@
/*
 * DCCP NAT protocol helper
 *
 * Copyright (c) 2005, 2006. 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/dccp.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>

static u_int16_t dccp_port_rover;

static bool
dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&dccp_port_rover);
}

static bool
dccp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (const void *)(skb->data + iphdroff);
struct dccp_hdr *hdr;
unsigned int hdroff = iphdroff + iph->ihl * 4;
__be32 oldip, newip;
__be16 *portptr, oldport, newport;
int hdrsize = 8; /* DCCP connection tracking guarantees this much */

if (skb->len >= hdroff + sizeof(struct dccp_hdr))
hdrsize = sizeof(struct dccp_hdr);

if (!skb_make_writable(skb, hdroff + hdrsize))
return false;

iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct dccp_hdr *)(skb->data + hdroff);

if (maniptype == IP_NAT_MANIP_SRC) {
oldip = iph->saddr;
newip = tuple->src.u3.ip;
newport = tuple->src.u.dccp.port;
portptr = &hdr->dccph_sport;
} else {
oldip = iph->daddr;
newip = tuple->dst.u3.ip;
newport = tuple->dst.u.dccp.port;
portptr = &hdr->dccph_dport;
}

oldport = *portptr;
*portptr = newport;

if (hdrsize < sizeof(*hdr))
return true;

inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1);
inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
0);
return true;
}

static const struct nf_nat_protocol nf_nat_protocol_dccp = {
.protonum = IPPROTO_DCCP,
.me = THIS_MODULE,
.manip_pkt = dccp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = dccp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};

static int __init nf_nat_proto_dccp_init(void)
{
return nf_nat_protocol_register(&nf_nat_protocol_dccp);
}

static void __exit nf_nat_proto_dccp_fini(void)
{
nf_nat_protocol_unregister(&nf_nat_protocol_dccp);
}

module_init(nf_nat_proto_dccp_init);
module_exit(nf_nat_proto_dccp_fini);

MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("DCCP NAT protocol helper");
MODULE_LICENSE("GPL");
@@ -36,26 +36,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");

/* is key in given range between min and max */
static int
gre_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 key;

if (maniptype == IP_NAT_MANIP_SRC)
key = tuple->src.u.gre.key;
else
key = tuple->dst.u.gre.key;

return ntohs(key) >= ntohs(min->gre.key) &&
ntohs(key) <= ntohs(max->gre.key);
}

/* generate unique tuple ... */
static int
static bool
gre_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,

@@ -68,7 +50,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
/* If there is no master conntrack we are not PPTP,
do not change tuples */
if (!ct->master)
return 0;
return false;

if (maniptype == IP_NAT_MANIP_SRC)
keyptr = &tuple->src.u.gre.key;

@@ -89,20 +71,20 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
for (i = 0; i < range_size; i++, key++) {
*keyptr = htons(min + key % range_size);
if (!nf_nat_used_tuple(tuple, ct))
return 1;
return true;
}

pr_debug("%p: no NAT mapping\n", ct);
return 0;
return false;
}

/* manipulate a GRE packet according to maniptype */
static int
static bool
gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
struct gre_hdr *greh;
const struct gre_hdr *greh;
struct gre_hdr_pptp *pgreh;
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
unsigned int hdroff = iphdroff + iph->ihl * 4;

@@ -110,7 +92,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
/* pgreh includes two optional 32bit fields which are not required
* to be there. That's where the magic '8' comes from */
if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8))
return 0;
return false;

greh = (void *)skb->data + hdroff;
pgreh = (struct gre_hdr_pptp *)greh;

@@ -118,7 +100,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
/* we only have destination manip of a packet, since 'source key'
* is not present in the packet itself */
if (maniptype != IP_NAT_MANIP_DST)
return 1;
return true;
switch (greh->version) {
case GRE_VERSION_1701:
/* We do not currently NAT any GREv0 packets.

@@ -130,21 +112,20 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
break;
default:
pr_debug("can't nat unknown GRE version\n");
return 0;
return false;
}
return 1;
return true;
}

static const struct nf_nat_protocol gre = {
.name = "GRE",
.protonum = IPPROTO_GRE,
.me = THIS_MODULE,
.manip_pkt = gre_manip_pkt,
.in_range = gre_in_range,
.in_range = nf_nat_proto_in_range,
.unique_tuple = gre_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
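
The GRE helper can drop its private gre_in_range() in favour of the generic nf_nat_proto_in_range() because the per-protocol identifiers share storage in a union: the generic "all" member occupies the same 16 bits as gre.key (or the tcp/udp/sctp/dccp port and icmp.id), so comparing ->all compares the GRE key. An abridged sketch of union nf_conntrack_man_proto, shown only to illustrate this aliasing and not copied verbatim from the header, assuming the 2.6.26-era layout in include/net/netfilter/nf_conntrack_tuple.h:

/* abridged illustration: every member is a 16-bit overlay of ->all */
union nf_conntrack_man_proto {
	__be16 all;			/* generic 16-bit id */
	struct { __be16 port; } tcp;
	struct { __be16 port; } udp;
	struct { __be16 id; } icmp;
	struct { __be16 port; } dccp;
	struct { __be16 port; } sctp;
	struct { __be16 key; } gre;
};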
@@ -17,7 +17,7 @@
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>

static int
static bool
icmp_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,

@@ -27,7 +27,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
}

static int
static bool
icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,

@@ -46,12 +46,12 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
(id % range_size));
if (!nf_nat_used_tuple(tuple, ct))
return 1;
return true;
}
return 0;
return false;
}

static int
static bool
icmp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,

@@ -62,24 +62,23 @@ icmp_manip_pkt(struct sk_buff *skb,
unsigned int hdroff = iphdroff + iph->ihl*4;

if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return 0;
return false;

hdr = (struct icmphdr *)(skb->data + hdroff);
inet_proto_csum_replace2(&hdr->checksum, skb,
hdr->un.echo.id, tuple->src.u.icmp.id, 0);
hdr->un.echo.id = tuple->src.u.icmp.id;
return 1;
return true;
}

const struct nf_nat_protocol nf_nat_protocol_icmp = {
.name = "ICMP",
.protonum = IPPROTO_ICMP,
.me = THIS_MODULE,
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
@@ -0,0 +1,96 @@
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <net/sctp/checksum.h>

#include <net/netfilter/nf_nat_protocol.h>

static u_int16_t nf_sctp_port_rover;

static bool
sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&nf_sctp_port_rover);
}

static bool
sctp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
sctp_sctphdr_t *hdr;
unsigned int hdroff = iphdroff + iph->ihl*4;
__be32 oldip, newip;
u32 crc32;

if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;

iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct sctphdr *)(skb->data + hdroff);

if (maniptype == IP_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
hdr->source = tuple->src.u.sctp.port;
} else {
/* Get rid of dst ip and dst pt */
oldip = iph->daddr;
newip = tuple->dst.u3.ip;
hdr->dest = tuple->dst.u.sctp.port;
}

crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb),
crc32);
crc32 = sctp_end_cksum(crc32);
hdr->checksum = htonl(crc32);

return true;
}

static const struct nf_nat_protocol nf_nat_protocol_sctp = {
.protonum = IPPROTO_SCTP,
.me = THIS_MODULE,
.manip_pkt = sctp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = sctp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};

static int __init nf_nat_proto_sctp_init(void)
{
return nf_nat_protocol_register(&nf_nat_protocol_sctp);
}

static void __exit nf_nat_proto_sctp_exit(void)
{
nf_nat_protocol_unregister(&nf_nat_protocol_sctp);
}

module_init(nf_nat_proto_sctp_init);
module_exit(nf_nat_proto_sctp_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCTP NAT protocol helper");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -8,7 +8,6 @@

#include <linux/types.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/ip.h>
#include <linux/tcp.h>

@@ -19,75 +18,19 @@
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>

static int
tcp_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 port;
static u_int16_t tcp_port_rover;

if (maniptype == IP_NAT_MANIP_SRC)
port = tuple->src.u.tcp.port;
else
port = tuple->dst.u.tcp.port;

return ntohs(port) >= ntohs(min->tcp.port) &&
ntohs(port) <= ntohs(max->tcp.port);
}

static int
static bool
tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
static u_int16_t port;
__be16 *portptr;
unsigned int range_size, min, i;

if (maniptype == IP_NAT_MANIP_SRC)
portptr = &tuple->src.u.tcp.port;
else
portptr = &tuple->dst.u.tcp.port;

/* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
if (maniptype == IP_NAT_MANIP_DST)
return 0;

/* Map privileged onto privileged. */
if (ntohs(*portptr) < 1024) {
/* Loose convention: >> 512 is credential passing */
if (ntohs(*portptr)<512) {
min = 1;
range_size = 511 - min + 1;
} else {
min = 600;
range_size = 1023 - min + 1;
}
} else {
min = 1024;
range_size = 65535 - 1024 + 1;
}
} else {
min = ntohs(range->min.tcp.port);
range_size = ntohs(range->max.tcp.port) - min + 1;
}

if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
port = net_random();

for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!nf_nat_used_tuple(tuple, ct))
return 1;
}
return 0;
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&tcp_port_rover);
}

static int
static bool
tcp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,

@@ -107,7 +50,7 @@ tcp_manip_pkt(struct sk_buff *skb,
hdrsize = sizeof(struct tcphdr);

if (!skb_make_writable(skb, hdroff + hdrsize))
return 0;
return false;

iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct tcphdr *)(skb->data + hdroff);

@@ -130,22 +73,21 @@ tcp_manip_pkt(struct sk_buff *skb,
*portptr = newport;

if (hdrsize < sizeof(*hdr))
return 1;
return true;

inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
return 1;
return true;
}

const struct nf_nat_protocol nf_nat_protocol_tcp = {
.name = "TCP",
.protonum = IPPROTO_TCP,
.me = THIS_MODULE,
.manip_pkt = tcp_manip_pkt,
.in_range = tcp_in_range,
.in_range = nf_nat_proto_in_range,
.unique_tuple = tcp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
@@ -8,7 +8,6 @@

#include <linux/types.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/ip.h>
#include <linux/udp.h>

@@ -18,74 +17,19 @@
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>

static int
udp_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 port;
static u_int16_t udp_port_rover;

if (maniptype == IP_NAT_MANIP_SRC)
port = tuple->src.u.udp.port;
else
port = tuple->dst.u.udp.port;

return ntohs(port) >= ntohs(min->udp.port) &&
ntohs(port) <= ntohs(max->udp.port);
}

static int
static bool
udp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
static u_int16_t port;
__be16 *portptr;
unsigned int range_size, min, i;

if (maniptype == IP_NAT_MANIP_SRC)
portptr = &tuple->src.u.udp.port;
else
portptr = &tuple->dst.u.udp.port;

/* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
if (maniptype == IP_NAT_MANIP_DST)
return 0;

if (ntohs(*portptr) < 1024) {
/* Loose convention: >> 512 is credential passing */
if (ntohs(*portptr)<512) {
min = 1;
range_size = 511 - min + 1;
} else {
min = 600;
range_size = 1023 - min + 1;
}
} else {
min = 1024;
range_size = 65535 - 1024 + 1;
}
} else {
min = ntohs(range->min.udp.port);
range_size = ntohs(range->max.udp.port) - min + 1;
}

if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
port = net_random();

for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!nf_nat_used_tuple(tuple, ct))
return 1;
}
return 0;
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&udp_port_rover);
}

static int
static bool
udp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,

@@ -98,7 +42,7 @@ udp_manip_pkt(struct sk_buff *skb,
__be16 *portptr, newport;

if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return 0;
return false;

iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct udphdr *)(skb->data + hdroff);

@@ -124,18 +68,17 @@ udp_manip_pkt(struct sk_buff *skb,
hdr->check = CSUM_MANGLED_0;
}
*portptr = newport;
return 1;
return true;
}

const struct nf_nat_protocol nf_nat_protocol_udp = {
.name = "UDP",
.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
.manip_pkt = udp_manip_pkt,
.in_range = udp_in_range,
.in_range = nf_nat_proto_in_range,
.unique_tuple = udp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
@@ -0,0 +1,99 @@
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/udp.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>

static u_int16_t udplite_port_rover;

static bool
udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&udplite_port_rover);
}

static bool
udplite_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
struct udphdr *hdr;
unsigned int hdroff = iphdroff + iph->ihl*4;
__be32 oldip, newip;
__be16 *portptr, newport;

if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;

iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct udphdr *)(skb->data + hdroff);

if (maniptype == IP_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
newport = tuple->src.u.udp.port;
portptr = &hdr->source;
} else {
/* Get rid of dst ip and dst pt */
oldip = iph->daddr;
newip = tuple->dst.u3.ip;
newport = tuple->dst.u.udp.port;
portptr = &hdr->dest;
}

inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;

*portptr = newport;
return true;
}

static const struct nf_nat_protocol nf_nat_protocol_udplite = {
.protonum = IPPROTO_UDPLITE,
.me = THIS_MODULE,
.manip_pkt = udplite_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = udplite_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};

static int __init nf_nat_proto_udplite_init(void)
{
return nf_nat_protocol_register(&nf_nat_protocol_udplite);
}

static void __exit nf_nat_proto_udplite_fini(void)
{
nf_nat_protocol_unregister(&nf_nat_protocol_udplite);
}

module_init(nf_nat_proto_udplite_init);
module_exit(nf_nat_proto_udplite_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UDP-Lite NAT protocol helper");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -18,35 +18,34 @@
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>

static int unknown_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type manip_type,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type manip_type,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
return 1;
return true;
}

static int unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
/* Sorry: we can't help you; if it's not unique, we can't frob
anything. */
return 0;
return false;
}

static int
static bool
unknown_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
return 1;
return true;
}

const struct nf_nat_protocol nf_nat_unknown_protocol = {
.name = "unknown",
/* .me isn't set: getting a ref to this cannot fail. */
.manip_pkt = unknown_manip_pkt,
.in_range = unknown_in_range,
@@ -61,7 +61,7 @@ static struct
static struct xt_table __nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
.me = THIS_MODULE,
.af = AF_INET,
};

@@ -143,7 +143,7 @@ static bool ipt_snat_checkentry(const char *tablename,
void *targinfo,
unsigned int hook_mask)
{
struct nf_nat_multi_range_compat *mr = targinfo;
const struct nf_nat_multi_range_compat *mr = targinfo;

/* Must be a valid range */
if (mr->rangesize != 1) {

@@ -159,7 +159,7 @@ static bool ipt_dnat_checkentry(const char *tablename,
void *targinfo,
unsigned int hook_mask)
{
struct nf_nat_multi_range_compat *mr = targinfo;
const struct nf_nat_multi_range_compat *mr = targinfo;

/* Must be a valid range */
if (mr->rangesize != 1) {

@@ -188,25 +188,6 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
}

unsigned int
alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
{
__be32 ip
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
__be16 all
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
struct nf_nat_range range
= { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } };

pr_debug("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n",
ct, NIPQUAD(ip));
return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
}

int nf_nat_rule_find(struct sk_buff *skb,
unsigned int hooknum,
const struct net_device *in,
@@ -220,7 +220,7 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
if (ch < 0x80)
*len = ch;
else {
cnt = (unsigned char) (ch & 0x7F);
cnt = ch & 0x7F;
*len = 0;

while (cnt > 0) {

@@ -618,8 +618,7 @@ struct snmp_cnv
int syntax;
};

static struct snmp_cnv snmp_conv [] =
{
static const struct snmp_cnv snmp_conv[] = {
{ASN1_UNI, ASN1_NUL, SNMP_NULL},
{ASN1_UNI, ASN1_INT, SNMP_INTEGER},
{ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},

@@ -644,7 +643,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag,
unsigned int cls,
unsigned short *syntax)
{
struct snmp_cnv *cnv;
const struct snmp_cnv *cnv;

cnv = snmp_conv;

@@ -904,7 +903,7 @@ static inline void mangle_address(unsigned char *begin,
u_int32_t old;

if (debug)
memcpy(&old, (unsigned char *)addr, sizeof(old));
memcpy(&old, addr, sizeof(old));

*addr = map->to;

@@ -999,7 +998,7 @@ static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
*
*****************************************************************************/

static void hex_dump(unsigned char *buf, size_t len)
static void hex_dump(const unsigned char *buf, size_t len)
{
size_t i;

@@ -1080,7 +1079,7 @@ static int snmp_parse_mangle(unsigned char *msg,
if (cls != ASN1_CTX || con != ASN1_CON)
return 0;
if (debug > 1) {
unsigned char *pdus[] = {
static const unsigned char *const pdus[] = {
[SNMP_PDU_GET] = "get",
[SNMP_PDU_NEXT] = "get-next",
[SNMP_PDU_RESPONSE] = "response",

@@ -1232,8 +1231,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
{
int dir = CTINFO2DIR(ctinfo);
unsigned int ret;
struct iphdr *iph = ip_hdr(skb);
struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
const struct iphdr *iph = ip_hdr(skb);
const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);

/* SNMP replies and originating SNMP traps get mangled */
if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
@@ -30,8 +30,8 @@
#ifdef CONFIG_XFRM
static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
struct nf_conn *ct;
struct nf_conntrack_tuple *t;
const struct nf_conn *ct;
const struct nf_conntrack_tuple *t;
enum ip_conntrack_info ctinfo;
enum ip_conntrack_dir dir;
unsigned long statusbit;

@@ -50,7 +50,10 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
if (ct->status & statusbit) {
fl->fl4_dst = t->dst.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP)
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl->fl_ip_dport = t->dst.u.tcp.port;
}

@@ -59,7 +62,10 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
if (ct->status & statusbit) {
fl->fl4_src = t->src.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP)
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl->fl_ip_sport = t->src.u.tcp.port;
}
}

@@ -87,21 +93,8 @@ nf_nat_fn(unsigned int hooknum,
have dropped it. Hence it's the user's responsibilty to
packet filter it out, or implement conntrack/NAT for that
protocol. 8) --RR */
if (!ct) {
/* Exception: ICMP redirect to new connection (not in
hash table yet). We must not let this through, in
case we're doing NAT to the same network. */
if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
struct icmphdr _hdr, *hp;

hp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(_hdr), &_hdr);
if (hp != NULL &&
hp->type == ICMP_REDIRECT)
return NF_DROP;
}
if (!ct)
return NF_ACCEPT;
}

/* Don't try to NAT if this packet is not conntracked */
if (ct == &nf_conntrack_untracked)

@@ -109,6 +102,9 @@ nf_nat_fn(unsigned int hooknum,

nat = nfct_nat(ct);
if (!nat) {
/* NAT module was loaded late. */
if (nf_ct_is_confirmed(ct))
return NF_ACCEPT;
nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
if (nat == NULL) {
pr_debug("failed to add NAT extension\n");

@@ -134,10 +130,7 @@ nf_nat_fn(unsigned int hooknum,
if (!nf_nat_initialized(ct, maniptype)) {
unsigned int ret;

if (unlikely(nf_ct_is_confirmed(ct)))
/* NAT module was loaded late */
ret = alloc_null_binding_confirmed(ct, hooknum);
else if (hooknum == NF_INET_LOCAL_IN)
if (hooknum == NF_INET_LOCAL_IN)
/* LOCAL_IN hook doesn't have a chain! */
ret = alloc_null_binding(ct, hooknum);
else

@@ -189,7 +182,7 @@ nf_nat_out(unsigned int hooknum,
int (*okfn)(struct sk_buff *))
{
#ifdef CONFIG_XFRM
struct nf_conn *ct;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
#endif
unsigned int ret;

@@ -223,7 +216,7 @@ nf_nat_local_fn(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned int ret;

@@ -252,25 +245,6 @@ nf_nat_local_fn(unsigned int hooknum,
return ret;
}

static unsigned int
nf_nat_adjust(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;

ct = nf_ct_get(skb, &ctinfo);
if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
pr_debug("nf_nat_standalone: adjusting sequence number\n");
if (!nf_nat_seq_adjust(skb, ct, ctinfo))
return NF_DROP;
}
return NF_ACCEPT;
}

/* We must be after connection tracking and before packet filtering. */

static struct nf_hook_ops nf_nat_ops[] __read_mostly = {

@@ -290,14 +264,6 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
/* After conntrack, adjust sequence number */
{
.hook = nf_nat_adjust,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SEQ_ADJUST,
},
/* Before packet filtering, change destination */
{
.hook = nf_nat_local_fn,

@@ -314,14 +280,6 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
},
/* After conntrack, adjust sequence number */
{
.hook = nf_nat_adjust,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SEQ_ADJUST,
},
};

static int __init nf_nat_standalone_init(void)
@@ -121,16 +121,44 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
}
return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);

static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol)
{
struct ipv6hdr *ip6h = ipv6_hdr(skb);
__wsum hsum;
__sum16 csum = 0;

switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (len == skb->len - dataoff)
return nf_ip6_checksum(skb, hook, dataoff, protocol);
/* fall through */
case CHECKSUM_NONE:
hsum = skb_checksum(skb, 0, dataoff, 0);
skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
&ip6h->daddr,
skb->len - dataoff,
protocol,
csum_sub(0, hsum)));
skb->ip_summed = CHECKSUM_NONE;
csum = __skb_checksum_complete_head(skb, dataoff + len);
if (!csum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return csum;
};

static const struct nf_afinfo nf_ip6_afinfo = {
.family = AF_INET6,
.checksum = nf_ip6_checksum,
.route = nf_ip6_route,
.saveroute = nf_ip6_saveroute,
.reroute = nf_ip6_reroute,
.route_key_size = sizeof(struct ip6_rt_info),
.family = AF_INET6,
.checksum = nf_ip6_checksum,
.checksum_partial = nf_ip6_checksum_partial,
.route = nf_ip6_route,
.saveroute = nf_ip6_saveroute,
.reroute = nf_ip6_reroute,
.route_key_size = sizeof(struct ip6_rt_info),
};

int __init ipv6_netfilter_init(void)
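A hedged usage sketch (not part of the patch) of the new partial-checksum path: the caller asks for verification over only the first "coverage" bytes of the transport payload, the situation that partial-coverage protocols such as UDP-Lite presumably create. The helper name and parameters below are invented for illustration.

/* Sketch only: a folded sum of zero means the covered region verified cleanly. */
static bool example_partial_csum_ok(struct sk_buff *skb, unsigned int thoff,
				    unsigned int coverage, u_int8_t protocol)
{
	return nf_ip6_checksum_partial(skb, NF_INET_PRE_ROUTING, thoff,
				       coverage, protocol) == 0;
}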
@ -325,7 +325,7 @@ static void trace_packet(struct sk_buff *skb,
|
|||
struct ip6t_entry *e)
|
||||
{
|
||||
void *table_base;
|
||||
struct ip6t_entry *root;
|
||||
const struct ip6t_entry *root;
|
||||
char *hookname, *chainname, *comment;
|
||||
unsigned int rulenum = 0;
|
||||
|
||||
|
@ -952,7 +952,7 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
|
|||
{
|
||||
unsigned int countersize;
|
||||
struct xt_counters *counters;
|
||||
struct xt_table_info *private = table->private;
|
||||
const struct xt_table_info *private = table->private;
|
||||
|
||||
/* We need atomic snapshot of counters: rest doesn't change
|
||||
(other than comefrom, which userspace doesn't care
|
||||
|
@ -979,9 +979,9 @@ copy_entries_to_user(unsigned int total_size,
|
|||
unsigned int off, num;
|
||||
struct ip6t_entry *e;
|
||||
struct xt_counters *counters;
|
||||
struct xt_table_info *private = table->private;
|
||||
const struct xt_table_info *private = table->private;
|
||||
int ret = 0;
|
||||
void *loc_cpu_entry;
|
||||
const void *loc_cpu_entry;
|
||||
|
||||
counters = alloc_counters(table);
|
||||
if (IS_ERR(counters))
|
||||
|
@ -1001,8 +1001,8 @@ copy_entries_to_user(unsigned int total_size,
|
|||
/* ... then go back and fix counters and names */
|
||||
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
|
||||
unsigned int i;
|
||||
struct ip6t_entry_match *m;
|
||||
struct ip6t_entry_target *t;
|
||||
const struct ip6t_entry_match *m;
|
||||
const struct ip6t_entry_target *t;
|
||||
|
||||
e = (struct ip6t_entry *)(loc_cpu_entry + off);
|
||||
if (copy_to_user(userptr + off
|
||||
|
@ -1142,7 +1142,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
|
|||
"ip6table_%s", name);
|
||||
if (t && !IS_ERR(t)) {
|
||||
struct ip6t_getinfo info;
|
||||
struct xt_table_info *private = t->private;
|
||||
const struct xt_table_info *private = t->private;
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (compat) {
|
||||
|
@ -1206,7 +1206,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
|
|||
else {
|
||||
duprintf("get_entries: I've got %u not %u!\n",
|
||||
private->size, get.size);
|
||||
ret = -EINVAL;
|
||||
ret = -EAGAIN;
|
||||
}
|
||||
module_put(t->me);
|
||||
xt_table_unlock(t);
|
||||
|
@ -1225,7 +1225,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
|
|||
struct xt_table *t;
|
||||
struct xt_table_info *oldinfo;
|
||||
struct xt_counters *counters;
|
||||
void *loc_cpu_old_entry;
|
||||
const void *loc_cpu_old_entry;
|
||||
|
||||
ret = 0;
|
||||
counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
|
||||
|
@ -1369,9 +1369,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
|
|||
int size;
|
||||
void *ptmp;
|
||||
struct xt_table *t;
|
||||
struct xt_table_info *private;
|
||||
const struct xt_table_info *private;
|
||||
int ret = 0;
|
||||
void *loc_cpu_entry;
|
||||
const void *loc_cpu_entry;
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct compat_xt_counters_info compat_tmp;
|
||||
|
||||
|
@ -1905,11 +1905,11 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
|
|||
void __user *userptr)
|
||||
{
|
||||
struct xt_counters *counters;
|
||||
struct xt_table_info *private = table->private;
|
||||
const struct xt_table_info *private = table->private;
|
||||
void __user *pos;
|
||||
unsigned int size;
|
||||
int ret = 0;
|
||||
void *loc_cpu_entry;
|
||||
const void *loc_cpu_entry;
|
||||
unsigned int i = 0;
|
||||
|
||||
counters = alloc_counters(table);
|
||||
|
@ -1956,7 +1956,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
|
|||
xt_compat_lock(AF_INET6);
|
||||
t = xt_find_table_lock(net, AF_INET6, get.name);
|
||||
if (t && !IS_ERR(t)) {
|
||||
struct xt_table_info *private = t->private;
|
||||
const struct xt_table_info *private = t->private;
|
||||
struct xt_table_info info;
|
||||
duprintf("t->private->number = %u\n", private->number);
|
||||
ret = compat_table_info(private, &info);
|
||||
|
@ -1966,7 +1966,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
|
|||
} else if (!ret) {
|
||||
duprintf("compat_get_entries: I've got %u not %u!\n",
|
||||
private->size, get.size);
|
||||
ret = -EINVAL;
|
||||
ret = -EAGAIN;
|
||||
}
|
||||
xt_compat_flush_offsets(AF_INET6);
|
||||
module_put(t->me);
|
||||
|
@ -2155,7 +2155,8 @@ icmp6_match(const struct sk_buff *skb,
|
|||
unsigned int protoff,
|
||||
bool *hotdrop)
|
||||
{
|
||||
struct icmp6hdr _icmph, *ic;
|
||||
const struct icmp6hdr *ic;
|
||||
struct icmp6hdr _icmph;
|
||||
const struct ip6t_icmp *icmpinfo = matchinfo;
|
||||
|
||||
/* Must not be a fragment. */
|
||||
|
|
|
@ -363,11 +363,15 @@ static void dump_packet(const struct nf_loginfo *info,
|
|||
if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
|
||||
read_lock_bh(&skb->sk->sk_callback_lock);
|
||||
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
|
||||
printk("UID=%u GID=%u",
|
||||
printk("UID=%u GID=%u ",
|
||||
skb->sk->sk_socket->file->f_uid,
|
||||
skb->sk->sk_socket->file->f_gid);
|
||||
read_unlock_bh(&skb->sk->sk_callback_lock);
|
||||
}
|
||||
|
||||
/* Max length: 16 "MARK=0xFFFFFFFF " */
|
||||
if (!recurse && skb->mark)
|
||||
printk("MARK=0x%x ", skb->mark);
|
||||
}
|
||||
|
||||
static struct nf_loginfo default_loginfo = {
|
||||
|
|
|
@ -41,7 +41,8 @@ static void send_reset(struct sk_buff *oldskb)
|
|||
struct tcphdr otcph, *tcph;
|
||||
unsigned int otcplen, hh_len;
|
||||
int tcphoff, needs_ack;
|
||||
struct ipv6hdr *oip6h = ipv6_hdr(oldskb), *ip6h;
|
||||
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
|
||||
struct ipv6hdr *ip6h;
|
||||
struct dst_entry *dst = NULL;
|
||||
u8 proto;
|
||||
struct flowi fl;
|
||||
|
|
|
@ -49,7 +49,8 @@ ipv6header_mt6(const struct sk_buff *skb, const struct net_device *in,
|
|||
temp = 0;
|
||||
|
||||
while (ip6t_ext_hdr(nexthdr)) {
|
||||
struct ipv6_opt_hdr _hdr, *hp;
|
||||
const struct ipv6_opt_hdr *hp;
|
||||
struct ipv6_opt_hdr _hdr;
|
||||
int hdrlen;
|
||||
|
||||
/* Is there enough space for the next ext header? */
|
||||
|
|
|
@ -110,7 +110,8 @@ rt_mt6(const struct sk_buff *skb, const struct net_device *in,
|
|||
!!(rtinfo->invflags & IP6T_RT_INV_TYP)));
|
||||
|
||||
if (ret && (rtinfo->flags & IP6T_RT_RES)) {
|
||||
u_int32_t *rp, _reserved;
|
||||
const u_int32_t *rp;
|
||||
u_int32_t _reserved;
|
||||
rp = skb_header_pointer(skb,
|
||||
ptr + offsetof(struct rt0_hdr,
|
||||
reserved),
|
||||
|
|
|
@@ -54,7 +54,7 @@ static struct
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
.me = THIS_MODULE,
.af = AF_INET6,
};
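For reference, a minimal sketch of the initializer change applied here and repeated for the mangle and raw tables below: the lockdep-aware initializer names the lock variable it initializes, where the old RW_LOCK_UNLOCKED constant did not. The variable name is illustrative only.

/* Sketch: a statically initialized rwlock now identifies itself by name. */
static rwlock_t example_lock = __RW_LOCK_UNLOCKED(example_lock);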
@@ -60,7 +60,7 @@ static struct
static struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
.me = THIS_MODULE,
.af = AF_INET6,
};
@@ -38,7 +38,7 @@ static struct
static struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
.me = THIS_MODULE,
.af = AF_INET6,
};
|
|
|
@ -27,8 +27,8 @@
|
|||
#include <net/netfilter/nf_conntrack_l3proto.h>
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
|
||||
static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
const u_int32_t *ap;
|
||||
u_int32_t _addrs[8];
|
||||
|
@ -36,21 +36,21 @@ static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
|
|||
ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr),
|
||||
sizeof(_addrs), _addrs);
|
||||
if (ap == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
|
||||
memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6));
|
||||
memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6));
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int ipv6_print_tuple(struct seq_file *s,
|
||||
|
|
|
@ -28,21 +28,21 @@
|
|||
|
||||
static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
|
||||
|
||||
static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
const struct icmp6hdr *hp;
|
||||
struct icmp6hdr _hdr;
|
||||
|
||||
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
|
||||
if (hp == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
tuple->dst.u.icmp.type = hp->icmp6_type;
|
||||
tuple->src.u.icmp.id = hp->icmp6_identifier;
|
||||
tuple->dst.u.icmp.code = hp->icmp6_code;
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Add 1; spaces filled with 0. */
|
||||
|
@@ -53,17 +53,17 @@ static const u_int8_t invmap[] = {
[ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1
};

static int icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
int type = orig->dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(invmap) || !invmap[type])
return 0;
return false;

tuple->src.u.icmp.id = orig->src.u.icmp.id;
tuple->dst.u.icmp.type = invmap[type] - 1;
tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
return 1;
return true;
}
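A short worked example of the "add 1" encoding used by invmap above, so that an empty slot reads as 0 meaning "no mapping". Only the last initializer of the array is visible in this hunk; the echo entries quoted below are assumed from the usual full table.

/* Assuming [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1 in the full array:
 *   type = ICMPV6_ECHO_REQUEST (128), so type - 128 = 0
 *   invmap[0] = ICMPV6_ECHO_REPLY + 1 = 130, non-zero -> invertible
 *   inverted type = invmap[0] - 1 = ICMPV6_ECHO_REPLY (129)
 */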
|
||||
|
||||
/* Print out the per-protocol part of the tuple. */
|
||||
|
@ -102,9 +102,8 @@ static int icmpv6_packet(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int icmpv6_new(struct nf_conn *ct,
|
||||
const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
static const u_int8_t valid_new[] = {
|
||||
[ICMPV6_ECHO_REQUEST - 128] = 1,
|
||||
|
@ -116,11 +115,11 @@ static int icmpv6_new(struct nf_conn *ct,
|
|||
/* Can't create a new ICMPv6 `conn' with this. */
|
||||
pr_debug("icmpv6: can't create new conn with type %u\n",
|
||||
type + 128);
|
||||
NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple);
|
||||
return 0;
|
||||
nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
|
||||
return false;
|
||||
}
|
||||
atomic_set(&ct->proto.icmp.count, 0);
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
@ -103,8 +103,8 @@ struct ctl_table nf_ct_ipv6_sysctl_table[] = {
|
|||
};
|
||||
#endif
|
||||
|
||||
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
|
||||
struct in6_addr *daddr)
|
||||
static unsigned int ip6qhashfn(__be32 id, const struct in6_addr *saddr,
|
||||
const struct in6_addr *daddr)
|
||||
{
|
||||
u32 a, b, c;
|
||||
|
||||
|
@ -132,7 +132,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
|
|||
|
||||
static unsigned int nf_hashfn(struct inet_frag_queue *q)
|
||||
{
|
||||
struct nf_ct_frag6_queue *nq;
|
||||
const struct nf_ct_frag6_queue *nq;
|
||||
|
||||
nq = container_of(q, struct nf_ct_frag6_queue, q);
|
||||
return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr);
|
||||
|
@ -222,7 +222,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
|
|||
|
||||
|
||||
static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
|
||||
struct frag_hdr *fhdr, int nhoff)
|
||||
const struct frag_hdr *fhdr, int nhoff)
|
||||
{
|
||||
struct sk_buff *prev, *next;
|
||||
int offset, end;
|
||||
|
|
|
@@ -86,6 +86,16 @@ config NF_CONNTRACK_EVENTS

If unsure, say `N'.

config NF_CT_PROTO_DCCP
tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
depends on EXPERIMENTAL && NF_CONNTRACK
depends on NETFILTER_ADVANCED
help
With this option enabled, the layer 3 independent connection
tracking code will be able to do state tracking on DCCP connections.

If unsure, say 'N'.

config NF_CT_PROTO_GRE
tristate
depends on NF_CONNTRACK
|
|
|
@ -13,6 +13,7 @@ obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
|
|||
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
|
||||
|
||||
# SCTP protocol connection tracking
|
||||
obj-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
|
||||
obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
|
||||
obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
|
||||
obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
|
||||
|
|
|
@ -53,7 +53,7 @@ enum amanda_strings {
|
|||
};
|
||||
|
||||
static struct {
|
||||
char *string;
|
||||
const char *string;
|
||||
size_t len;
|
||||
struct ts_config *ts;
|
||||
} search[] __read_mostly = {
|
||||
|
@ -91,7 +91,6 @@ static int amanda_help(struct sk_buff *skb,
|
|||
char pbuf[sizeof("65535")], *tmp;
|
||||
u_int16_t len;
|
||||
__be16 port;
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
int ret = NF_ACCEPT;
|
||||
typeof(nf_nat_amanda_hook) nf_nat_amanda;
|
||||
|
||||
|
@ -148,7 +147,8 @@ static int amanda_help(struct sk_buff *skb,
|
|||
goto out;
|
||||
}
|
||||
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, family,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
nf_ct_l3num(ct),
|
||||
&tuple->src.u3, &tuple->dst.u3,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
|
||||
|
|
|
@ -94,7 +94,7 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
|
|||
nf_conntrack_hash_rnd);
|
||||
}
|
||||
|
||||
int
|
||||
bool
|
||||
nf_ct_get_tuple(const struct sk_buff *skb,
|
||||
unsigned int nhoff,
|
||||
unsigned int dataoff,
|
||||
|
@ -108,7 +108,7 @@ nf_ct_get_tuple(const struct sk_buff *skb,
|
|||
|
||||
tuple->src.l3num = l3num;
|
||||
if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
tuple->dst.protonum = protonum;
|
||||
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
|
||||
|
@ -117,10 +117,8 @@ nf_ct_get_tuple(const struct sk_buff *skb,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
|
||||
|
||||
int nf_ct_get_tuplepr(const struct sk_buff *skb,
|
||||
unsigned int nhoff,
|
||||
u_int16_t l3num,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
|
||||
u_int16_t l3num, struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
struct nf_conntrack_l3proto *l3proto;
|
||||
struct nf_conntrack_l4proto *l4proto;
|
||||
|
@ -134,7 +132,7 @@ int nf_ct_get_tuplepr(const struct sk_buff *skb,
|
|||
ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
|
||||
if (ret != NF_ACCEPT) {
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
l4proto = __nf_ct_l4proto_find(l3num, protonum);
|
||||
|
@ -147,7 +145,7 @@ int nf_ct_get_tuplepr(const struct sk_buff *skb,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
|
||||
|
||||
int
|
||||
bool
|
||||
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
|
||||
const struct nf_conntrack_tuple *orig,
|
||||
const struct nf_conntrack_l3proto *l3proto,
|
||||
|
@ -157,7 +155,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
|
|||
|
||||
inverse->src.l3num = orig->src.l3num;
|
||||
if (l3proto->invert_tuple(inverse, orig) == 0)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
inverse->dst.dir = !orig->dst.dir;
|
||||
|
||||
|
@ -194,8 +192,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
|
|||
* destroy_conntrack() MUST NOT be called with a write lock
|
||||
* to nf_conntrack_lock!!! -HW */
|
||||
rcu_read_lock();
|
||||
l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
|
||||
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
|
||||
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
|
||||
if (l4proto && l4proto->destroy)
|
||||
l4proto->destroy(ct);
|
||||
|
||||
|
@ -739,10 +736,10 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(nf_conntrack_in);
|
||||
|
||||
int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
int ret;
|
||||
bool ret;
|
||||
|
||||
rcu_read_lock();
|
||||
ret = nf_ct_invert_tuple(inverse, orig,
|
||||
|
@ -766,10 +763,10 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
|
|||
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
|
||||
|
||||
pr_debug("Altering reply tuple of %p to ", ct);
|
||||
NF_CT_DUMP_TUPLE(newreply);
|
||||
nf_ct_dump_tuple(newreply);
|
||||
|
||||
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
|
||||
if (ct->master || (help && help->expecting != 0))
|
||||
if (ct->master || (help && !hlist_empty(&help->expectations)))
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
|
|
|
@ -71,6 +71,9 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
|
|||
int i, newlen, newoff;
|
||||
struct nf_ct_ext_type *t;
|
||||
|
||||
/* Conntrack must not be confirmed to avoid races on reallocation. */
|
||||
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
|
||||
|
||||
if (!ct->ext)
|
||||
return nf_ct_ext_create(&ct->ext, id, gfp);
|
||||
|
||||
|
|
|
@ -350,8 +350,9 @@ static int help(struct sk_buff *skb,
|
|||
enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
unsigned int dataoff, datalen;
|
||||
struct tcphdr _tcph, *th;
|
||||
char *fb_ptr;
|
||||
const struct tcphdr *th;
|
||||
struct tcphdr _tcph;
|
||||
const char *fb_ptr;
|
||||
int ret;
|
||||
u32 seq;
|
||||
int dir = CTINFO2DIR(ctinfo);
|
||||
|
@ -405,7 +406,7 @@ static int help(struct sk_buff *skb,
|
|||
|
||||
/* Initialize IP/IPv6 addr to expected address (it's not mentioned
|
||||
in EPSV responses) */
|
||||
cmd.l3num = ct->tuplehash[dir].tuple.src.l3num;
|
||||
cmd.l3num = nf_ct_l3num(ct);
|
||||
memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
|
||||
sizeof(cmd.u3.all));
|
||||
|
||||
|
@ -452,7 +453,7 @@ static int help(struct sk_buff *skb,
|
|||
daddr = &ct->tuplehash[!dir].tuple.dst.u3;
|
||||
|
||||
/* Update the ftp info */
|
||||
if ((cmd.l3num == ct->tuplehash[dir].tuple.src.l3num) &&
|
||||
if ((cmd.l3num == nf_ct_l3num(ct)) &&
|
||||
memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
|
||||
sizeof(cmd.u3.all))) {
|
||||
/* Enrico Scholz's passive FTP to partially RNAT'd ftp
|
||||
|
|
|
@ -218,7 +218,6 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
|
|||
union nf_inet_addr *addr, __be16 *port)
|
||||
{
|
||||
const unsigned char *p;
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
int len;
|
||||
|
||||
if (taddr->choice != eH245_TransportAddress_unicastAddress)
|
||||
|
@ -226,13 +225,13 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
|
|||
|
||||
switch (taddr->unicastAddress.choice) {
|
||||
case eUnicastAddress_iPAddress:
|
||||
if (family != AF_INET)
|
||||
if (nf_ct_l3num(ct) != AF_INET)
|
||||
return 0;
|
||||
p = data + taddr->unicastAddress.iPAddress.network;
|
||||
len = 4;
|
||||
break;
|
||||
case eUnicastAddress_iP6Address:
|
||||
if (family != AF_INET6)
|
||||
if (nf_ct_l3num(ct) != AF_INET6)
|
||||
return 0;
|
||||
p = data + taddr->unicastAddress.iP6Address.network;
|
||||
len = 16;
|
||||
|
@ -277,8 +276,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
|
|||
/* Create expect for RTP */
|
||||
if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_UDP, NULL, &rtp_port);
|
||||
|
@ -288,8 +286,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
|
|||
nf_ct_expect_put(rtp_exp);
|
||||
return -1;
|
||||
}
|
||||
nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_UDP, NULL, &rtcp_port);
|
||||
|
@ -306,9 +303,9 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
|
|||
if (nf_ct_expect_related(rtp_exp) == 0) {
|
||||
if (nf_ct_expect_related(rtcp_exp) == 0) {
|
||||
pr_debug("nf_ct_h323: expect RTP ");
|
||||
NF_CT_DUMP_TUPLE(&rtp_exp->tuple);
|
||||
nf_ct_dump_tuple(&rtp_exp->tuple);
|
||||
pr_debug("nf_ct_h323: expect RTCP ");
|
||||
NF_CT_DUMP_TUPLE(&rtcp_exp->tuple);
|
||||
nf_ct_dump_tuple(&rtcp_exp->tuple);
|
||||
} else {
|
||||
nf_ct_unexpect_related(rtp_exp);
|
||||
ret = -1;
|
||||
|
@ -346,8 +343,7 @@ static int expect_t120(struct sk_buff *skb,
|
|||
/* Create expect for T.120 connections */
|
||||
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
|
@ -364,7 +360,7 @@ static int expect_t120(struct sk_buff *skb,
|
|||
} else { /* Conntrack only */
|
||||
if (nf_ct_expect_related(exp) == 0) {
|
||||
pr_debug("nf_ct_h323: expect T.120 ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
} else
|
||||
ret = -1;
|
||||
}
|
||||
|
@ -586,7 +582,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
|
|||
while (get_tpkt_data(skb, protoff, ct, ctinfo,
|
||||
&data, &datalen, &dataoff)) {
|
||||
pr_debug("nf_ct_h245: TPKT len=%d ", datalen);
|
||||
NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
|
||||
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
|
||||
|
||||
/* Decode H.245 signal */
|
||||
ret = DecodeMultimediaSystemControlMessage(data, datalen,
|
||||
|
@ -634,18 +630,17 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
|
|||
union nf_inet_addr *addr, __be16 *port)
|
||||
{
|
||||
const unsigned char *p;
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
int len;
|
||||
|
||||
switch (taddr->choice) {
|
||||
case eTransportAddress_ipAddress:
|
||||
if (family != AF_INET)
|
||||
if (nf_ct_l3num(ct) != AF_INET)
|
||||
return 0;
|
||||
p = data + taddr->ipAddress.ip;
|
||||
len = 4;
|
||||
break;
|
||||
case eTransportAddress_ip6Address:
|
||||
if (family != AF_INET6)
|
||||
if (nf_ct_l3num(ct) != AF_INET6)
|
||||
return 0;
|
||||
p = data + taddr->ip6Address.ip;
|
||||
len = 16;
|
||||
|
@ -683,8 +678,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
|
|||
/* Create expect for h245 connection */
|
||||
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
|
@ -701,7 +695,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
|
|||
} else { /* Conntrack only */
|
||||
if (nf_ct_expect_related(exp) == 0) {
|
||||
pr_debug("nf_ct_q931: expect H.245 ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
} else
|
||||
ret = -1;
|
||||
}
|
||||
|
@ -792,7 +786,7 @@ static int expect_callforwarding(struct sk_buff *skb,
|
|||
* we don't need to track the second call */
|
||||
if (callforward_filter &&
|
||||
callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
|
||||
ct->tuplehash[!dir].tuple.src.l3num)) {
|
||||
nf_ct_l3num(ct))) {
|
||||
pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -800,8 +794,7 @@ static int expect_callforwarding(struct sk_buff *skb,
|
|||
/* Create expect for the second call leg */
|
||||
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
exp->helper = nf_conntrack_helper_q931;
|
||||
|
@ -817,7 +810,7 @@ static int expect_callforwarding(struct sk_buff *skb,
|
|||
} else { /* Conntrack only */
|
||||
if (nf_ct_expect_related(exp) == 0) {
|
||||
pr_debug("nf_ct_q931: expect Call Forwarding ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
} else
|
||||
ret = -1;
|
||||
}
|
||||
|
@ -1137,7 +1130,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
|
|||
while (get_tpkt_data(skb, protoff, ct, ctinfo,
|
||||
&data, &datalen, &dataoff)) {
|
||||
pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
|
||||
NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
|
||||
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
|
||||
|
||||
/* Decode Q.931 signal */
|
||||
ret = DecodeQ931(data, datalen, &q931);
|
||||
|
@ -1272,8 +1265,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
|
|||
/* Create expect for Q.931 */
|
||||
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
gkrouted_only ? /* only accept calls from GK? */
|
||||
&ct->tuplehash[!dir].tuple.src.u3 : NULL,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
|
@ -1287,7 +1279,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
|
|||
} else { /* Conntrack only */
|
||||
if (nf_ct_expect_related(exp) == 0) {
|
||||
pr_debug("nf_ct_ras: expect Q.931 ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
|
||||
/* Save port for looking up expect in processing RCF */
|
||||
info->sig_port[dir] = port;
|
||||
|
@ -1344,15 +1336,14 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
|
|||
/* Need new expect */
|
||||
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_UDP, NULL, &port);
|
||||
exp->helper = nf_conntrack_helper_ras;
|
||||
|
||||
if (nf_ct_expect_related(exp) == 0) {
|
||||
pr_debug("nf_ct_ras: expect RAS ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
} else
|
||||
ret = -1;
|
||||
|
||||
|
@ -1436,7 +1427,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
|
|||
pr_debug("nf_ct_ras: set Q.931 expect "
|
||||
"timeout to %u seconds for",
|
||||
info->timeout);
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
set_expect_timeout(exp, info->timeout);
|
||||
}
|
||||
spin_unlock_bh(&nf_conntrack_lock);
|
||||
|
@ -1549,8 +1540,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
|
|||
/* Need new expect */
|
||||
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
|
@ -1558,7 +1548,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
|
|||
|
||||
if (nf_ct_expect_related(exp) == 0) {
|
||||
pr_debug("nf_ct_ras: expect Q.931 ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
} else
|
||||
ret = -1;
|
||||
|
||||
|
@ -1603,8 +1593,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
|
|||
/* Need new expect for call signal */
|
||||
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
|
||||
return -1;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[!dir].tuple.src.l3num,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
|
@ -1612,7 +1601,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
|
|||
|
||||
if (nf_ct_expect_related(exp) == 0) {
|
||||
pr_debug("nf_ct_ras: expect Q.931 ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
} else
|
||||
ret = -1;
|
||||
|
||||
|
@ -1716,7 +1705,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
|
|||
if (data == NULL)
|
||||
goto accept;
|
||||
pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
|
||||
NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
|
||||
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
|
||||
|
||||
/* Decode RAS message */
|
||||
ret = DecodeRasMessage(data, datalen, &ras);
|
||||
|
|
|
@ -126,7 +126,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
|
|||
{
|
||||
struct nf_conntrack_tuple_hash *h;
|
||||
struct nf_conntrack_expect *exp;
|
||||
struct hlist_node *n, *next;
|
||||
const struct hlist_node *n, *next;
|
||||
unsigned int i;
|
||||
|
||||
mutex_lock(&nf_ct_helper_mutex);
|
||||
|
|
|
@ -50,7 +50,7 @@ MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per "
|
|||
module_param(dcc_timeout, uint, 0400);
|
||||
MODULE_PARM_DESC(dcc_timeout, "timeout for unestablished DCC channels");
|
||||
|
||||
static const char *dccprotos[] = {
|
||||
static const char *const dccprotos[] = {
|
||||
"SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
|
||||
};
|
||||
|
||||
|
@ -65,7 +65,7 @@ static const char *dccprotos[] = {
|
|||
* ad_beg_p returns pointer to first byte of addr data
|
||||
* ad_end_p returns pointer to last byte of addr data
|
||||
*/
|
||||
static int parse_dcc(char *data, char *data_end, u_int32_t *ip,
|
||||
static int parse_dcc(char *data, const char *data_end, u_int32_t *ip,
|
||||
u_int16_t *port, char **ad_beg_p, char **ad_end_p)
|
||||
{
|
||||
/* at least 12: "AAAAAAAA P\1\n" */
|
||||
|
@ -93,9 +93,11 @@ static int help(struct sk_buff *skb, unsigned int protoff,
|
|||
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
unsigned int dataoff;
|
||||
struct iphdr *iph;
|
||||
struct tcphdr _tcph, *th;
|
||||
char *data, *data_limit, *ib_ptr;
|
||||
const struct iphdr *iph;
|
||||
const struct tcphdr *th;
|
||||
struct tcphdr _tcph;
|
||||
const char *data_limit;
|
||||
char *data, *ib_ptr;
|
||||
int dir = CTINFO2DIR(ctinfo);
|
||||
struct nf_conntrack_expect *exp;
|
||||
struct nf_conntrack_tuple *tuple;
|
||||
|
@ -159,7 +161,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
|
|||
/* we have at least
|
||||
* (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
|
||||
* data left (== 14/13 bytes) */
|
||||
if (parse_dcc((char *)data, data_limit, &dcc_ip,
|
||||
if (parse_dcc(data, data_limit, &dcc_ip,
|
||||
&dcc_port, &addr_beg_p, &addr_end_p)) {
|
||||
pr_debug("unable to parse dcc command\n");
|
||||
continue;
|
||||
|
|
|
@ -31,22 +31,22 @@
|
|||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
|
||||
|
||||
static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
|
||||
memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
|
||||
memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int generic_print_tuple(struct seq_file *s,
|
||||
|
|
|
@ -145,10 +145,11 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
|
|||
static inline int
|
||||
ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct)
|
||||
{
|
||||
struct nf_conntrack_l4proto *l4proto = nf_ct_l4proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
|
||||
struct nf_conntrack_l4proto *l4proto;
|
||||
struct nlattr *nest_proto;
|
||||
int ret;
|
||||
|
||||
l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct));
|
||||
if (!l4proto->to_nlattr) {
|
||||
nf_ct_l4proto_put(l4proto);
|
||||
return 0;
|
||||
|
@ -368,8 +369,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
|
|||
nfmsg = NLMSG_DATA(nlh);
|
||||
|
||||
nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
|
||||
nfmsg->nfgen_family =
|
||||
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
nfmsg->nfgen_family = nf_ct_l3num(ct);
|
||||
nfmsg->version = NFNETLINK_V0;
|
||||
nfmsg->res_id = 0;
|
||||
|
||||
|
@ -454,7 +454,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
|
|||
nfmsg = NLMSG_DATA(nlh);
|
||||
|
||||
nlh->nlmsg_flags = flags;
|
||||
nfmsg->nfgen_family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
nfmsg->nfgen_family = nf_ct_l3num(ct);
|
||||
nfmsg->version = NFNETLINK_V0;
|
||||
nfmsg->res_id = 0;
|
||||
|
||||
|
@ -535,8 +535,6 @@ static int ctnetlink_done(struct netlink_callback *cb)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define L3PROTO(ct) (ct)->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num
|
||||
|
||||
static int
|
||||
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
{
|
||||
|
@ -558,7 +556,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
|
|||
/* Dump entries of a given L3 protocol number.
|
||||
* If it is not specified, ie. l3proto == 0,
|
||||
* then dump everything. */
|
||||
if (l3proto && L3PROTO(ct) != l3proto)
|
||||
if (l3proto && nf_ct_l3num(ct) != l3proto)
|
||||
continue;
|
||||
if (cb->args[1]) {
|
||||
if (ct != last)
|
||||
|
@ -704,20 +702,11 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr,
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
npt = nf_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
|
||||
|
||||
if (!npt->nlattr_to_range) {
|
||||
nf_nat_proto_put(npt);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* nlattr_to_range returns 1 if it parsed, 0 if not, neg. on error */
|
||||
if (npt->nlattr_to_range(tb, range) > 0)
|
||||
range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
|
||||
|
||||
npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
|
||||
if (npt->nlattr_to_range)
|
||||
err = npt->nlattr_to_range(tb, range);
|
||||
nf_nat_proto_put(npt);
|
||||
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
|
||||
|
@ -1010,14 +999,11 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
|
|||
{
|
||||
struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO];
|
||||
struct nf_conntrack_l4proto *l4proto;
|
||||
u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
|
||||
u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
int err = 0;
|
||||
|
||||
nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);
|
||||
|
||||
l4proto = nf_ct_l4proto_find_get(l3num, npt);
|
||||
|
||||
l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct));
|
||||
if (l4proto->from_nlattr)
|
||||
err = l4proto->from_nlattr(tb, ct);
|
||||
nf_ct_l4proto_put(l4proto);
|
||||
|
|
|
@ -119,7 +119,7 @@ static void pptp_expectfn(struct nf_conn *ct,
|
|||
/* obviously this tuple inversion only works until you do NAT */
|
||||
nf_ct_invert_tuplepr(&inv_t, &exp->tuple);
|
||||
pr_debug("trying to unexpect other dir: ");
|
||||
NF_CT_DUMP_TUPLE(&inv_t);
|
||||
nf_ct_dump_tuple(&inv_t);
|
||||
|
||||
exp_other = nf_ct_expect_find_get(&inv_t);
|
||||
if (exp_other) {
|
||||
|
@ -141,7 +141,7 @@ static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
|
|||
struct nf_conn *sibling;
|
||||
|
||||
pr_debug("trying to timeout ct or exp for tuple ");
|
||||
NF_CT_DUMP_TUPLE(t);
|
||||
nf_ct_dump_tuple(t);
|
||||
|
||||
h = nf_conntrack_find_get(t);
|
||||
if (h) {
|
||||
|
@ -209,7 +209,7 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
|
|||
/* original direction, PNS->PAC */
|
||||
dir = IP_CT_DIR_ORIGINAL;
|
||||
nf_ct_expect_init(exp_orig, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[dir].tuple.src.l3num,
|
||||
nf_ct_l3num(ct),
|
||||
&ct->tuplehash[dir].tuple.src.u3,
|
||||
&ct->tuplehash[dir].tuple.dst.u3,
|
||||
IPPROTO_GRE, &peer_callid, &callid);
|
||||
|
@ -218,7 +218,7 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
|
|||
/* reply direction, PAC->PNS */
|
||||
dir = IP_CT_DIR_REPLY;
|
||||
nf_ct_expect_init(exp_reply, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
ct->tuplehash[dir].tuple.src.l3num,
|
||||
nf_ct_l3num(ct),
|
||||
&ct->tuplehash[dir].tuple.src.u3,
|
||||
&ct->tuplehash[dir].tuple.dst.u3,
|
||||
IPPROTO_GRE, &callid, &peer_callid);
|
||||
|
|
|
@@ -146,18 +146,15 @@ EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);

static int kill_l3proto(struct nf_conn *i, void *data)
{
return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
((struct nf_conntrack_l3proto *)data)->l3proto);
return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto;
}

static int kill_l4proto(struct nf_conn *i, void *data)
{
struct nf_conntrack_l4proto *l4proto;
l4proto = (struct nf_conntrack_l4proto *)data;
return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
l4proto->l4proto) &&
(i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
l4proto->l3proto);
return nf_ct_protonum(i) == l4proto->l4proto &&
nf_ct_l3num(i) == l4proto->l3proto;
}
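The conversions in this and the surrounding hunks swap open-coded tuple accesses for the nf_ct_l3num()/nf_ct_protonum() accessors. A hedged sketch of the equivalence, as shown by the replaced expressions above (the authoritative definitions live in the conntrack headers, outside this patch):

/* Sketch only; names are illustrative, not the real header definitions. */
static inline u_int16_t example_ct_l3num(const struct nf_conn *ct)
{
	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
}

static inline u_int8_t example_ct_protonum(const struct nf_conn *ct)
{
	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
}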
|
||||
|
||||
static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
|
||||
|
|
|
@ -0,0 +1,815 @@
|
|||
/*
|
||||
* DCCP connection tracking protocol helper
|
||||
*
|
||||
* Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/dccp.h>
|
||||
|
||||
#include <linux/netfilter/nfnetlink_conntrack.h>
|
||||
#include <net/netfilter/nf_conntrack.h>
|
||||
#include <net/netfilter/nf_conntrack_l4proto.h>
|
||||
#include <net/netfilter/nf_log.h>
|
||||
|
||||
static DEFINE_RWLOCK(dccp_lock);
|
||||
|
||||
static int nf_ct_dccp_loose __read_mostly = 1;
|
||||
|
||||
/* Timeouts are based on values from RFC4340:
|
||||
*
|
||||
* - REQUEST:
|
||||
*
|
||||
* 8.1.2. Client Request
|
||||
*
|
||||
* A client MAY give up on its DCCP-Requests after some time
|
||||
* (3 minutes, for example).
|
||||
*
|
||||
* - RESPOND:
|
||||
*
|
||||
* 8.1.3. Server Response
|
||||
*
|
||||
* It MAY also leave the RESPOND state for CLOSED after a timeout of
|
||||
* not less than 4MSL (8 minutes);
|
||||
*
|
||||
* - PARTOPEN:
|
||||
*
|
||||
* 8.1.5. Handshake Completion
|
||||
*
|
||||
* If the client remains in PARTOPEN for more than 4MSL (8 minutes),
|
||||
* it SHOULD reset the connection with Reset Code 2, "Aborted".
|
||||
*
|
||||
* - OPEN:
|
||||
*
|
||||
* The DCCP timestamp overflows after 11.9 hours. If the connection
|
||||
* stays idle this long the sequence number won't be recognized
|
||||
* as valid anymore.
|
||||
*
|
||||
* - CLOSEREQ/CLOSING:
|
||||
*
|
||||
* 8.3. Termination
|
||||
*
|
||||
* The retransmission timer should initially be set to go off in two
|
||||
* round-trip times and should back off to not less than once every
|
||||
* 64 seconds ...
|
||||
*
|
||||
* - TIMEWAIT:
|
||||
*
|
||||
* 4.3. States
|
||||
*
|
||||
* A server or client socket remains in this state for 2MSL (4 minutes)
|
||||
* after the connection has been torn down, ...
|
||||
*/
|
||||
|
||||
#define DCCP_MSL (2 * 60 * HZ)

static unsigned int dccp_timeout[CT_DCCP_MAX + 1] __read_mostly = {
[CT_DCCP_REQUEST] = 2 * DCCP_MSL,
[CT_DCCP_RESPOND] = 4 * DCCP_MSL,
[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL,
[CT_DCCP_OPEN] = 12 * 3600 * HZ,
[CT_DCCP_CLOSEREQ] = 64 * HZ,
[CT_DCCP_CLOSING] = 64 * HZ,
[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL,
};
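For orientation, a worked reading of the constants above in wall-clock terms (independent of HZ), matching the RFC 4340 sections cited earlier:

/*
 * DCCP_MSL                       = 2 * 60 * HZ    -> 2 minutes
 * dccp_timeout[CT_DCCP_REQUEST]  = 2 * DCCP_MSL   -> 4 minutes
 * dccp_timeout[CT_DCCP_RESPOND]  = 4 * DCCP_MSL   -> 8 minutes (4MSL, 8.1.3.)
 * dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL   -> 8 minutes (4MSL, 8.1.5.)
 * dccp_timeout[CT_DCCP_OPEN]     = 12 * 3600 * HZ -> 12 hours (> 11.9h timestamp wrap)
 * dccp_timeout[CT_DCCP_CLOSEREQ/CLOSING] = 64 * HZ -> 64 seconds (8.3. backoff floor)
 * dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL   -> 4 minutes (2MSL, 4.3.)
 */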
|
||||
|
||||
static const char * const dccp_state_names[] = {
|
||||
[CT_DCCP_NONE] = "NONE",
|
||||
[CT_DCCP_REQUEST] = "REQUEST",
|
||||
[CT_DCCP_RESPOND] = "RESPOND",
|
||||
[CT_DCCP_PARTOPEN] = "PARTOPEN",
|
||||
[CT_DCCP_OPEN] = "OPEN",
|
||||
[CT_DCCP_CLOSEREQ] = "CLOSEREQ",
|
||||
[CT_DCCP_CLOSING] = "CLOSING",
|
||||
[CT_DCCP_TIMEWAIT] = "TIMEWAIT",
|
||||
[CT_DCCP_IGNORE] = "IGNORE",
|
||||
[CT_DCCP_INVALID] = "INVALID",
|
||||
};
|
||||
|
||||
#define sNO CT_DCCP_NONE
|
||||
#define sRQ CT_DCCP_REQUEST
|
||||
#define sRS CT_DCCP_RESPOND
|
||||
#define sPO CT_DCCP_PARTOPEN
|
||||
#define sOP CT_DCCP_OPEN
|
||||
#define sCR CT_DCCP_CLOSEREQ
|
||||
#define sCG CT_DCCP_CLOSING
|
||||
#define sTW CT_DCCP_TIMEWAIT
|
||||
#define sIG CT_DCCP_IGNORE
|
||||
#define sIV CT_DCCP_INVALID
|
||||
|
||||
/*
|
||||
* DCCP state transition table
|
||||
*
|
||||
* The assumption is the same as for TCP tracking:
|
||||
*
|
||||
* We are the man in the middle. All the packets go through us but might
|
||||
* get lost in transit to the destination. It is assumed that the destination
|
||||
* can't receive segments we haven't seen.
|
||||
*
|
||||
* The following states exist:
|
||||
*
|
||||
* NONE: Initial state, expecting Request
|
||||
* REQUEST: Request seen, waiting for Response from server
|
||||
* RESPOND: Response from server seen, waiting for Ack from client
|
||||
* PARTOPEN: Ack after Response seen, waiting for packet other than Response,
|
||||
* Reset or Sync from server
|
||||
* OPEN: Packet other than Response, Reset or Sync seen
|
||||
* CLOSEREQ: CloseReq from server seen, expecting Close from client
|
||||
* CLOSING: Close seen, expecting Reset
|
||||
* TIMEWAIT: Reset seen
|
||||
* IGNORE: Not determinable whether packet is valid
|
||||
*
|
||||
* Some states exist only on one side of the connection: REQUEST, RESPOND,
|
||||
* PARTOPEN, CLOSEREQ. For the other side these states are equivalent to
|
||||
* the one it was in before.
|
||||
*
|
||||
* Packets are marked as ignored (sIG) if we don't know if they're valid
|
||||
* (for example a reincarnation of a connection we didn't notice is dead
|
||||
* already) and the server may send back a connection closing Reset or a
|
||||
* Response. They're also used for Sync/SyncAck packets, which we don't
|
||||
* care about.
|
||||
*/
|
||||
static const u_int8_t
|
||||
dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = {
|
||||
[CT_DCCP_ROLE_CLIENT] = {
|
||||
[DCCP_PKT_REQUEST] = {
|
||||
/*
|
||||
* sNO -> sRQ Regular Request
|
||||
* sRQ -> sRQ Retransmitted Request or reincarnation
|
||||
* sRS -> sRS Retransmitted Request (apparently Response
|
||||
* got lost after we saw it) or reincarnation
|
||||
* sPO -> sIG Ignore, conntrack might be out of sync
|
||||
* sOP -> sIG Ignore, conntrack might be out of sync
|
||||
* sCR -> sIG Ignore, conntrack might be out of sync
|
||||
* sCG -> sIG Ignore, conntrack might be out of sync
|
||||
* sTW -> sRQ Reincarnation
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sRQ, sRQ, sRS, sIG, sIG, sIG, sIG, sRQ,
|
||||
},
|
||||
[DCCP_PKT_RESPONSE] = {
|
||||
/*
|
||||
* sNO -> sIV Invalid
|
||||
* sRQ -> sIG Ignore, might be response to ignored Request
|
||||
* sRS -> sIG Ignore, might be response to ignored Request
|
||||
* sPO -> sIG Ignore, might be response to ignored Request
|
||||
* sOP -> sIG Ignore, might be response to ignored Request
|
||||
* sCR -> sIG Ignore, might be response to ignored Request
|
||||
* sCG -> sIG Ignore, might be response to ignored Request
|
||||
* sTW -> sIV Invalid, reincarnation in reverse direction
|
||||
* goes through sRQ
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIV,
|
||||
},
|
||||
[DCCP_PKT_ACK] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
|
||||
* sPO -> sPO Retransmitted Ack for Response, remain in PARTOPEN
|
||||
* sOP -> sOP Regular ACK, remain in OPEN
|
||||
* sCR -> sCR Ack in CLOSEREQ MAY be processed (8.3.)
|
||||
* sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
|
||||
* sTW -> sIV
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
|
||||
},
|
||||
[DCCP_PKT_DATA] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sIV No connection
|
||||
* sPO -> sIV MUST use DataAck in PARTOPEN state (8.1.5.)
|
||||
* sOP -> sOP Regular Data packet
|
||||
* sCR -> sCR Data in CLOSEREQ MAY be processed (8.3.)
|
||||
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
|
||||
* sTW -> sIV
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sIV, sOP, sCR, sCG, sIV,
|
||||
},
|
||||
[DCCP_PKT_DATAACK] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
|
||||
* sPO -> sPO Remain in PARTOPEN state
|
||||
* sOP -> sOP Regular DataAck packet in OPEN state
|
||||
* sCR -> sCR DataAck in CLOSEREQ MAY be processed (8.3.)
|
||||
* sCG -> sCG DataAck in CLOSING MAY be processed (8.3.)
|
||||
* sTW -> sIV
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
|
||||
},
|
||||
[DCCP_PKT_CLOSEREQ] = {
|
||||
/*
|
||||
* CLOSEREQ may only be sent by the server.
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV
|
||||
},
|
||||
[DCCP_PKT_CLOSE] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sIV No connection
|
||||
* sPO -> sCG Client-initiated close
|
||||
* sOP -> sCG Client-initiated close
|
||||
* sCR -> sCG Close in response to CloseReq (8.3.)
|
||||
* sCG -> sCG Retransmit
|
||||
* sTW -> sIV Late retransmit, already in TIME_WAIT
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sCG, sCG, sCG, sIV, sIV
|
||||
},
|
||||
[DCCP_PKT_RESET] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sTW Sync received or timeout, SHOULD send Reset (8.1.1.)
|
||||
* sRS -> sTW Response received without Request
|
||||
* sPO -> sTW Timeout, SHOULD send Reset (8.1.5.)
|
||||
* sOP -> sTW Connection reset
|
||||
* sCR -> sTW Connection reset
|
||||
* sCG -> sTW Connection reset
|
||||
* sTW -> sIG Ignore (don't refresh timer)
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG
|
||||
},
|
||||
[DCCP_PKT_SYNC] = {
|
||||
/*
|
||||
* We currently ignore Sync packets
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
|
||||
},
|
||||
[DCCP_PKT_SYNCACK] = {
|
||||
/*
|
||||
* We currently ignore SyncAck packets
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
|
||||
},
|
||||
},
|
||||
[CT_DCCP_ROLE_SERVER] = {
|
||||
[DCCP_PKT_REQUEST] = {
|
||||
/*
|
||||
* sNO -> sIV Invalid
|
||||
* sRQ -> sIG Ignore, conntrack might be out of sync
|
||||
* sRS -> sIG Ignore, conntrack might be out of sync
|
||||
* sPO -> sIG Ignore, conntrack might be out of sync
|
||||
* sOP -> sIG Ignore, conntrack might be out of sync
|
||||
* sCR -> sIG Ignore, conntrack might be out of sync
|
||||
* sCG -> sIG Ignore, conntrack might be out of sync
|
||||
* sTW -> sRQ Reincarnation, must reverse roles
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIG, sIG, sIG, sIG, sIG, sIG, sRQ
|
||||
},
|
||||
[DCCP_PKT_RESPONSE] = {
|
||||
/*
|
||||
* sNO -> sIV Response without Request
|
||||
* sRQ -> sRS Response to clients Request
|
||||
* sRS -> sRS Retransmitted Response (8.1.3. SHOULD NOT)
|
||||
* sPO -> sIG Response to an ignored Request or late retransmit
|
||||
* sOP -> sIG Ignore, might be response to ignored Request
|
||||
* sCR -> sIG Ignore, might be response to ignored Request
|
||||
* sCG -> sIG Ignore, might be response to ignored Request
|
||||
* sTW -> sIV Invalid, Request from client in sTW moves to sRQ
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sRS, sRS, sIG, sIG, sIG, sIG, sIV
|
||||
},
|
||||
[DCCP_PKT_ACK] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sIV No connection
|
||||
* sPO -> sOP Enter OPEN state (8.1.5.)
|
||||
* sOP -> sOP Regular Ack in OPEN state
|
||||
* sCR -> sIV Waiting for Close from client
|
||||
* sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
|
||||
* sTW -> sIV
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
|
||||
},
|
||||
[DCCP_PKT_DATA] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sIV No connection
|
||||
* sPO -> sOP Enter OPEN state (8.1.5.)
|
||||
* sOP -> sOP Regular Data packet in OPEN state
|
||||
* sCR -> sIV Waiting for Close from client
|
||||
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
|
||||
* sTW -> sIV
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
|
||||
},
|
||||
[DCCP_PKT_DATAACK] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sIV No connection
|
||||
* sPO -> sOP Enter OPEN state (8.1.5.)
|
||||
* sOP -> sOP Regular DataAck in OPEN state
|
||||
* sCR -> sIV Waiting for Close from client
|
||||
* sCG -> sCG Data in CLOSING MAY be processed (8.3.)
|
||||
* sTW -> sIV
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
|
||||
},
|
||||
[DCCP_PKT_CLOSEREQ] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sIV No connection
|
||||
* sPO -> sOP -> sCR Move directly to CLOSEREQ (8.1.5.)
|
||||
* sOP -> sCR CloseReq in OPEN state
|
||||
* sCR -> sCR Retransmit
|
||||
* sCG -> sCR Simultaneous close, client sends another Close
|
||||
* sTW -> sIV Already closed
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sCR, sCR, sCR, sCR, sIV
|
||||
},
|
||||
[DCCP_PKT_CLOSE] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sIV No connection
|
||||
* sRS -> sIV No connection
|
||||
* sPO -> sOP -> sCG Move directly to CLOSING
|
||||
* sOP -> sCG Move to CLOSING
|
||||
* sCR -> sIV Close after CloseReq is invalid
|
||||
* sCG -> sCG Retransmit
|
||||
* sTW -> sIV Already closed
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIV, sIV, sIV, sCG, sCG, sIV, sCG, sIV
|
||||
},
|
||||
[DCCP_PKT_RESET] = {
|
||||
/*
|
||||
* sNO -> sIV No connection
|
||||
* sRQ -> sTW Reset in response to Request
|
||||
* sRS -> sTW Timeout, SHOULD send Reset (8.1.3.)
|
||||
* sPO -> sTW Timeout, SHOULD send Reset (8.1.3.)
|
||||
* sOP -> sTW
|
||||
* sCR -> sTW
|
||||
* sCG -> sTW
|
||||
* sTW -> sIG Ignore (don't refresh timer)
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW, sTW */
|
||||
sIV, sTW, sTW, sTW, sTW, sTW, sTW, sTW, sIG
|
||||
},
|
||||
[DCCP_PKT_SYNC] = {
|
||||
/*
|
||||
* We currently ignore Sync packets
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
|
||||
},
|
||||
[DCCP_PKT_SYNCACK] = {
|
||||
/*
|
||||
* We currently ignore SyncAck packets
|
||||
*
|
||||
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
|
||||
sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
|
||||
},
|
||||
},
|
||||
};
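A minimal sketch (not part of the file) of how the three-dimensional table above is consulted; dccp_packet() further down performs exactly this lookup when computing the next state.

/* Sketch only: index by role, packet type and current state.
 * Example: a client DCCP-Request on a fresh entry,
 *   dccp_state_table[CT_DCCP_ROLE_CLIENT][DCCP_PKT_REQUEST][CT_DCCP_NONE]
 * evaluates to CT_DCCP_REQUEST (the sNO -> sRQ transition documented above). */
static inline u_int8_t example_dccp_next_state(enum ct_dccp_roles role,
					       u_int8_t pkt_type,
					       u_int8_t cur_state)
{
	return dccp_state_table[role][pkt_type][cur_state];
}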
|
||||
|
||||
static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
struct dccp_hdr _hdr, *dh;
|
||||
|
||||
dh = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
|
||||
if (dh == NULL)
|
||||
return false;
|
||||
|
||||
tuple->src.u.dccp.port = dh->dccph_sport;
|
||||
tuple->dst.u.dccp.port = dh->dccph_dport;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
|
||||
const struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
inv->src.u.dccp.port = tuple->dst.u.dccp.port;
|
||||
inv->dst.u.dccp.port = tuple->src.u.dccp.port;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
struct dccp_hdr _dh, *dh;
|
||||
const char *msg;
|
||||
u_int8_t state;
|
||||
|
||||
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
|
||||
BUG_ON(dh == NULL);
|
||||
|
||||
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
|
||||
switch (state) {
|
||||
default:
|
||||
if (nf_ct_dccp_loose == 0) {
|
||||
msg = "nf_ct_dccp: not picking up existing connection ";
|
||||
goto out_invalid;
|
||||
}
|
||||
case CT_DCCP_REQUEST:
|
||||
break;
|
||||
case CT_DCCP_INVALID:
|
||||
msg = "nf_ct_dccp: invalid state transition ";
|
||||
goto out_invalid;
|
||||
}
|
||||
|
||||
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
|
||||
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
|
||||
ct->proto.dccp.state = CT_DCCP_NONE;
|
||||
return true;
|
||||
|
||||
out_invalid:
|
||||
if (LOG_INVALID(IPPROTO_DCCP))
|
||||
nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, msg);
|
||||
return false;
|
||||
}

/* Reassemble the 48-bit Acknowledgement Number from its high/low parts. */
static u64 dccp_ack_seq(const struct dccp_hdr *dh)
{
	const struct dccp_hdr_ack_bits *dhack;

	dhack = (void *)dh + __dccp_basic_hdr_len(dh);
	return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) +
		     ntohl(dhack->dccph_ack_nr_low);
}

static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
		       unsigned int dataoff, enum ip_conntrack_info ctinfo,
		       int pf, unsigned int hooknum)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	struct dccp_hdr _dh, *dh;
	u_int8_t type, old_state, new_state;
	enum ct_dccp_roles role;

	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
	BUG_ON(dh == NULL);
	type = dh->dccph_type;

	if (type == DCCP_PKT_RESET &&
	    !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
		/* Tear down connection immediately if only reply is a RESET */
		if (del_timer(&ct->timeout))
			ct->timeout.function((unsigned long)ct);
		return NF_ACCEPT;
	}

	write_lock_bh(&dccp_lock);

	role = ct->proto.dccp.role[dir];
	old_state = ct->proto.dccp.state;
	new_state = dccp_state_table[role][type][old_state];

	switch (new_state) {
	case CT_DCCP_REQUEST:
		if (old_state == CT_DCCP_TIMEWAIT &&
		    role == CT_DCCP_ROLE_SERVER) {
			/* Reincarnation in the reverse direction: reopen and
			 * reverse client/server roles. */
			ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT;
			ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER;
		}
		break;
	case CT_DCCP_RESPOND:
		if (old_state == CT_DCCP_REQUEST)
			ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
		break;
	case CT_DCCP_PARTOPEN:
		if (old_state == CT_DCCP_RESPOND &&
		    type == DCCP_PKT_ACK &&
		    dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq)
			set_bit(IPS_ASSURED_BIT, &ct->status);
		break;
	case CT_DCCP_IGNORE:
		/*
		 * Connection tracking might be out of sync, so we ignore
		 * packets that might establish a new connection and resync
		 * if the server responds with a valid Response.
		 */
		if (ct->proto.dccp.last_dir == !dir &&
		    ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST &&
		    type == DCCP_PKT_RESPONSE) {
			ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT;
			ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER;
			ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
			new_state = CT_DCCP_RESPOND;
			break;
		}
		ct->proto.dccp.last_dir = dir;
		ct->proto.dccp.last_pkt = type;

		write_unlock_bh(&dccp_lock);
		if (LOG_INVALID(IPPROTO_DCCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				      "nf_ct_dccp: invalid packet ignored ");
		return NF_ACCEPT;
	case CT_DCCP_INVALID:
		write_unlock_bh(&dccp_lock);
		if (LOG_INVALID(IPPROTO_DCCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				      "nf_ct_dccp: invalid state transition ");
		return -NF_ACCEPT;
	}

	ct->proto.dccp.last_dir = dir;
	ct->proto.dccp.last_pkt = type;
	ct->proto.dccp.state = new_state;
	write_unlock_bh(&dccp_lock);
	nf_ct_refresh_acct(ct, ctinfo, skb, dccp_timeout[new_state]);

	return NF_ACCEPT;
}
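
/* Stateless sanity checks run before tracking: header length, checksum
 * coverage, checksum (on PRE_ROUTING only) and reserved packet types. */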
static int dccp_error(struct sk_buff *skb, unsigned int dataoff,
		      enum ip_conntrack_info *ctinfo, int pf,
		      unsigned int hooknum)
{
	struct dccp_hdr _dh, *dh;
	unsigned int dccp_len = skb->len - dataoff;
	unsigned int cscov;
	const char *msg;

	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
	if (dh == NULL) {
		msg = "nf_ct_dccp: short packet ";
		goto out_invalid;
	}

	if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
	    dh->dccph_doff * 4 > dccp_len) {
		msg = "nf_ct_dccp: truncated/malformed packet ";
		goto out_invalid;
	}

	cscov = dccp_len;
	if (dh->dccph_cscov) {
		cscov = (dh->dccph_cscov - 1) * 4;
		if (cscov > dccp_len) {
			msg = "nf_ct_dccp: bad checksum coverage ";
			goto out_invalid;
		}
	}

	if (nf_conntrack_checksum && hooknum == NF_INET_PRE_ROUTING &&
	    nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
				pf)) {
		msg = "nf_ct_dccp: bad checksum ";
		goto out_invalid;
	}

	if (dh->dccph_type >= DCCP_PKT_INVALID) {
		msg = "nf_ct_dccp: reserved packet type ";
		goto out_invalid;
	}

	return NF_ACCEPT;

out_invalid:
	if (LOG_INVALID(IPPROTO_DCCP))
		nf_log_packet(pf, 0, skb, NULL, NULL, NULL, msg);
	return -NF_ACCEPT;
}
|
||||
|
||||
static int dccp_print_tuple(struct seq_file *s,
|
||||
const struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
return seq_printf(s, "sport=%hu dport=%hu ",
|
||||
ntohs(tuple->src.u.dccp.port),
|
||||
ntohs(tuple->dst.u.dccp.port));
|
||||
}
|
||||
|
||||
static int dccp_print_conntrack(struct seq_file *s, const struct nf_conn *ct)
|
||||
{
|
||||
return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
|
||||
static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
|
||||
const struct nf_conn *ct)
|
||||
{
|
||||
struct nlattr *nest_parms;
|
||||
|
||||
read_lock_bh(&dccp_lock);
|
||||
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
|
||||
if (!nest_parms)
|
||||
goto nla_put_failure;
|
||||
NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
|
||||
nla_nest_end(skb, nest_parms);
|
||||
read_unlock_bh(&dccp_lock);
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
read_unlock_bh(&dccp_lock);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
|
||||
[CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 },
|
||||
};
|
||||
|
||||
static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
|
||||
{
|
||||
struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
|
||||
struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];
|
||||
int err;
|
||||
|
||||
if (!attr)
|
||||
return 0;
|
||||
|
||||
err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr,
|
||||
dccp_nla_policy);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (!tb[CTA_PROTOINFO_DCCP_STATE] ||
|
||||
nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock_bh(&dccp_lock);
|
||||
ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
|
||||
write_unlock_bh(&dccp_lock);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
static unsigned int dccp_sysctl_table_users;
|
||||
static struct ctl_table_header *dccp_sysctl_header;
|
||||
static ctl_table dccp_sysctl_table[] = {
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_timeout_request",
|
||||
.data = &dccp_timeout[CT_DCCP_REQUEST],
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_timeout_respond",
|
||||
.data = &dccp_timeout[CT_DCCP_RESPOND],
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_timeout_partopen",
|
||||
.data = &dccp_timeout[CT_DCCP_PARTOPEN],
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_timeout_open",
|
||||
.data = &dccp_timeout[CT_DCCP_OPEN],
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_timeout_closereq",
|
||||
.data = &dccp_timeout[CT_DCCP_CLOSEREQ],
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_timeout_closing",
|
||||
.data = &dccp_timeout[CT_DCCP_CLOSING],
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_timeout_timewait",
|
||||
.data = &dccp_timeout[CT_DCCP_TIMEWAIT],
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "nf_conntrack_dccp_loose",
|
||||
.data = &nf_ct_dccp_loose,
|
||||
.maxlen = sizeof(nf_ct_dccp_loose),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
{
|
||||
.ctl_name = 0,
|
||||
}
|
||||
};
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
||||
static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
|
||||
.l3proto = AF_INET,
|
||||
.l4proto = IPPROTO_DCCP,
|
||||
.name = "dccp",
|
||||
.pkt_to_tuple = dccp_pkt_to_tuple,
|
||||
.invert_tuple = dccp_invert_tuple,
|
||||
.new = dccp_new,
|
||||
.packet = dccp_packet,
|
||||
.error = dccp_error,
|
||||
.print_tuple = dccp_print_tuple,
|
||||
.print_conntrack = dccp_print_conntrack,
|
||||
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
|
||||
.to_nlattr = dccp_to_nlattr,
|
||||
.from_nlattr = nlattr_to_dccp,
|
||||
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
|
||||
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
|
||||
.nla_policy = nf_ct_port_nla_policy,
|
||||
#endif
|
||||
#ifdef CONFIG_SYSCTL
|
||||
.ctl_table_users = &dccp_sysctl_table_users,
|
||||
.ctl_table_header = &dccp_sysctl_header,
|
||||
.ctl_table = dccp_sysctl_table,
|
||||
#endif
|
||||
};
|
||||
|
||||
static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
|
||||
.l3proto = AF_INET6,
|
||||
.l4proto = IPPROTO_DCCP,
|
||||
.name = "dccp",
|
||||
.pkt_to_tuple = dccp_pkt_to_tuple,
|
||||
.invert_tuple = dccp_invert_tuple,
|
||||
.new = dccp_new,
|
||||
.packet = dccp_packet,
|
||||
.error = dccp_error,
|
||||
.print_tuple = dccp_print_tuple,
|
||||
.print_conntrack = dccp_print_conntrack,
|
||||
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
|
||||
.to_nlattr = dccp_to_nlattr,
|
||||
.from_nlattr = nlattr_to_dccp,
|
||||
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
|
||||
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
|
||||
.nla_policy = nf_ct_port_nla_policy,
|
||||
#endif
|
||||
#ifdef CONFIG_SYSCTL
|
||||
.ctl_table_users = &dccp_sysctl_table_users,
|
||||
.ctl_table_header = &dccp_sysctl_header,
|
||||
.ctl_table = dccp_sysctl_table,
|
||||
#endif
|
||||
};
|
||||
|
||||
static int __init nf_conntrack_proto_dccp_init(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = nf_conntrack_l4proto_register(&dccp_proto4);
|
||||
if (err < 0)
|
||||
goto err1;
|
||||
|
||||
err = nf_conntrack_l4proto_register(&dccp_proto6);
|
||||
if (err < 0)
|
||||
goto err2;
|
||||
return 0;
|
||||
|
||||
err2:
|
||||
nf_conntrack_l4proto_unregister(&dccp_proto4);
|
||||
err1:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit nf_conntrack_proto_dccp_fini(void)
|
||||
{
|
||||
nf_conntrack_l4proto_unregister(&dccp_proto6);
|
||||
nf_conntrack_l4proto_unregister(&dccp_proto4);
|
||||
}
|
||||
|
||||
module_init(nf_conntrack_proto_dccp_init);
|
||||
module_exit(nf_conntrack_proto_dccp_fini);
|
||||
|
||||
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
|
||||
MODULE_DESCRIPTION("DCCP connection tracking protocol helper");
|
||||
MODULE_LICENSE("GPL");
|
|
@ -14,23 +14,23 @@
|
|||
|
||||
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
|
||||
|
||||
static int generic_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
tuple->src.u.all = 0;
|
||||
tuple->dst.u.all = 0;
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
tuple->src.u.all = 0;
|
||||
tuple->dst.u.all = 0;
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Print out the per-protocol part of the tuple. */
|
||||
|
@ -53,10 +53,10 @@ static int packet(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
static bool new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
|
|
|
@ -82,7 +82,7 @@ static __be16 gre_keymap_lookup(struct nf_conntrack_tuple *t)
|
|||
read_unlock_bh(&nf_ct_gre_lock);
|
||||
|
||||
pr_debug("lookup src key 0x%x for ", key);
|
||||
NF_CT_DUMP_TUPLE(t);
|
||||
nf_ct_dump_tuple(t);
|
||||
|
||||
return key;
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
|
|||
*kmp = km;
|
||||
|
||||
pr_debug("adding new entry %p: ", km);
|
||||
NF_CT_DUMP_TUPLE(&km->tuple);
|
||||
nf_ct_dump_tuple(&km->tuple);
|
||||
|
||||
write_lock_bh(&nf_ct_gre_lock);
|
||||
list_add_tail(&km->list, &gre_keymap_list);
|
||||
|
@ -148,18 +148,17 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
|
|||
/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
|
||||
|
||||
/* invert gre part of tuple */
|
||||
static int gre_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
tuple->dst.u.gre.key = orig->src.u.gre.key;
|
||||
tuple->src.u.gre.key = orig->dst.u.gre.key;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* gre hdr info to tuple */
|
||||
static int gre_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
const struct gre_hdr_pptp *pgrehdr;
|
||||
struct gre_hdr_pptp _pgrehdr;
|
||||
|
@ -173,24 +172,24 @@ static int gre_pkt_to_tuple(const struct sk_buff *skb,
|
|||
/* try to behave like "nf_conntrack_proto_generic" */
|
||||
tuple->src.u.all = 0;
|
||||
tuple->dst.u.all = 0;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* PPTP header is variable length, only need up to the call_id field */
|
||||
pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
|
||||
if (!pgrehdr)
|
||||
return 1;
|
||||
return true;
|
||||
|
||||
if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
|
||||
pr_debug("GRE_VERSION_PPTP but unknown proto\n");
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
tuple->dst.u.gre.key = pgrehdr->call_id;
|
||||
srckey = gre_keymap_lookup(tuple);
|
||||
tuple->src.u.gre.key = srckey;
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* print gre part of tuple */
|
||||
|
@ -235,18 +234,18 @@ static int gre_packet(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int gre_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
pr_debug(": ");
|
||||
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
|
||||
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
|
||||
|
||||
/* initialize to sane value. Ideally a conntrack helper
|
||||
* (e.g. in case of pptp) is increasing them */
|
||||
ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
|
||||
ct->proto.gre.timeout = GRE_TIMEOUT;
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Called when a conntrack entry has already been removed from the hashes
|
||||
|
|
|
@ -33,7 +33,7 @@ static DEFINE_RWLOCK(sctp_lock);
|
|||
|
||||
And so for me for SCTP :D -Kiran */
|
||||
|
||||
static const char *sctp_conntrack_names[] = {
|
||||
static const char *const sctp_conntrack_names[] = {
|
||||
"NONE",
|
||||
"CLOSED",
|
||||
"COOKIE_WAIT",
|
||||
|
@ -130,28 +130,28 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
|
|||
}
|
||||
};
|
||||
|
||||
static int sctp_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
sctp_sctphdr_t _hdr, *hp;
|
||||
const struct sctphdr *hp;
|
||||
struct sctphdr _hdr;
|
||||
|
||||
/* Actually only need first 8 bytes. */
|
||||
hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
|
||||
if (hp == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
tuple->src.u.sctp.port = hp->source;
|
||||
tuple->dst.u.sctp.port = hp->dest;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
tuple->src.u.sctp.port = orig->dst.u.sctp.port;
|
||||
tuple->dst.u.sctp.port = orig->src.u.sctp.port;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Print out the per-protocol part of the tuple. */
|
||||
|
@ -292,8 +292,10 @@ static int sctp_packet(struct nf_conn *ct,
|
|||
{
|
||||
enum sctp_conntrack new_state, old_state;
|
||||
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
|
||||
sctp_sctphdr_t _sctph, *sh;
|
||||
sctp_chunkhdr_t _sch, *sch;
|
||||
const struct sctphdr *sh;
|
||||
struct sctphdr _sctph;
|
||||
const struct sctp_chunkhdr *sch;
|
||||
struct sctp_chunkhdr _sch;
|
||||
u_int32_t offset, count;
|
||||
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
|
||||
|
||||
|
@ -390,27 +392,29 @@ static int sctp_packet(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
enum sctp_conntrack new_state;
|
||||
sctp_sctphdr_t _sctph, *sh;
|
||||
sctp_chunkhdr_t _sch, *sch;
|
||||
const struct sctphdr *sh;
|
||||
struct sctphdr _sctph;
|
||||
const struct sctp_chunkhdr *sch;
|
||||
struct sctp_chunkhdr _sch;
|
||||
u_int32_t offset, count;
|
||||
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
|
||||
|
||||
sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
|
||||
if (sh == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
if (do_basic_checks(ct, skb, dataoff, map) != 0)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
/* If an OOTB packet has any of these chunks discard (Sec 8.4) */
|
||||
if (test_bit(SCTP_CID_ABORT, map) ||
|
||||
test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
|
||||
test_bit(SCTP_CID_COOKIE_ACK, map))
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
new_state = SCTP_CONNTRACK_MAX;
|
||||
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
|
||||
|
@ -422,7 +426,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
|||
if (new_state == SCTP_CONNTRACK_NONE ||
|
||||
new_state == SCTP_CONNTRACK_MAX) {
|
||||
pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Copy the vtag into the state info */
|
||||
|
@ -433,7 +437,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
|||
ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
|
||||
sizeof(_inithdr), &_inithdr);
|
||||
if (ih == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
pr_debug("Setting vtag %x for new conn\n",
|
||||
ih->init_tag);
|
||||
|
@ -442,7 +446,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
|||
ih->init_tag;
|
||||
} else {
|
||||
/* Sec 8.5.1 (A) */
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
/* If it is a shutdown ack OOTB packet, we expect a return
|
||||
|
@ -456,7 +460,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
|||
ct->proto.sctp.state = new_state;
|
||||
}
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
|
|
|
@ -257,9 +257,8 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
|
|||
}
|
||||
};
|
||||
|
||||
static int tcp_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
const struct tcphdr *hp;
|
||||
struct tcphdr _hdr;
|
||||
|
@ -267,20 +266,20 @@ static int tcp_pkt_to_tuple(const struct sk_buff *skb,
|
|||
/* Actually only need first 8 bytes. */
|
||||
hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
|
||||
if (hp == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
tuple->src.u.tcp.port = hp->source;
|
||||
tuple->dst.u.tcp.port = hp->dest;
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
tuple->src.u.tcp.port = orig->dst.u.tcp.port;
|
||||
tuple->dst.u.tcp.port = orig->src.u.tcp.port;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Print out the per-protocol part of the tuple. */
|
||||
|
@ -478,20 +477,20 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
|
|||
}
|
||||
}
|
||||
|
||||
static int tcp_in_window(const struct nf_conn *ct,
|
||||
struct ip_ct_tcp *state,
|
||||
enum ip_conntrack_dir dir,
|
||||
unsigned int index,
|
||||
const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
const struct tcphdr *tcph,
|
||||
int pf)
|
||||
static bool tcp_in_window(const struct nf_conn *ct,
|
||||
struct ip_ct_tcp *state,
|
||||
enum ip_conntrack_dir dir,
|
||||
unsigned int index,
|
||||
const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
const struct tcphdr *tcph,
|
||||
int pf)
|
||||
{
|
||||
struct ip_ct_tcp_state *sender = &state->seen[dir];
|
||||
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
|
||||
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
|
||||
__u32 seq, ack, sack, end, win, swin;
|
||||
int res;
|
||||
bool res;
|
||||
|
||||
/*
|
||||
* Get the required data from the packet.
|
||||
|
@ -506,7 +505,7 @@ static int tcp_in_window(const struct nf_conn *ct,
|
|||
|
||||
pr_debug("tcp_in_window: START\n");
|
||||
pr_debug("tcp_in_window: ");
|
||||
NF_CT_DUMP_TUPLE(tuple);
|
||||
nf_ct_dump_tuple(tuple);
|
||||
pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n",
|
||||
seq, ack, sack, win, end);
|
||||
pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
|
||||
|
@ -593,7 +592,7 @@ static int tcp_in_window(const struct nf_conn *ct,
|
|||
seq = end = sender->td_end;
|
||||
|
||||
pr_debug("tcp_in_window: ");
|
||||
NF_CT_DUMP_TUPLE(tuple);
|
||||
nf_ct_dump_tuple(tuple);
|
||||
pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n",
|
||||
seq, ack, sack, win, end);
|
||||
pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
|
||||
|
@ -657,12 +656,12 @@ static int tcp_in_window(const struct nf_conn *ct,
|
|||
state->retrans = 0;
|
||||
}
|
||||
}
|
||||
res = 1;
|
||||
res = true;
|
||||
} else {
|
||||
res = 0;
|
||||
res = false;
|
||||
if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
|
||||
nf_ct_tcp_be_liberal)
|
||||
res = 1;
|
||||
res = true;
|
||||
if (!res && LOG_INVALID(IPPROTO_TCP))
|
||||
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
|
||||
"nf_ct_tcp: %s ",
|
||||
|
@ -676,7 +675,7 @@ static int tcp_in_window(const struct nf_conn *ct,
|
|||
: "SEQ is over the upper bound (over the window of the receiver)");
|
||||
}
|
||||
|
||||
pr_debug("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
|
||||
pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
|
||||
"receiver end=%u maxend=%u maxwin=%u\n",
|
||||
res, sender->td_end, sender->td_maxend, sender->td_maxwin,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
|
||||
|
@ -937,7 +936,7 @@ static int tcp_packet(struct nf_conn *ct,
|
|||
ct->proto.tcp.last_dir = dir;
|
||||
|
||||
pr_debug("tcp_conntracks: ");
|
||||
NF_CT_DUMP_TUPLE(tuple);
|
||||
nf_ct_dump_tuple(tuple);
|
||||
pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
|
||||
(th->syn ? 1 : 0), (th->ack ? 1 : 0),
|
||||
(th->fin ? 1 : 0), (th->rst ? 1 : 0),
|
||||
|
@ -982,9 +981,8 @@ static int tcp_packet(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int tcp_new(struct nf_conn *ct,
|
||||
const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
enum tcp_conntrack new_state;
|
||||
const struct tcphdr *th;
|
||||
|
@ -1003,7 +1001,7 @@ static int tcp_new(struct nf_conn *ct,
|
|||
/* Invalid: delete conntrack */
|
||||
if (new_state >= TCP_CONNTRACK_MAX) {
|
||||
pr_debug("nf_ct_tcp: invalid new deleting.\n");
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (new_state == TCP_CONNTRACK_SYN_SENT) {
|
||||
|
@ -1021,7 +1019,7 @@ static int tcp_new(struct nf_conn *ct,
|
|||
ct->proto.tcp.seen[1].flags = 0;
|
||||
} else if (nf_ct_tcp_loose == 0) {
|
||||
/* Don't try to pick up connections. */
|
||||
return 0;
|
||||
return false;
|
||||
} else {
|
||||
/*
|
||||
* We are in the middle of a connection,
|
||||
|
@ -1061,7 +1059,7 @@ static int tcp_new(struct nf_conn *ct,
|
|||
sender->td_scale,
|
||||
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
|
||||
receiver->td_scale);
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
|
||||
|
@ -1129,11 +1127,13 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (!tb[CTA_PROTOINFO_TCP_STATE])
|
||||
if (tb[CTA_PROTOINFO_TCP_STATE] &&
|
||||
nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock_bh(&tcp_lock);
|
||||
ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
|
||||
if (tb[CTA_PROTOINFO_TCP_STATE])
|
||||
ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
|
||||
|
||||
if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
|
||||
struct nf_ct_tcp_flags *attr =
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
|
||||
static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
|
||||
|
||||
static int udp_pkt_to_tuple(const struct sk_buff *skb,
|
||||
static bool udp_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
|
@ -36,20 +36,20 @@ static int udp_pkt_to_tuple(const struct sk_buff *skb,
|
|||
/* Actually only need first 8 bytes. */
|
||||
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
|
||||
if (hp == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
tuple->src.u.udp.port = hp->source;
|
||||
tuple->dst.u.udp.port = hp->dest;
|
||||
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int udp_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool udp_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
tuple->src.u.udp.port = orig->dst.u.udp.port;
|
||||
tuple->dst.u.udp.port = orig->src.u.udp.port;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Print out the per-protocol part of the tuple. */
|
||||
|
@ -83,10 +83,10 @@ static int udp_packet(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int udp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int udp_error(struct sk_buff *skb, unsigned int dataoff,
|
||||
|
|
|
@ -27,28 +27,28 @@
|
|||
static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ;
|
||||
static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ;
|
||||
|
||||
static int udplite_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
const struct udphdr *hp;
|
||||
struct udphdr _hdr;
|
||||
|
||||
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
|
||||
if (hp == NULL)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
tuple->src.u.udp.port = hp->source;
|
||||
tuple->dst.u.udp.port = hp->dest;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int udplite_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_tuple *orig)
|
||||
{
|
||||
tuple->src.u.udp.port = orig->dst.u.udp.port;
|
||||
tuple->dst.u.udp.port = orig->src.u.udp.port;
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Print out the per-protocol part of the tuple. */
|
||||
|
@ -83,10 +83,10 @@ static int udplite_packet(struct nf_conn *ct,
|
|||
}
|
||||
|
||||
/* Called when a new connection for this protocol found. */
|
||||
static int udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff)
|
||||
{
|
||||
return 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static int udplite_error(struct sk_buff *skb, unsigned int dataoff,
|
||||
|
@ -127,32 +127,13 @@ static int udplite_error(struct sk_buff *skb, unsigned int dataoff,
|
|||
}
|
||||
|
||||
/* Checksum invalid? Ignore. */
|
||||
if (nf_conntrack_checksum && !skb_csum_unnecessary(skb) &&
|
||||
hooknum == NF_INET_PRE_ROUTING) {
|
||||
if (pf == PF_INET) {
|
||||
struct iphdr *iph = ip_hdr(skb);
|
||||
|
||||
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
|
||||
udplen, IPPROTO_UDPLITE, 0);
|
||||
} else {
|
||||
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
|
||||
__wsum hsum = skb_checksum(skb, 0, dataoff, 0);
|
||||
|
||||
skb->csum = ~csum_unfold(
|
||||
csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
|
||||
udplen, IPPROTO_UDPLITE,
|
||||
csum_sub(0, hsum)));
|
||||
}
|
||||
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
if (__skb_checksum_complete_head(skb, dataoff + cscov)) {
|
||||
if (LOG_INVALID(IPPROTO_UDPLITE))
|
||||
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
|
||||
"nf_ct_udplite: bad UDPLite "
|
||||
"checksum ");
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
if (nf_conntrack_checksum && hooknum == NF_INET_PRE_ROUTING &&
|
||||
nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
|
||||
pf)) {
|
||||
if (LOG_INVALID(IPPROTO_UDPLITE))
|
||||
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
|
||||
"nf_ct_udplite: bad UDPLite checksum ");
|
||||
return -NF_ACCEPT;
|
||||
}
|
||||
|
||||
return NF_ACCEPT;
|
||||
|
|
|
@ -72,7 +72,6 @@ static int help(struct sk_buff *skb,
|
|||
struct nf_conntrack_tuple *tuple;
|
||||
struct sane_request *req;
|
||||
struct sane_reply_net_start *reply;
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
|
||||
ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
|
||||
/* Until there's been traffic both ways, don't look in packets. */
|
||||
|
@ -143,12 +142,12 @@ static int help(struct sk_buff *skb,
|
|||
}
|
||||
|
||||
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, family,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&tuple->src.u3, &tuple->dst.u3,
|
||||
IPPROTO_TCP, NULL, &reply->port);
|
||||
|
||||
pr_debug("nf_ct_sane: expect: ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
|
||||
/* Can't expect this? Best to drop packet now. */
|
||||
if (nf_ct_expect_related(exp) != 0)
|
||||
|
|
|
@ -142,10 +142,10 @@ static int parse_addr(const struct nf_conn *ct, const char *cp,
|
|||
const char *limit)
|
||||
{
|
||||
const char *end;
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
int ret = 0;
|
||||
|
||||
switch (family) {
|
||||
memset(addr, 0, sizeof(*addr));
|
||||
switch (nf_ct_l3num(ct)) {
|
||||
case AF_INET:
|
||||
ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
|
||||
break;
|
||||
|
@ -739,7 +739,6 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
|
|||
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
|
||||
union nf_inet_addr *saddr;
|
||||
struct nf_conntrack_tuple tuple;
|
||||
int family = ct->tuplehash[!dir].tuple.src.l3num;
|
||||
int direct_rtp = 0, skip_expect = 0, ret = NF_DROP;
|
||||
u_int16_t base_port;
|
||||
__be16 rtp_port, rtcp_port;
|
||||
|
@ -769,7 +768,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
|
|||
memset(&tuple, 0, sizeof(tuple));
|
||||
if (saddr)
|
||||
tuple.src.u3 = *saddr;
|
||||
tuple.src.l3num = family;
|
||||
tuple.src.l3num = nf_ct_l3num(ct);
|
||||
tuple.dst.protonum = IPPROTO_UDP;
|
||||
tuple.dst.u3 = *daddr;
|
||||
tuple.dst.u.udp.port = port;
|
||||
|
@ -814,13 +813,13 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
|
|||
rtp_exp = nf_ct_expect_alloc(ct);
|
||||
if (rtp_exp == NULL)
|
||||
goto err1;
|
||||
nf_ct_expect_init(rtp_exp, class, family, saddr, daddr,
|
||||
nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr,
|
||||
IPPROTO_UDP, NULL, &rtp_port);
|
||||
|
||||
rtcp_exp = nf_ct_expect_alloc(ct);
|
||||
if (rtcp_exp == NULL)
|
||||
goto err2;
|
||||
nf_ct_expect_init(rtcp_exp, class, family, saddr, daddr,
|
||||
nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
|
||||
IPPROTO_UDP, NULL, &rtcp_port);
|
||||
|
||||
nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
|
||||
|
@ -870,7 +869,6 @@ static int process_sdp(struct sk_buff *skb,
|
|||
{
|
||||
enum ip_conntrack_info ctinfo;
|
||||
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
unsigned int matchoff, matchlen;
|
||||
unsigned int mediaoff, medialen;
|
||||
unsigned int sdpoff;
|
||||
|
@ -885,8 +883,8 @@ static int process_sdp(struct sk_buff *skb,
|
|||
typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
|
||||
|
||||
nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
|
||||
c_hdr = family == AF_INET ? SDP_HDR_CONNECTION_IP4 :
|
||||
SDP_HDR_CONNECTION_IP6;
|
||||
c_hdr = nf_ct_l3num(ct) == AF_INET ? SDP_HDR_CONNECTION_IP4 :
|
||||
SDP_HDR_CONNECTION_IP6;
|
||||
|
||||
/* Find beginning of session description */
|
||||
if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
|
||||
|
@ -1033,7 +1031,6 @@ static int process_register_request(struct sk_buff *skb,
|
|||
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
|
||||
struct nf_conn_help *help = nfct_help(ct);
|
||||
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
unsigned int matchoff, matchlen;
|
||||
struct nf_conntrack_expect *exp;
|
||||
union nf_inet_addr *saddr, daddr;
|
||||
|
@ -1088,8 +1085,8 @@ static int process_register_request(struct sk_buff *skb,
|
|||
if (sip_direct_signalling)
|
||||
saddr = &ct->tuplehash[!dir].tuple.src.u3;
|
||||
|
||||
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, family, saddr, &daddr,
|
||||
IPPROTO_UDP, NULL, &port);
|
||||
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
|
||||
saddr, &daddr, IPPROTO_UDP, NULL, &port);
|
||||
exp->timeout.expires = sip_timeout * HZ;
|
||||
exp->helper = nfct_help(ct)->helper;
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
|
||||
|
|
|
@ -127,21 +127,14 @@ static int ct_seq_show(struct seq_file *s, void *v)
|
|||
if (NF_CT_DIRECTION(hash))
|
||||
return 0;
|
||||
|
||||
l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
|
||||
.tuple.src.l3num);
|
||||
|
||||
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
|
||||
NF_CT_ASSERT(l3proto);
|
||||
l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
|
||||
.tuple.src.l3num,
|
||||
ct->tuplehash[IP_CT_DIR_ORIGINAL]
|
||||
.tuple.dst.protonum);
|
||||
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
|
||||
NF_CT_ASSERT(l4proto);
|
||||
|
||||
if (seq_printf(s, "%-8s %u %-8s %u %ld ",
|
||||
l3proto->name,
|
||||
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num,
|
||||
l4proto->name,
|
||||
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
|
||||
l3proto->name, nf_ct_l3num(ct),
|
||||
l4proto->name, nf_ct_protonum(ct),
|
||||
timer_pending(&ct->timeout)
|
||||
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
|
||||
return -ENOSPC;
|
||||
|
@ -295,6 +288,41 @@ static const struct file_operations ct_cpu_seq_fops = {
|
|||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
static int nf_conntrack_standalone_init_proc(void)
|
||||
{
|
||||
struct proc_dir_entry *pde;
|
||||
|
||||
pde = proc_net_fops_create(&init_net, "nf_conntrack", 0440, &ct_file_ops);
|
||||
if (!pde)
|
||||
goto out_nf_conntrack;
|
||||
pde = create_proc_entry("nf_conntrack", S_IRUGO, init_net.proc_net_stat);
|
||||
if (!pde)
|
||||
goto out_stat_nf_conntrack;
|
||||
pde->proc_fops = &ct_cpu_seq_fops;
|
||||
pde->owner = THIS_MODULE;
|
||||
return 0;
|
||||
|
||||
out_stat_nf_conntrack:
|
||||
proc_net_remove(&init_net, "nf_conntrack");
|
||||
out_nf_conntrack:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void nf_conntrack_standalone_fini_proc(void)
|
||||
{
|
||||
remove_proc_entry("nf_conntrack", init_net.proc_net_stat);
|
||||
proc_net_remove(&init_net, "nf_conntrack");
|
||||
}
|
||||
#else
|
||||
static int nf_conntrack_standalone_init_proc(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nf_conntrack_standalone_fini_proc(void)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
|
||||
/* Sysctl support */
|
||||
|
@ -390,60 +418,61 @@ static struct ctl_path nf_ct_path[] = {
|
|||
};
|
||||
|
||||
EXPORT_SYMBOL_GPL(nf_ct_log_invalid);
|
||||
|
||||
static int nf_conntrack_standalone_init_sysctl(void)
|
||||
{
|
||||
nf_ct_sysctl_header =
|
||||
register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
|
||||
if (nf_ct_sysctl_header == NULL) {
|
||||
printk("nf_conntrack: can't register to sysctl.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static void nf_conntrack_standalone_fini_sysctl(void)
|
||||
{
|
||||
unregister_sysctl_table(nf_ct_sysctl_header);
|
||||
}
|
||||
#else
|
||||
static int nf_conntrack_standalone_init_sysctl(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nf_conntrack_standalone_fini_sysctl(void)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
||||
static int __init nf_conntrack_standalone_init(void)
|
||||
{
|
||||
#ifdef CONFIG_PROC_FS
|
||||
struct proc_dir_entry *proc;
|
||||
#endif
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
ret = nf_conntrack_init();
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto out;
|
||||
ret = nf_conntrack_standalone_init_proc();
|
||||
if (ret < 0)
|
||||
goto out_proc;
|
||||
ret = nf_conntrack_standalone_init_sysctl();
|
||||
if (ret < 0)
|
||||
goto out_sysctl;
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
proc = proc_net_fops_create(&init_net, "nf_conntrack", 0440, &ct_file_ops);
|
||||
if (!proc) goto cleanup_init;
|
||||
|
||||
if (!proc_create("nf_conntrack", S_IRUGO,
|
||||
init_net.proc_net_stat, &ct_cpu_seq_fops))
|
||||
goto cleanup_proc;
|
||||
#endif
|
||||
#ifdef CONFIG_SYSCTL
|
||||
nf_ct_sysctl_header = register_sysctl_paths(nf_ct_path,
|
||||
nf_ct_netfilter_table);
|
||||
if (nf_ct_sysctl_header == NULL) {
|
||||
printk("nf_conntrack: can't register to sysctl.\n");
|
||||
ret = -ENOMEM;
|
||||
goto cleanup_proc_stat;
|
||||
}
|
||||
#endif
|
||||
return ret;
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
cleanup_proc_stat:
|
||||
#endif
|
||||
#ifdef CONFIG_PROC_FS
|
||||
remove_proc_entry("nf_conntrack", init_net. proc_net_stat);
|
||||
cleanup_proc:
|
||||
proc_net_remove(&init_net, "nf_conntrack");
|
||||
cleanup_init:
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
out_sysctl:
|
||||
nf_conntrack_standalone_fini_proc();
|
||||
out_proc:
|
||||
nf_conntrack_cleanup();
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit nf_conntrack_standalone_fini(void)
|
||||
{
|
||||
#ifdef CONFIG_SYSCTL
|
||||
unregister_sysctl_table(nf_ct_sysctl_header);
|
||||
#endif
|
||||
#ifdef CONFIG_PROC_FS
|
||||
remove_proc_entry("nf_conntrack", init_net.proc_net_stat);
|
||||
proc_net_remove(&init_net, "nf_conntrack");
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
nf_conntrack_standalone_fini_sysctl();
|
||||
nf_conntrack_standalone_fini_proc();
|
||||
nf_conntrack_cleanup();
|
||||
}
|
||||
|
||||
|
|
|
@ -44,7 +44,6 @@ static int tftp_help(struct sk_buff *skb,
|
|||
struct nf_conntrack_expect *exp;
|
||||
struct nf_conntrack_tuple *tuple;
|
||||
unsigned int ret = NF_ACCEPT;
|
||||
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
|
||||
typeof(nf_nat_tftp_hook) nf_nat_tftp;
|
||||
|
||||
tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr),
|
||||
|
@ -56,19 +55,20 @@ static int tftp_help(struct sk_buff *skb,
|
|||
case TFTP_OPCODE_READ:
|
||||
case TFTP_OPCODE_WRITE:
|
||||
/* RRQ and WRQ works the same way */
|
||||
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
|
||||
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
|
||||
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
|
||||
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
|
||||
|
||||
exp = nf_ct_expect_alloc(ct);
|
||||
if (exp == NULL)
|
||||
return NF_DROP;
|
||||
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, family,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
|
||||
nf_ct_l3num(ct),
|
||||
&tuple->src.u3, &tuple->dst.u3,
|
||||
IPPROTO_UDP, NULL, &tuple->dst.u.udp.port);
|
||||
|
||||
pr_debug("expect: ");
|
||||
NF_CT_DUMP_TUPLE(&exp->tuple);
|
||||
nf_ct_dump_tuple(&exp->tuple);
|
||||
|
||||
nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
|
||||
if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
|
||||
|
|
|
@ -58,7 +58,7 @@ static struct xt_af *xt;
|
|||
#define duprintf(format, args...)
|
||||
#endif
|
||||
|
||||
static const char *xt_prefix[NPROTO] = {
|
||||
static const char *const xt_prefix[NPROTO] = {
|
||||
[AF_INET] = "ip",
|
||||
[AF_INET6] = "ip6",
|
||||
[NF_ARP] = "arp",
|
||||
|
@ -248,7 +248,7 @@ EXPORT_SYMBOL_GPL(xt_request_find_target);
|
|||
|
||||
static int match_revfn(int af, const char *name, u8 revision, int *bestp)
|
||||
{
|
||||
struct xt_match *m;
|
||||
const struct xt_match *m;
|
||||
int have_rev = 0;
|
||||
|
||||
list_for_each_entry(m, &xt[af].match, list) {
|
||||
|
@ -264,7 +264,7 @@ static int match_revfn(int af, const char *name, u8 revision, int *bestp)
|
|||
|
||||
static int target_revfn(int af, const char *name, u8 revision, int *bestp)
|
||||
{
|
||||
struct xt_target *t;
|
||||
const struct xt_target *t;
|
||||
int have_rev = 0;
|
||||
|
||||
list_for_each_entry(t, &xt[af].target, list) {
|
||||
|
@ -385,7 +385,7 @@ short xt_compat_calc_jump(int af, unsigned int offset)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
|
||||
|
||||
int xt_compat_match_offset(struct xt_match *match)
|
||||
int xt_compat_match_offset(const struct xt_match *match)
|
||||
{
|
||||
u_int16_t csize = match->compatsize ? : match->matchsize;
|
||||
return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
|
||||
|
@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(xt_compat_match_offset);
|
|||
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
|
||||
unsigned int *size)
|
||||
{
|
||||
struct xt_match *match = m->u.kernel.match;
|
||||
const struct xt_match *match = m->u.kernel.match;
|
||||
struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
|
||||
int pad, off = xt_compat_match_offset(match);
|
||||
u_int16_t msize = cm->u.user.match_size;
|
||||
|
@ -422,7 +422,7 @@ EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
|
|||
int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
|
||||
unsigned int *size)
|
||||
{
|
||||
struct xt_match *match = m->u.kernel.match;
|
||||
const struct xt_match *match = m->u.kernel.match;
|
||||
struct compat_xt_entry_match __user *cm = *dstptr;
|
||||
int off = xt_compat_match_offset(match);
|
||||
u_int16_t msize = m->u.user.match_size - off;
|
||||
|
@ -479,7 +479,7 @@ int xt_check_target(const struct xt_target *target, unsigned short family,
|
|||
EXPORT_SYMBOL_GPL(xt_check_target);
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
int xt_compat_target_offset(struct xt_target *target)
|
||||
int xt_compat_target_offset(const struct xt_target *target)
|
||||
{
|
||||
u_int16_t csize = target->compatsize ? : target->targetsize;
|
||||
return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
|
||||
|
@ -489,7 +489,7 @@ EXPORT_SYMBOL_GPL(xt_compat_target_offset);
|
|||
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
|
||||
unsigned int *size)
|
||||
{
|
||||
struct xt_target *target = t->u.kernel.target;
|
||||
const struct xt_target *target = t->u.kernel.target;
|
||||
struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
|
||||
int pad, off = xt_compat_target_offset(target);
|
||||
u_int16_t tsize = ct->u.user.target_size;
|
||||
|
@ -515,7 +515,7 @@ EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
|
|||
int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
|
||||
unsigned int *size)
|
||||
{
|
||||
struct xt_target *target = t->u.kernel.target;
|
||||
const struct xt_target *target = t->u.kernel.target;
|
||||
struct compat_xt_entry_target __user *ct = *dstptr;
|
||||
int off = xt_compat_target_offset(target);
|
||||
u_int16_t tsize = t->u.user.target_size - off;
|
||||
|
|
|
@ -55,7 +55,7 @@ static void secmark_save(const struct sk_buff *skb)
|
|||
static void secmark_restore(struct sk_buff *skb)
|
||||
{
|
||||
if (!skb->secmark) {
|
||||
struct nf_conn *ct;
|
||||
const struct nf_conn *ct;
|
||||
enum ip_conntrack_info ctinfo;
|
||||
|
||||
ct = nf_ct_get(skb, &ctinfo);
|
||||
|
|
|
@ -96,7 +96,7 @@ xt_rateest_tg_checkentry(const char *tablename,
|
|||
void *targinfo,
|
||||
unsigned int hook_mask)
|
||||
{
|
||||
struct xt_rateest_target_info *info = (void *)targinfo;
|
||||
struct xt_rateest_target_info *info = targinfo;
|
||||
struct xt_rateest *est;
|
||||
struct {
|
||||
struct nlattr opt;
|
||||
|
|
|
@ -72,9 +72,7 @@ connlimit_iphash6(const union nf_inet_addr *addr,
|
|||
|
||||
static inline bool already_closed(const struct nf_conn *conn)
|
||||
{
|
||||
u_int16_t proto = conn->tuplehash[0].tuple.dst.protonum;
|
||||
|
||||
if (proto == IPPROTO_TCP)
|
||||
if (nf_ct_protonum(conn) == IPPROTO_TCP)
|
||||
return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT;
|
||||
else
|
||||
return 0;
|
||||
|
@ -106,10 +104,10 @@ static int count_them(struct xt_connlimit_data *data,
|
|||
const union nf_inet_addr *mask,
|
||||
const struct xt_match *match)
|
||||
{
|
||||
struct nf_conntrack_tuple_hash *found;
|
||||
const struct nf_conntrack_tuple_hash *found;
|
||||
struct xt_connlimit_conn *conn;
|
||||
struct xt_connlimit_conn *tmp;
|
||||
struct nf_conn *found_ct;
|
||||
const struct nf_conn *found_ct;
|
||||
struct list_head *hash;
|
||||
bool addit = true;
|
||||
int matches = 0;
|
||||
|
@ -256,7 +254,7 @@ connlimit_mt_check(const char *tablename, const void *ip,
|
|||
static void
|
||||
connlimit_mt_destroy(const struct xt_match *match, void *matchinfo)
|
||||
{
|
||||
struct xt_connlimit_info *info = matchinfo;
|
||||
const struct xt_connlimit_info *info = matchinfo;
|
||||
struct xt_connlimit_conn *conn;
|
||||
struct xt_connlimit_conn *tmp;
|
||||
struct list_head *hash = info->data->iphash;
|
||||
|
|
|
@ -65,7 +65,7 @@ conntrack_mt_v0(const struct sk_buff *skb, const struct net_device *in,
|
|||
}
|
||||
|
||||
if (sinfo->flags & XT_CONNTRACK_PROTO &&
|
||||
FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum !=
|
||||
FWINV(nf_ct_protonum(ct) !=
|
||||
sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
|
||||
XT_CONNTRACK_PROTO))
|
||||
return false;
|
||||
|
@ -174,7 +174,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
|
|||
|
||||
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
|
||||
if ((info->match_flags & XT_CONNTRACK_PROTO) &&
|
||||
(tuple->dst.protonum == info->l4proto) ^
|
||||
(nf_ct_protonum(ct) == info->l4proto) ^
|
||||
!(info->invert_flags & XT_CONNTRACK_PROTO))
|
||||
return false;
|
||||
|
||||
|
|
|
@ -98,7 +98,8 @@ dccp_mt(const struct sk_buff *skb, const struct net_device *in,
|
|||
const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop)
|
||||
{
|
||||
const struct xt_dccp_info *info = matchinfo;
|
||||
struct dccp_hdr _dh, *dh;
|
||||
const struct dccp_hdr *dh;
|
||||
struct dccp_hdr _dh;
|
||||
|
||||
if (offset)
|
||||
return false;
|
||||
|
|
|
@ -47,7 +47,8 @@ esp_mt(const struct sk_buff *skb, const struct net_device *in,
|
|||
const struct net_device *out, const struct xt_match *match,
|
||||
const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop)
|
||||
{
|
||||
struct ip_esp_hdr _esp, *eh;
|
||||
const struct ip_esp_hdr *eh;
|
||||
struct ip_esp_hdr _esp;
|
||||
const struct xt_esp *espinfo = matchinfo;
|
||||
|
||||
/* Must not be a fragment. */
|
||||
|
|
|
@ -100,7 +100,8 @@ multiport_mt_v0(const struct sk_buff *skb, const struct net_device *in,
|
|||
const void *matchinfo, int offset, unsigned int protoff,
|
||||
bool *hotdrop)
|
||||
{
|
||||
__be16 _ports[2], *pptr;
|
||||
const __be16 *pptr;
|
||||
__be16 _ports[2];
|
||||
const struct xt_multiport *multiinfo = matchinfo;
|
||||
|
||||
if (offset)
|
||||
|
@ -126,7 +127,8 @@ multiport_mt(const struct sk_buff *skb, const struct net_device *in,
|
|||
const void *matchinfo, int offset, unsigned int protoff,
|
||||
bool *hotdrop)
|
||||
{
|
||||
__be16 _ports[2], *pptr;
|
||||
const __be16 *pptr;
|
||||
__be16 _ports[2];
|
||||
const struct xt_multiport_v1 *multiinfo = matchinfo;
|
||||
|
||||
if (offset)
|
||||
|
|
|
@ -136,7 +136,7 @@ policy_mt_check(const char *tablename, const void *ip_void,
|
|||
const struct xt_match *match, void *matchinfo,
|
||||
unsigned int hook_mask)
|
||||
{
|
||||
struct xt_policy_info *info = matchinfo;
|
||||
const struct xt_policy_info *info = matchinfo;
|
||||
|
||||
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
|
||||
printk(KERN_ERR "xt_policy: neither incoming nor "