Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for net-next
to extend ctnetlink and the flowtable infrastructure:

1) Extend ctnetlink kernel-side netlink dump filtering capabilities,
   from Romain Bellan.

2) Generalise the flowtable hook parser to take a hook list.

3) Pass a hook list to the flowtable hook registration/unregistration.

4) Add a helper function to release the flowtable hook list.

5) Update the flowtable event notifier to pass a flowtable hook list.

6) Allow users to add new devices to an existing flowtable.

7) Allow users to remove devices from an existing flowtable.

8) Allow for registering a flowtable with no initial devices.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit af0a2482fa
Author: David S. Miller <davem@davemloft.net>
Date:   2020-06-01 11:46:30 -07:00

9 changed files with 666 additions and 133 deletions
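
As a rough illustration of how the new ctnetlink dump filtering (item 1 above, plus the CTA_FILTER plumbing in the diff below) is meant to be driven from userspace, here is a minimal, untested libmnl sketch that requests a conntrack dump limited to entries whose original source address is 192.0.2.1. The attribute names come from the UAPI additions in this series; the CTA_FILTER_F_CTA_IP_SRC bit value is copied from the kernel-internal nf_internals.h definitions and, like the program structure itself, is my assumption rather than anything shipped with the patchset.

/*
 * Hypothetical userspace sketch, not part of this series: ask ctnetlink
 * for a dump restricted to conntrack entries whose original source
 * address is 192.0.2.1, using the new CTA_FILTER attribute.  Built on
 * libmnl; error handling trimmed for brevity.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <time.h>

#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

/* Assumed to mirror the kernel-internal CTA_FILTER_F_CTA_IP_SRC bit
 * added to net/netfilter/nf_internals.h by this series. */
#define CTA_FILTER_F_CTA_IP_SRC	(1 << 0)

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfh;
	struct nlattr *filter, *tuple, *ip;
	struct in_addr addr;

	inet_pton(AF_INET, "192.0.2.1", &addr);

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq = time(NULL);

	nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfh));
	nfh->nfgen_family = AF_INET;
	nfh->version = NFNETLINK_V0;
	nfh->res_id = 0;

	/* Tell the kernel which fields of the original tuple to compare. */
	filter = mnl_attr_nest_start(nlh, CTA_FILTER);
	mnl_attr_put_u32(nlh, CTA_FILTER_ORIG_FLAGS, CTA_FILTER_F_CTA_IP_SRC);
	mnl_attr_nest_end(nlh, filter);

	/* The values to compare against, in the usual CTA_TUPLE_ORIG layout;
	 * only the attributes selected by the flags above are required. */
	tuple = mnl_attr_nest_start(nlh, CTA_TUPLE_ORIG);
	ip = mnl_attr_nest_start(nlh, CTA_TUPLE_IP);
	mnl_attr_put_u32(nlh, CTA_IP_V4_SRC, addr.s_addr);
	mnl_attr_nest_end(nlh, ip);
	mnl_attr_nest_end(nlh, tuple);

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
		perror("mnl_socket");
		return 1;
	}
	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
		perror("mnl_socket_sendto");
		return 1;
	}
	/* Dump replies would then be read with mnl_socket_recvfrom() and
	 * walked with mnl_cb_run(); omitted here. */
	mnl_socket_close(nl);
	return 0;
}

On the flowtable side (items 2-8), no new netlink attributes are introduced: nft_flowtable_parse_hook() in the diff below simply starts accepting an NFTA_FLOWTABLE_HOOK nest that carries only NFTA_FLOWTABLE_HOOK_DEVS, which is how devices get added to, or removed from, an existing flowtable.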

@ -42,7 +42,8 @@ struct nf_conntrack_l4proto {
/* Calculate tuple nlattr size */
unsigned int (*nlattr_tuple_size)(void);
int (*nlattr_to_tuple)(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
struct nf_conntrack_tuple *t,
u_int32_t flags);
const struct nla_policy *nla_policy;
struct {
@ -152,7 +153,8 @@ const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto);
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple);
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
struct nf_conntrack_tuple *t,
u_int32_t flags);
unsigned int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];

@ -1002,6 +1002,7 @@ struct nft_stats {
struct nft_hook {
struct list_head list;
bool inactive;
struct nf_hook_ops ops;
struct rcu_head rcu;
};
@ -1481,10 +1482,16 @@ struct nft_trans_obj {
struct nft_trans_flowtable {
struct nft_flowtable *flowtable;
bool update;
struct list_head hook_list;
};
#define nft_trans_flowtable(trans) \
(((struct nft_trans_flowtable *)trans->data)->flowtable)
#define nft_trans_flowtable_update(trans) \
(((struct nft_trans_flowtable *)trans->data)->update)
#define nft_trans_flowtable_hooks(trans) \
(((struct nft_trans_flowtable *)trans->data)->hook_list)
int __init nft_chain_filter_init(void);
void nft_chain_filter_fini(void);

@ -55,6 +55,7 @@ enum ctattr_type {
CTA_LABELS,
CTA_LABELS_MASK,
CTA_SYNPROXY,
CTA_FILTER,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@ -276,4 +277,12 @@ enum ctattr_expect_stats {
};
#define CTA_STATS_EXP_MAX (__CTA_STATS_EXP_MAX - 1)
enum ctattr_filter {
CTA_FILTER_UNSPEC,
CTA_FILTER_ORIG_FLAGS,
CTA_FILTER_REPLY_FLAGS,
__CTA_FILTER_MAX
};
#define CTA_FILTER_MAX (__CTA_FILTER_MAX - 1)
#endif /* _IPCONNTRACK_NETLINK_H */

@ -1974,13 +1974,22 @@ const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t)
struct nf_conntrack_tuple *t,
u_int32_t flags)
{
if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
return -EINVAL;
if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
if (!tb[CTA_PROTO_SRC_PORT])
return -EINVAL;
t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
if (!tb[CTA_PROTO_DST_PORT])
return -EINVAL;
t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
}
return 0;
}

@ -54,6 +54,8 @@
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include "nf_internals.h"
MODULE_LICENSE("GPL");
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
@ -544,14 +546,16 @@ static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct nf_conn *ct, bool extinfo)
struct nf_conn *ct, bool extinfo, unsigned int flags)
{
const struct nf_conntrack_zone *zone;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
unsigned int event;
if (portid)
flags |= NLM_F_MULTI;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
if (nlh == NULL)
@ -847,17 +851,70 @@ static int ctnetlink_done(struct netlink_callback *cb)
}
struct ctnetlink_filter {
u_int32_t cta_flags;
u8 family;
u_int32_t orig_flags;
u_int32_t reply_flags;
struct nf_conntrack_tuple orig;
struct nf_conntrack_tuple reply;
struct nf_conntrack_zone zone;
struct {
u_int32_t val;
u_int32_t mask;
} mark;
};
static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
[CTA_FILTER_ORIG_FLAGS] = { .type = NLA_U32 },
[CTA_FILTER_REPLY_FLAGS] = { .type = NLA_U32 },
};
static int ctnetlink_parse_filter(const struct nlattr *attr,
struct ctnetlink_filter *filter)
{
struct nlattr *tb[CTA_FILTER_MAX + 1];
int ret = 0;
ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
NULL);
if (ret)
return ret;
if (tb[CTA_FILTER_ORIG_FLAGS]) {
filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
if (filter->orig_flags & ~CTA_FILTER_F_ALL)
return -EOPNOTSUPP;
}
if (tb[CTA_FILTER_REPLY_FLAGS]) {
filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
if (filter->reply_flags & ~CTA_FILTER_F_ALL)
return -EOPNOTSUPP;
}
return 0;
}
static int ctnetlink_parse_zone(const struct nlattr *attr,
struct nf_conntrack_zone *zone);
static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
u32 type, u_int8_t l3num,
struct nf_conntrack_zone *zone,
u_int32_t flags);
/* applied on filters */
#define CTA_FILTER_F_CTA_MARK (1 << 0)
#define CTA_FILTER_F_CTA_MARK_MASK (1 << 1)
static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
struct ctnetlink_filter *filter;
int err;
#ifndef CONFIG_NF_CONNTRACK_MARK
if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
@ -871,14 +928,65 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
filter->family = family;
#ifdef CONFIG_NF_CONNTRACK_MARK
if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
if (cda[CTA_MARK]) {
filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK);
if (cda[CTA_MARK_MASK]) {
filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK_MASK);
} else {
filter->mark.mask = 0xffffffff;
}
} else if (cda[CTA_MARK_MASK]) {
return ERR_PTR(-EINVAL);
}
#endif
if (!cda[CTA_FILTER])
return filter;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
if (err < 0)
return ERR_PTR(err);
err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
if (err < 0)
return ERR_PTR(err);
if (filter->orig_flags) {
if (!cda[CTA_TUPLE_ORIG])
return ERR_PTR(-EINVAL);
err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
CTA_TUPLE_ORIG,
filter->family,
&filter->zone,
filter->orig_flags);
if (err < 0)
return ERR_PTR(err);
}
if (filter->reply_flags) {
if (!cda[CTA_TUPLE_REPLY])
return ERR_PTR(-EINVAL);
err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
CTA_TUPLE_REPLY,
filter->family,
&filter->zone,
filter->orig_flags);
if (err < 0)
return ERR_PTR(err);
}
return filter;
}
static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
{
return family || cda[CTA_MARK] || cda[CTA_FILTER];
}
static int ctnetlink_start(struct netlink_callback *cb)
{
const struct nlattr * const *cda = cb->data;
@ -886,7 +994,7 @@ static int ctnetlink_start(struct netlink_callback *cb)
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u8 family = nfmsg->nfgen_family;
if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
if (ctnetlink_needs_filter(family, cda)) {
filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
@ -896,9 +1004,79 @@ static int ctnetlink_start(struct netlink_callback *cb)
return 0;
}
static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
struct nf_conntrack_tuple *ct_tuple,
u_int32_t flags, int family)
{
switch (family) {
case NFPROTO_IPV4:
if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
return 0;
break;
case NFPROTO_IPV6:
if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
!ipv6_addr_cmp(&filter_tuple->src.u3.in6,
&ct_tuple->src.u3.in6))
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
!ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
&ct_tuple->dst.u3.in6))
return 0;
break;
}
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
filter_tuple->dst.protonum != ct_tuple->dst.protonum)
return 0;
switch (ct_tuple->dst.protonum) {
case IPPROTO_TCP:
case IPPROTO_UDP:
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
return 0;
break;
case IPPROTO_ICMP:
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
return 0;
break;
case IPPROTO_ICMPV6:
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
return 0;
if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
return 0;
break;
}
return 1;
}
static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
{
struct ctnetlink_filter *filter = data;
struct nf_conntrack_tuple *tuple;
if (filter == NULL)
goto out;
@ -910,8 +1088,28 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
if (filter->family && nf_ct_l3num(ct) != filter->family)
goto ignore_entry;
if (filter->orig_flags) {
tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
filter->orig_flags,
filter->family))
goto ignore_entry;
}
if (filter->reply_flags) {
tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
filter->reply_flags,
filter->family))
goto ignore_entry;
}
#ifdef CONFIG_NF_CONNTRACK_MARK
if ((ct->mark & filter->mark.mask) != filter->mark.val)
if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK_MASK)) &&
(ct->mark & filter->mark.mask) != filter->mark.val)
goto ignore_entry;
else if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK)) &&
ct->mark != filter->mark.val)
goto ignore_entry;
#endif
@ -925,6 +1123,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
struct net *net = sock_net(skb->sk);
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
@ -979,7 +1178,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct, true);
ct, true, flags);
if (res < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
@ -1014,31 +1213,50 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
}
static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t)
struct nf_conntrack_tuple *t,
u_int32_t flags)
{
if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
return -EINVAL;
if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
if (!tb[CTA_IP_V4_SRC])
return -EINVAL;
t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
}
if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
if (!tb[CTA_IP_V4_DST])
return -EINVAL;
t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
}
return 0;
}
static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t)
struct nf_conntrack_tuple *t,
u_int32_t flags)
{
if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
return -EINVAL;
if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
if (!tb[CTA_IP_V6_SRC])
return -EINVAL;
t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
}
if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
if (!tb[CTA_IP_V6_DST])
return -EINVAL;
t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
}
return 0;
}
static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
struct nf_conntrack_tuple *tuple)
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
struct nlattr *tb[CTA_IP_MAX+1];
int ret = 0;
@ -1054,10 +1272,10 @@ static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
switch (tuple->src.l3num) {
case NFPROTO_IPV4:
ret = ipv4_nlattr_to_tuple(tb, tuple);
ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
break;
case NFPROTO_IPV6:
ret = ipv6_nlattr_to_tuple(tb, tuple);
ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
break;
}
@ -1069,7 +1287,8 @@ static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
};
static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
struct nf_conntrack_tuple *tuple)
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
const struct nf_conntrack_l4proto *l4proto;
struct nlattr *tb[CTA_PROTO_MAX+1];
@ -1080,8 +1299,12 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
if (ret < 0)
return ret;
if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
return 0;
if (!tb[CTA_PROTO_NUM])
return -EINVAL;
tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
rcu_read_lock();
@ -1092,7 +1315,7 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
l4proto->nla_policy,
NULL);
if (ret == 0)
ret = l4proto->nlattr_to_tuple(tb, tuple);
ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
}
rcu_read_unlock();
@ -1143,10 +1366,21 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
[CTA_TUPLE_ZONE] = { .type = NLA_U16 },
};
#define CTA_FILTER_F_ALL_CTA_PROTO \
(CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
CTA_FILTER_F_CTA_PROTO_DST_PORT | \
CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple, u32 type,
u_int8_t l3num, struct nf_conntrack_zone *zone)
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple, u32 type,
u_int8_t l3num, struct nf_conntrack_zone *zone,
u_int32_t flags)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
@ -1158,23 +1392,32 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
if (err < 0)
return err;
if (!tb[CTA_TUPLE_IP])
return -EINVAL;
tuple->src.l3num = l3num;
err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
if (err < 0)
return err;
if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
if (!tb[CTA_TUPLE_IP])
return -EINVAL;
if (!tb[CTA_TUPLE_PROTO])
err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
if (err < 0)
return err;
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
if (!tb[CTA_TUPLE_PROTO])
return -EINVAL;
err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
if (err < 0)
return err;
} else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
/* Can't manage proto flags without a protonum */
return -EINVAL;
}
err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
if (err < 0)
return err;
if (tb[CTA_TUPLE_ZONE]) {
if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
if (!zone)
return -EINVAL;
@ -1193,6 +1436,15 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
return 0;
}
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple, u32 type,
u_int8_t l3num, struct nf_conntrack_zone *zone)
{
return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
CTA_FILTER_FLAG(ALL));
}
static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
[CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN - 1 },
@ -1240,6 +1492,7 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
.len = NF_CT_LABELS_MAX_SIZE },
[CTA_LABELS_MASK] = { .type = NLA_BINARY,
.len = NF_CT_LABELS_MAX_SIZE },
[CTA_FILTER] = { .type = NLA_NESTED },
};
static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
@ -1256,7 +1509,10 @@ static int ctnetlink_flush_conntrack(struct net *net,
{
struct ctnetlink_filter *filter = NULL;
if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
if (ctnetlink_needs_filter(family, cda)) {
if (cda[CTA_FILTER])
return -EOPNOTSUPP;
filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
@ -1385,7 +1641,7 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
}
err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type), ct, true);
NFNL_MSG_TYPE(nlh->nlmsg_type), ct, true, 0);
nf_ct_put(ct);
if (err <= 0)
goto free;
@ -1458,7 +1714,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct, dying ? true : false);
ct, dying ? true : false, 0);
if (res < 0) {
if (!atomic_inc_not_zero(&ct->ct_general.use))
continue;

@ -20,6 +20,8 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>
#include "nf_internals.h"
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
@ -271,20 +273,32 @@ static const struct nla_policy icmp_nla_policy[CTA_PROTO_MAX+1] = {
};
static int icmp_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *tuple)
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
if (!tb[CTA_PROTO_ICMP_TYPE] ||
!tb[CTA_PROTO_ICMP_CODE] ||
!tb[CTA_PROTO_ICMP_ID])
return -EINVAL;
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) {
if (!tb[CTA_PROTO_ICMP_TYPE])
return -EINVAL;
tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]);
tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]);
tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]);
tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]);
if (tuple->dst.u.icmp.type >= sizeof(invmap) ||
!invmap[tuple->dst.u.icmp.type])
return -EINVAL;
}
if (tuple->dst.u.icmp.type >= sizeof(invmap) ||
!invmap[tuple->dst.u.icmp.type])
return -EINVAL;
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) {
if (!tb[CTA_PROTO_ICMP_CODE])
return -EINVAL;
tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]);
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) {
if (!tb[CTA_PROTO_ICMP_ID])
return -EINVAL;
tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]);
}
return 0;
}

@ -24,6 +24,8 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>
#include "nf_internals.h"
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
@ -193,21 +195,33 @@ static const struct nla_policy icmpv6_nla_policy[CTA_PROTO_MAX+1] = {
};
static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *tuple)
struct nf_conntrack_tuple *tuple,
u_int32_t flags)
{
if (!tb[CTA_PROTO_ICMPV6_TYPE] ||
!tb[CTA_PROTO_ICMPV6_CODE] ||
!tb[CTA_PROTO_ICMPV6_ID])
return -EINVAL;
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) {
if (!tb[CTA_PROTO_ICMPV6_TYPE])
return -EINVAL;
tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]);
tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]);
tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]);
tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]);
if (tuple->dst.u.icmp.type < 128 ||
tuple->dst.u.icmp.type - 128 >= sizeof(invmap) ||
!invmap[tuple->dst.u.icmp.type - 128])
return -EINVAL;
}
if (tuple->dst.u.icmp.type < 128 ||
tuple->dst.u.icmp.type - 128 >= sizeof(invmap) ||
!invmap[tuple->dst.u.icmp.type - 128])
return -EINVAL;
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) {
if (!tb[CTA_PROTO_ICMPV6_CODE])
return -EINVAL;
tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]);
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) {
if (!tb[CTA_PROTO_ICMPV6_ID])
return -EINVAL;
tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]);
}
return 0;
}

@ -6,6 +6,23 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
/* nf_conntrack_netlink.c: applied on tuple filters */
#define CTA_FILTER_F_CTA_IP_SRC (1 << 0)
#define CTA_FILTER_F_CTA_IP_DST (1 << 1)
#define CTA_FILTER_F_CTA_TUPLE_ZONE (1 << 2)
#define CTA_FILTER_F_CTA_PROTO_NUM (1 << 3)
#define CTA_FILTER_F_CTA_PROTO_SRC_PORT (1 << 4)
#define CTA_FILTER_F_CTA_PROTO_DST_PORT (1 << 5)
#define CTA_FILTER_F_CTA_PROTO_ICMP_TYPE (1 << 6)
#define CTA_FILTER_F_CTA_PROTO_ICMP_CODE (1 << 7)
#define CTA_FILTER_F_CTA_PROTO_ICMP_ID (1 << 8)
#define CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE (1 << 9)
#define CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE (1 << 10)
#define CTA_FILTER_F_CTA_PROTO_ICMPV6_ID (1 << 11)
#define CTA_FILTER_F_MAX (1 << 12)
#define CTA_FILTER_F_ALL (CTA_FILTER_F_MAX-1)
#define CTA_FILTER_FLAG(ctattr) CTA_FILTER_F_ ## ctattr
/* nf_queue.c */
void nf_queue_nf_hook_drop(struct net *net);

@ -1669,6 +1669,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
goto err_hook_dev;
}
hook->ops.dev = dev;
hook->inactive = false;
return hook;
@ -1678,17 +1679,17 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
return ERR_PTR(err);
}
static bool nft_hook_list_find(struct list_head *hook_list,
const struct nft_hook *this)
static struct nft_hook *nft_hook_list_find(struct list_head *hook_list,
const struct nft_hook *this)
{
struct nft_hook *hook;
list_for_each_entry(hook, hook_list, list) {
if (this->ops.dev == hook->ops.dev)
return true;
return hook;
}
return false;
return NULL;
}
static int nf_tables_parse_netdev_hooks(struct net *net,
@ -1723,8 +1724,6 @@ static int nf_tables_parse_netdev_hooks(struct net *net,
goto err_hook;
}
}
if (!n)
return -EINVAL;
return 0;
@ -1761,6 +1760,9 @@ static int nft_chain_parse_netdev(struct net *net,
hook_list);
if (err < 0)
return err;
if (list_empty(hook_list))
return -EINVAL;
} else {
return -EINVAL;
}
@ -6178,50 +6180,77 @@ nft_flowtable_lookup_byhandle(const struct nft_table *table,
return ERR_PTR(-ENOENT);
}
struct nft_flowtable_hook {
u32 num;
int priority;
struct list_head list;
};
static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX + 1] = {
[NFTA_FLOWTABLE_HOOK_NUM] = { .type = NLA_U32 },
[NFTA_FLOWTABLE_HOOK_PRIORITY] = { .type = NLA_U32 },
[NFTA_FLOWTABLE_HOOK_DEVS] = { .type = NLA_NESTED },
};
static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
const struct nlattr *attr,
struct nft_flowtable *flowtable)
static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
const struct nlattr *attr,
struct nft_flowtable_hook *flowtable_hook,
struct nft_flowtable *flowtable, bool add)
{
struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
struct nft_hook *hook;
int hooknum, priority;
int err;
INIT_LIST_HEAD(&flowtable_hook->list);
err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
nft_flowtable_hook_policy, NULL);
if (err < 0)
return err;
if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
!tb[NFTA_FLOWTABLE_HOOK_PRIORITY] ||
!tb[NFTA_FLOWTABLE_HOOK_DEVS])
return -EINVAL;
if (add) {
if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
!tb[NFTA_FLOWTABLE_HOOK_PRIORITY])
return -EINVAL;
hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
if (hooknum != NF_NETDEV_INGRESS)
return -EINVAL;
hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
if (hooknum != NF_NETDEV_INGRESS)
return -EOPNOTSUPP;
priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
err = nf_tables_parse_netdev_hooks(ctx->net,
tb[NFTA_FLOWTABLE_HOOK_DEVS],
&flowtable->hook_list);
if (err < 0)
return err;
flowtable_hook->priority = priority;
flowtable_hook->num = hooknum;
} else {
if (tb[NFTA_FLOWTABLE_HOOK_NUM]) {
hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
if (hooknum != flowtable->hooknum)
return -EOPNOTSUPP;
}
flowtable->hooknum = hooknum;
flowtable->data.priority = priority;
if (tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) {
priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
if (priority != flowtable->data.priority)
return -EOPNOTSUPP;
}
list_for_each_entry(hook, &flowtable->hook_list, list) {
flowtable_hook->priority = flowtable->data.priority;
flowtable_hook->num = flowtable->hooknum;
}
if (tb[NFTA_FLOWTABLE_HOOK_DEVS]) {
err = nf_tables_parse_netdev_hooks(ctx->net,
tb[NFTA_FLOWTABLE_HOOK_DEVS],
&flowtable_hook->list);
if (err < 0)
return err;
}
list_for_each_entry(hook, &flowtable_hook->list, list) {
hook->ops.pf = NFPROTO_NETDEV;
hook->ops.hooknum = hooknum;
hook->ops.priority = priority;
hook->ops.hooknum = flowtable_hook->num;
hook->ops.priority = flowtable_hook->priority;
hook->ops.priv = &flowtable->data;
hook->ops.hook = flowtable->data.type->hook;
}
@ -6270,23 +6299,24 @@ static void nft_unregister_flowtable_hook(struct net *net,
}
static void nft_unregister_flowtable_net_hooks(struct net *net,
struct nft_flowtable *flowtable)
struct list_head *hook_list)
{
struct nft_hook *hook;
list_for_each_entry(hook, &flowtable->hook_list, list)
list_for_each_entry(hook, hook_list, list)
nf_unregister_net_hook(net, &hook->ops);
}
static int nft_register_flowtable_net_hooks(struct net *net,
struct nft_table *table,
struct list_head *hook_list,
struct nft_flowtable *flowtable)
{
struct nft_hook *hook, *hook2, *next;
struct nft_flowtable *ft;
int err, i = 0;
list_for_each_entry(hook, &flowtable->hook_list, list) {
list_for_each_entry(hook, hook_list, list) {
list_for_each_entry(ft, &table->flowtables, list) {
list_for_each_entry(hook2, &ft->hook_list, list) {
if (hook->ops.dev == hook2->ops.dev &&
@ -6317,7 +6347,7 @@ static int nft_register_flowtable_net_hooks(struct net *net,
return 0;
err_unregister_net_hooks:
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
list_for_each_entry_safe(hook, next, hook_list, list) {
if (i-- <= 0)
break;
@ -6329,6 +6359,72 @@ static int nft_register_flowtable_net_hooks(struct net *net,
return err;
}
static void nft_flowtable_hooks_destroy(struct list_head *hook_list)
{
struct nft_hook *hook, *next;
list_for_each_entry_safe(hook, next, hook_list, list) {
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
}
static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
struct nft_flowtable *flowtable)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_flowtable_hook flowtable_hook;
struct nft_hook *hook, *next;
struct nft_trans *trans;
bool unregister = false;
int err;
err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
&flowtable_hook, flowtable, false);
if (err < 0)
return err;
list_for_each_entry_safe(hook, next, &flowtable_hook.list, list) {
if (nft_hook_list_find(&flowtable->hook_list, hook)) {
list_del(&hook->list);
kfree(hook);
}
}
err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
&flowtable_hook.list, flowtable);
if (err < 0)
goto err_flowtable_update_hook;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWFLOWTABLE,
sizeof(struct nft_trans_flowtable));
if (!trans) {
unregister = true;
err = -ENOMEM;
goto err_flowtable_update_hook;
}
nft_trans_flowtable(trans) = flowtable;
nft_trans_flowtable_update(trans) = true;
INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
list_splice(&flowtable_hook.list, &nft_trans_flowtable_hooks(trans));
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
err_flowtable_update_hook:
list_for_each_entry_safe(hook, next, &flowtable_hook.list, list) {
if (unregister)
nft_unregister_flowtable_hook(ctx->net, flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
return err;
}
static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
struct sk_buff *skb,
const struct nlmsghdr *nlh,
@ -6336,6 +6432,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
struct netlink_ext_ack *extack)
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_flowtable_hook flowtable_hook;
const struct nf_flowtable_type *type;
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
@ -6371,7 +6468,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
return -EEXIST;
}
return 0;
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
return nft_flowtable_update(&ctx, nlh, flowtable);
}
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
@ -6409,17 +6508,20 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
if (err < 0)
goto err3;
err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
flowtable);
err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
&flowtable_hook, flowtable, true);
if (err < 0)
goto err4;
err = nft_register_flowtable_net_hooks(ctx.net, table, flowtable);
list_splice(&flowtable_hook.list, &flowtable->hook_list);
flowtable->data.priority = flowtable_hook.priority;
flowtable->hooknum = flowtable_hook.num;
err = nft_register_flowtable_net_hooks(ctx.net, table,
&flowtable->hook_list,
flowtable);
if (err < 0) {
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
nft_flowtable_hooks_destroy(&flowtable->hook_list);
goto err4;
}
@ -6448,6 +6550,51 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
return err;
}
static int nft_delflowtable_hook(struct nft_ctx *ctx,
struct nft_flowtable *flowtable)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_flowtable_hook flowtable_hook;
struct nft_hook *this, *next, *hook;
struct nft_trans *trans;
int err;
err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
&flowtable_hook, flowtable, false);
if (err < 0)
return err;
list_for_each_entry_safe(this, next, &flowtable_hook.list, list) {
hook = nft_hook_list_find(&flowtable->hook_list, this);
if (!hook) {
err = -ENOENT;
goto err_flowtable_del_hook;
}
hook->inactive = true;
list_del(&this->list);
kfree(this);
}
trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE,
sizeof(struct nft_trans_flowtable));
if (!trans)
return -ENOMEM;
nft_trans_flowtable(trans) = flowtable;
nft_trans_flowtable_update(trans) = true;
INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
err_flowtable_del_hook:
list_for_each_entry(hook, &flowtable_hook.list, list)
hook->inactive = false;
return err;
}
static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
struct sk_buff *skb,
const struct nlmsghdr *nlh,
@ -6486,20 +6633,25 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(flowtable);
}
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
if (nla[NFTA_FLOWTABLE_HOOK])
return nft_delflowtable_hook(&ctx, flowtable);
if (flowtable->use > 0) {
NL_SET_BAD_ATTR(extack, attr);
return -EBUSY;
}
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
return nft_delflowtable(&ctx, flowtable);
}
static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event,
u32 flags, int family,
struct nft_flowtable *flowtable)
struct nft_flowtable *flowtable,
struct list_head *hook_list)
{
struct nlattr *nest, *nest_devs;
struct nfgenmsg *nfmsg;
@ -6535,7 +6687,7 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
if (!nest_devs)
goto nla_put_failure;
list_for_each_entry_rcu(hook, &flowtable->hook_list, list) {
list_for_each_entry_rcu(hook, hook_list, list) {
if (nla_put_string(skb, NFTA_DEVICE_NAME, hook->ops.dev->name))
goto nla_put_failure;
}
@ -6588,7 +6740,9 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWFLOWTABLE,
NLM_F_MULTI | NLM_F_APPEND,
table->family, flowtable) < 0)
table->family,
flowtable,
&flowtable->hook_list) < 0)
goto done;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@ -6685,7 +6839,7 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq,
NFT_MSG_NEWFLOWTABLE, 0, family,
flowtable);
flowtable, &flowtable->hook_list);
if (err < 0)
goto err;
@ -6697,6 +6851,7 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
struct nft_flowtable *flowtable,
struct list_head *hook_list,
int event)
{
struct sk_buff *skb;
@ -6712,7 +6867,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
ctx->seq, event, 0,
ctx->family, flowtable);
ctx->family, flowtable, hook_list);
if (err < 0) {
kfree_skb(skb);
goto err;
@ -7098,7 +7253,10 @@ static void nft_commit_release(struct nft_trans *trans)
nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
break;
case NFT_MSG_DELFLOWTABLE:
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
if (nft_trans_flowtable_update(trans))
nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
else
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
break;
}
@ -7259,6 +7417,17 @@ static void nft_chain_del(struct nft_chain *chain)
list_del_rcu(&chain->list);
}
static void nft_flowtable_hooks_del(struct nft_flowtable *flowtable,
struct list_head *hook_list)
{
struct nft_hook *hook, *next;
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
if (hook->inactive)
list_move(&hook->list, hook_list);
}
}
static void nf_tables_module_autoload_cleanup(struct net *net)
{
struct nft_module_request *req, *next;
@ -7467,19 +7636,41 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
NFT_MSG_DELOBJ);
break;
case NFT_MSG_NEWFLOWTABLE:
nft_clear(net, nft_trans_flowtable(trans));
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
NFT_MSG_NEWFLOWTABLE);
if (nft_trans_flowtable_update(trans)) {
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
NFT_MSG_NEWFLOWTABLE);
list_splice(&nft_trans_flowtable_hooks(trans),
&nft_trans_flowtable(trans)->hook_list);
} else {
nft_clear(net, nft_trans_flowtable(trans));
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable(trans)->hook_list,
NFT_MSG_NEWFLOWTABLE);
}
nft_trans_destroy(trans);
break;
case NFT_MSG_DELFLOWTABLE:
list_del_rcu(&nft_trans_flowtable(trans)->list);
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
NFT_MSG_DELFLOWTABLE);
nft_unregister_flowtable_net_hooks(net,
nft_trans_flowtable(trans));
if (nft_trans_flowtable_update(trans)) {
nft_flowtable_hooks_del(nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans));
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
NFT_MSG_DELFLOWTABLE);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable_hooks(trans));
} else {
list_del_rcu(&nft_trans_flowtable(trans)->list);
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable(trans)->hook_list,
NFT_MSG_DELFLOWTABLE);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable(trans)->hook_list);
}
break;
}
}
@ -7528,7 +7719,10 @@ static void nf_tables_abort_release(struct nft_trans *trans)
nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
break;
case NFT_MSG_NEWFLOWTABLE:
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
if (nft_trans_flowtable_update(trans))
nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
else
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
break;
}
kfree(trans);
@ -7538,6 +7732,7 @@ static int __nf_tables_abort(struct net *net, bool autoload)
{
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
struct nft_hook *hook;
list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list,
list) {
@ -7635,14 +7830,24 @@ static int __nf_tables_abort(struct net *net, bool autoload)
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWFLOWTABLE:
trans->ctx.table->use--;
list_del_rcu(&nft_trans_flowtable(trans)->list);
nft_unregister_flowtable_net_hooks(net,
nft_trans_flowtable(trans));
if (nft_trans_flowtable_update(trans)) {
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable_hooks(trans));
} else {
trans->ctx.table->use--;
list_del_rcu(&nft_trans_flowtable(trans)->list);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable(trans)->hook_list);
}
break;
case NFT_MSG_DELFLOWTABLE:
trans->ctx.table->use++;
nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
if (nft_trans_flowtable_update(trans)) {
list_for_each_entry(hook, &nft_trans_flowtable(trans)->hook_list, list)
hook->inactive = false;
} else {
trans->ctx.table->use++;
nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
}
nft_trans_destroy(trans);
break;
}