Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Missing netns context in arp_tables, from Florian Westphal.

2) Underflow in flowtable reference counter, from wenxu.

3) Fix incorrect ethernet destination address in flowtable offload,
   from wenxu.

4) Check for status of neighbour entry, from wenxu.

5) Fix NAT port mangling, from wenxu.

6) Unbind callbacks from destroy path to cleanup hardware properly
   on flowtable removal.

7) Fix missing casting statistics timestamp, add
   nf_flowtable_time_stamp and use it.

8) NULL pointer exception when timeout argument is null in conntrack
   dccp and sctp protocol helpers, from Florian Westphal.

9) Possible nul-dereference in ipset with IPSET_ATTR_LINENO, also
   from Florian.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b73a65610b
@@ -106,6 +106,12 @@ struct flow_offload {
 };
 
 #define NF_FLOW_TIMEOUT (30 * HZ)
+#define nf_flowtable_time_stamp        (u32)jiffies
+
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+        return (__s32)(timeout - nf_flowtable_time_stamp);
+}
 
 struct nf_flow_route {
         struct {

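The hunk above introduces the helpers behind fix 7): flow timeouts are stored as 32-bit jiffies snapshots and expiry is judged from a signed 32-bit delta, so the comparison survives jiffies wraparound and never mixes 32-bit and 64-bit arithmetic. Below is a minimal standalone sketch of that arithmetic; it is user-space C with illustrative names, not kernel code.

/* delta_demo.c -- standalone illustration, not kernel code.
 * Shows why flow timeouts are stored as u32 jiffies and compared via a
 * signed 32-bit delta, mirroring nf_flow_timeout_delta() above.
 * Build: cc -o delta_demo delta_demo.c && ./delta_demo
 */
#include <stdint.h>
#include <stdio.h>

/* same idea as nf_flow_timeout_delta(): u32 subtraction, then signed cast */
static int32_t timeout_delta(uint32_t timeout, uint32_t now)
{
        return (int32_t)(timeout - now);
}

int main(void)
{
        /* 64-bit "jiffies" counter that has already run past 2^32 */
        uint64_t jiffies64 = 0x100000000ull + 1000;
        /* the timeout was stored as u32: lower 32 bits of jiffies + 3000 ticks */
        uint32_t timeout = (uint32_t)jiffies64 + 3000;

        /* widening both operands to 64 bits before subtracting, as the old
         * stats code effectively did, loses the modular u32 arithmetic and
         * makes a perfectly fresh flow look long expired */
        printf("mixed-width delta: %lld\n",
               (long long)timeout - (long long)jiffies64);

        /* truncating "now" to u32 keeps the arithmetic modular and correct */
        printf("u32 signed delta:  %d\n",
               timeout_delta(timeout, (uint32_t)jiffies64));
        return 0;
}

The same signed-delta idiom is what the nf_flow_offload_stats() hunk further down switches to.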
@@ -384,10 +384,11 @@ next:	;
         return 1;
 }
 
-static inline int check_target(struct arpt_entry *e, const char *name)
+static int check_target(struct arpt_entry *e, struct net *net, const char *name)
 {
         struct xt_entry_target *t = arpt_get_target(e);
         struct xt_tgchk_param par = {
+                .net    = net,
                 .table  = name,
                 .entryinfo = e,
                 .target = t->u.kernel.target,
@@ -399,8 +400,9 @@ static inline int check_target(struct arpt_entry *e, const char *name)
         return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
 }
 
-static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+static int
+find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
+                 unsigned int size,
                  struct xt_percpu_counter_alloc_state *alloc_state)
 {
         struct xt_entry_target *t;
@@ -419,7 +421,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
         }
         t->u.kernel.target = target;
 
-        ret = check_target(e, name);
+        ret = check_target(e, net, name);
         if (ret)
                 goto err;
         return 0;
@@ -512,7 +514,9 @@ static inline void cleanup_entry(struct arpt_entry *e)
 /* Checks and translates the user-supplied table segment (held in
  * newinfo).
  */
-static int translate_table(struct xt_table_info *newinfo, void *entry0,
+static int translate_table(struct net *net,
+                           struct xt_table_info *newinfo,
+                           void *entry0,
                            const struct arpt_replace *repl)
 {
         struct xt_percpu_counter_alloc_state alloc_state = { 0 };
@@ -569,7 +573,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
         /* Finally, each sanity check must pass */
         i = 0;
         xt_entry_foreach(iter, entry0, newinfo->size) {
-                ret = find_check_entry(iter, repl->name, repl->size,
+                ret = find_check_entry(iter, net, repl->name, repl->size,
                                        &alloc_state);
                 if (ret != 0)
                         break;
@@ -974,7 +978,7 @@ static int do_replace(struct net *net, const void __user *user,
                 goto free_newinfo;
         }
 
-        ret = translate_table(newinfo, loc_cpu_entry, &tmp);
+        ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
         if (ret != 0)
                 goto free_newinfo;
 
@@ -1149,7 +1153,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
         }
 }
 
-static int translate_compat_table(struct xt_table_info **pinfo,
+static int translate_compat_table(struct net *net,
+                                  struct xt_table_info **pinfo,
                                   void **pentry0,
                                   const struct compat_arpt_replace *compatr)
 {
@@ -1217,7 +1222,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
         repl.num_counters = 0;
         repl.counters = NULL;
         repl.size = newinfo->size;
-        ret = translate_table(newinfo, entry1, &repl);
+        ret = translate_table(net, newinfo, entry1, &repl);
         if (ret)
                 goto free_newinfo;
 
@@ -1270,7 +1275,7 @@ static int compat_do_replace(struct net *net, void __user *user,
                 goto free_newinfo;
         }
 
-        ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+        ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
         if (ret != 0)
                 goto free_newinfo;
 
@@ -1546,7 +1551,7 @@ int arpt_register_table(struct net *net,
         loc_cpu_entry = newinfo->entries;
         memcpy(loc_cpu_entry, repl->entries, repl->size);
 
-        ret = translate_table(newinfo, loc_cpu_entry, repl);
+        ret = translate_table(net, newinfo, loc_cpu_entry, repl);
         if (ret != 0)
                 goto out_free;
 
@@ -1848,6 +1848,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
         struct ip_set *set;
         struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
         int ret = 0;
+        u32 lineno;
 
         if (unlikely(protocol_min_failed(attr) ||
                      !attr[IPSET_ATTR_SETNAME] ||
@@ -1864,7 +1865,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
                 return -IPSET_ERR_PROTOCOL;
 
         rcu_read_lock_bh();
-        ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
+        ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
         rcu_read_unlock_bh();
         /* Userspace can't trigger element to be re-added */
         if (ret == -EAGAIN)

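This is fix 9): when IPSET_ATTR_LINENO is present, the set type's uadt handler reports a parse position through its lineno argument, so the test path must hand it real storage rather than NULL. A tiny standalone sketch of that out-parameter pattern follows; it is illustrative only, not the ipset API.

/* lineno_demo.c -- standalone sketch, not the ipset code.
 * A callee that reports a line number through an out-parameter must be
 * given real storage even when the caller does not care about the value.
 */
#include <stdint.h>
#include <stdio.h>

/* stand-in for a set type's "uadt" handler: it may write *lineno */
static int parse_element(const char *attr, uint32_t *lineno)
{
        if (attr[0] == '\0') {
                *lineno = 1;    /* would crash if lineno were NULL */
                return -1;
        }
        return 0;
}

int main(void)
{
        uint32_t lineno = 0;    /* dummy storage, value is simply ignored */

        /* passing &lineno instead of NULL keeps the error path safe */
        printf("ret=%d lineno=%u\n", parse_element("", &lineno), lineno);
        return 0;
}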
@@ -677,6 +677,9 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
         unsigned int *timeouts = data;
         int i;
 
+        if (!timeouts)
+                timeouts = dn->dccp_timeout;
+
         /* set default DCCP timeouts. */
         for (i=0; i<CT_DCCP_MAX; i++)
                 timeouts[i] = dn->dccp_timeout[i];

@@ -594,6 +594,9 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
         struct nf_sctp_net *sn = nf_sctp_pernet(net);
         int i;
 
+        if (!timeouts)
+                timeouts = sn->timeouts;
+
         /* set default SCTP timeouts. */
         for (i=0; i<SCTP_CONNTRACK_MAX; i++)
                 timeouts[i] = sn->timeouts[i];

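The two hunks above are fix 8): when the timeout argument is null because no timeout attributes were supplied, the dccp and sctp handlers filled in defaults through a NULL pointer; the added guard redirects those writes to the per-netns defaults instead. A standalone sketch of the pattern (illustrative names, not the conntrack code):

/* timeouts_demo.c -- standalone sketch, not the conntrack code.
 * When the caller supplies no output buffer, fall back to filling the
 * per-namespace defaults in place instead of writing through NULL.
 */
#include <stdio.h>

#define STATE_MAX 4

struct pernet {
        unsigned int defaults[STATE_MAX];
};

static void fill_timeouts(unsigned int *timeouts, struct pernet *pn)
{
        int i;

        if (!timeouts)                  /* the added guard */
                timeouts = pn->defaults;

        for (i = 0; i < STATE_MAX; i++) /* now safe for both callers */
                timeouts[i] = pn->defaults[i];
}

int main(void)
{
        struct pernet pn = { .defaults = { 10, 20, 30, 40 } };
        unsigned int buf[STATE_MAX];

        fill_timeouts(buf, &pn);        /* caller-provided buffer */
        fill_timeouts(NULL, &pn);       /* no buffer: previously a NULL deref */
        printf("buf[0]=%u\n", buf[0]);
        return 0;
}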
@@ -134,11 +134,6 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
 
-static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
-{
-        return (__s32)(timeout - (u32)jiffies);
-}
-
 static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 {
         const struct nf_conntrack_l4proto *l4proto;
@@ -232,7 +227,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
         int err;
 
-        flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+        flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
 
         err = rhashtable_insert_fast(&flow_table->rhashtable,
                                      &flow->tuplehash[0].node,

@@ -280,7 +280,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
         if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
                 return NF_DROP;
 
-        flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+        flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
         iph = ip_hdr(skb);
         ip_decrease_ttl(iph);
         skb->tstamp = 0;
@@ -509,7 +509,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
         if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
                 return NF_DROP;
 
-        flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+        flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
         ip6h = ipv6_hdr(skb);
         ip6h->hop_limit--;
         skb->tstamp = 0;

@@ -166,24 +166,38 @@ static int flow_offload_eth_dst(struct net *net,
                                 enum flow_offload_tuple_dir dir,
                                 struct nf_flow_rule *flow_rule)
 {
-        const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
         struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
         struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+        const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+        const struct dst_entry *dst_cache;
+        unsigned char ha[ETH_ALEN];
         struct neighbour *n;
         u32 mask, val;
+        u8 nud_state;
         u16 val16;
 
-        n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
+        dst_cache = flow->tuplehash[dir].tuple.dst_cache;
+        n = dst_neigh_lookup(dst_cache, daddr);
         if (!n)
                 return -ENOENT;
 
+        read_lock_bh(&n->lock);
+        nud_state = n->nud_state;
+        ether_addr_copy(ha, n->ha);
+        read_unlock_bh(&n->lock);
+
+        if (!(nud_state & NUD_VALID)) {
+                neigh_release(n);
+                return -ENOENT;
+        }
+
         mask = ~0xffffffff;
-        memcpy(&val, n->ha, 4);
+        memcpy(&val, ha, 4);
         flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
                             &val, &mask);
 
         mask = ~0x0000ffff;
-        memcpy(&val16, n->ha + 4, 2);
+        memcpy(&val16, ha + 4, 2);
         val = val16;
         flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                             &val, &mask);
 
@@ -335,22 +349,26 @@ static void flow_offload_port_snat(struct net *net,
                                    struct nf_flow_rule *flow_rule)
 {
         struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-        u32 mask = ~htonl(0xffff0000), port;
+        u32 mask, port;
         u32 offset;
 
         switch (dir) {
         case FLOW_OFFLOAD_DIR_ORIGINAL:
                 port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                 offset = 0; /* offsetof(struct tcphdr, source); */
+                port = htonl(port << 16);
+                mask = ~htonl(0xffff0000);
                 break;
         case FLOW_OFFLOAD_DIR_REPLY:
                 port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                 offset = 0; /* offsetof(struct tcphdr, dest); */
+                port = htonl(port);
+                mask = ~htonl(0xffff);
                 break;
         default:
                 return;
         }
-        port = htonl(port << 16);
 
         flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                             &port, &mask);
 }
@@ -361,22 +379,26 @@ static void flow_offload_port_dnat(struct net *net,
                                    struct nf_flow_rule *flow_rule)
 {
         struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-        u32 mask = ~htonl(0xffff), port;
+        u32 mask, port;
         u32 offset;
 
         switch (dir) {
         case FLOW_OFFLOAD_DIR_ORIGINAL:
-                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
-                offset = 0; /* offsetof(struct tcphdr, source); */
+                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
+                offset = 0; /* offsetof(struct tcphdr, dest); */
+                port = htonl(port);
+                mask = ~htonl(0xffff);
                 break;
         case FLOW_OFFLOAD_DIR_REPLY:
-                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
-                offset = 0; /* offsetof(struct tcphdr, dest); */
+                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
+                offset = 0; /* offsetof(struct tcphdr, source); */
+                port = htonl(port << 16);
+                mask = ~htonl(0xffff0000);
                 break;
         default:
                 return;
         }
-        port = htonl(port);
 
         flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                             &port, &mask);
 }
@@ -759,9 +781,9 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                            struct flow_offload *flow)
 {
         struct flow_offload_work *offload;
-        s64 delta;
+        __s32 delta;
 
-        delta = flow->timeout - jiffies;
+        delta = nf_flow_timeout_delta(flow->timeout);
         if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
             flow->flags & FLOW_OFFLOAD_HW_DYING)
                 return;

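The port_snat/port_dnat hunks above are fix 5): a layer-4 mangle action rewrites a full 32-bit word at the given offset, which covers both the source and destination ports at once, so the 16-bit port has to land in the half that corresponds to the field being rewritten and the mask has to preserve the other half. The old code applied one fixed placement and mask regardless of which switch arm was taken. A standalone sketch of that word layout follows; it is user-space C with illustrative helpers, not the kernel API.

/* port_mangle_demo.c -- standalone sketch, not the kernel helpers.
 * A 32-bit mangle at offset 0 of the TCP header spans both ports, so the
 * replacement value goes in one half and the mask keeps the other half.
 * Build: cc -o port_mangle_demo port_mangle_demo.c && ./port_mangle_demo
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* word = (old_word & mask) | value, as a driver would apply the action */
static uint32_t apply_mangle(uint32_t old_word, uint32_t value, uint32_t mask)
{
        return (old_word & mask) | value;
}

int main(void)
{
        /* on-the-wire word: source port 1000, destination port 2000 */
        uint32_t word = htonl((1000u << 16) | 2000u);
        uint16_t new_port = 4000;

        /* rewrite the source port: value in the upper half, mask keeps lower */
        uint32_t val_src  = htonl((uint32_t)new_port << 16);
        uint32_t mask_src = ~htonl(0xffff0000);

        /* rewrite the destination port: value in the lower half, mask keeps upper */
        uint32_t val_dst  = htonl(new_port);
        uint32_t mask_dst = ~htonl(0x0000ffff);

        printf("src mangled: %08x\n", ntohl(apply_mangle(word, val_src, mask_src)));
        printf("dst mangled: %08x\n", ntohl(apply_mangle(word, val_dst, mask_dst)));
        return 0;
}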
@@ -5984,6 +5984,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
         return ERR_PTR(-ENOENT);
 }
 
+/* Only called from error and netdev event paths. */
 static void nft_unregister_flowtable_hook(struct net *net,
                                           struct nft_flowtable *flowtable,
                                           struct nft_hook *hook)
@@ -5999,7 +6000,7 @@ static void nft_unregister_flowtable_net_hooks(struct net *net,
         struct nft_hook *hook;
 
         list_for_each_entry(hook, &flowtable->hook_list, list)
-                nft_unregister_flowtable_hook(net, flowtable, hook);
+                nf_unregister_net_hook(net, &hook->ops);
 }
 
 static int nft_register_flowtable_net_hooks(struct net *net,
@@ -6448,12 +6449,14 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
 {
         struct nft_hook *hook, *next;
 
-        flowtable->data.type->free(&flowtable->data);
         list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+                flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+                                            FLOW_BLOCK_UNBIND);
                 list_del_rcu(&hook->list);
                 kfree(hook);
         }
         kfree(flowtable->name);
+        flowtable->data.type->free(&flowtable->data);
         module_put(flowtable->data.type->owner);
         kfree(flowtable);
 }
@@ -6497,6 +6500,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
                 if (hook->ops.dev != dev)
                         continue;
 
+                /* flow_offload_netdev_event() cleans up entries for us. */
                 nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
                 list_del_rcu(&hook->list);
                 kfree_rcu(hook, rcu);

@@ -200,9 +200,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
 static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
                                      const struct nft_expr *expr)
 {
-        struct nft_flow_offload *priv = nft_expr_priv(expr);
-
-        priv->flowtable->use--;
         nf_ct_netns_put(ctx->net, ctx->family);
 }
 