Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree,
they are:

1) Fix packet drops due to incorrect ECN handling in IPVS, from
   Vadim Fedorenko.

2) Fix splat with mark restoration in xt_socket with non-full-sock,
   patch from Subash Abhinov Kasiviswanathan.

3) ipset bogusly bails out when adding IPv4 range containing more than
   2^31 addresses, from Jozsef Kadlecsik.

4) Incorrect pernet unregistration order in ipset, from Florian Westphal.

5) Races between dump and swap in ipset results in BUG_ON splats, from
   Ross Lagerwall.

6) Fix chain renames in nf_tables, from JingPiao Chen.

7) Fix race in pernet codepath with ebtables table registration, from
   Artem Savkov.

8) Memory leak in error path in set name allocation in nf_tables, patch
   from Arvind Yadav.

9) Don't dump chain counters if they are not available, this fixes a
   crash when listing the ruleset.

10) Fix out of bound memory read in strlcpy() in x_tables compat code,
    from Eric Dumazet.

11) Make sure we only process TCP packets in SYNPROXY hooks, patch from
    Lin Zhang.

12) Cannot load rules incrementally anymore after xt_bpf with pinned
    objects, added in revision 1. From Shmulik Ladkani.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit fb60bccc06
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
 {
 }
 
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+        return -EOPNOTSUPP;
+}
+
 static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
                                                        u32 key)
 {
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -108,9 +108,10 @@ struct ebt_table {
 
 #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
                      ~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
-                                            const struct ebt_table *table,
-                                            const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+                              const struct ebt_table *table,
+                              const struct nf_hook_ops *ops,
+                              struct ebt_table **res);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
                                  const struct nf_hook_ops *);
 extern unsigned int ebt_do_table(struct sk_buff *skb,
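This header change is the visible half of fix 7: ebt_register_table() used to hand back an ERR_PTR-style pointer that the pernet init then assigned to net->xt.*, so for a window the hooks could run against a still-unset table pointer. The new signature returns 0/-errno and publishes the table through an out parameter before the hooks are armed (see the WRITE_ONCE() in the ebtables.c hunk further down). Below is a minimal user-space sketch of that calling convention; the names are illustrative stand-ins, not the kernel's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct table { const char *name; };    /* stand-in for struct ebt_table */

/* returns 0 or -errno; *res is written only once the table is usable */
static int register_table(const char *name, struct table **res)
{
        struct table *t;

        if (!name)
                return -EINVAL;         /* errors travel via the return value */
        t = malloc(sizeof(*t));
        if (!t)
                return -ENOMEM;
        t->name = name;
        *res = t;                       /* published before any consumer runs */
        return 0;
}

int main(void)
{
        struct table *broute = NULL;
        int ret = register_table("broute", &broute);

        printf("ret=%d table=%s\n", ret, broute ? broute->name : "(none)");
        free(broute);
        return 0;
}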
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
         XT_BPF_MODE_FD_PINNED,
         XT_BPF_MODE_FD_ELF,
 };
+#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
 
 struct xt_bpf_info_v1 {
         __u16 mode;
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -363,6 +363,7 @@ int bpf_obj_get_user(const char __user *pathname)
         putname(pname);
         return ret;
 }
+EXPORT_SYMBOL_GPL(bpf_obj_get_user);
 
 static void bpf_evict_inode(struct inode *inode)
 {
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
 
 static int __net_init broute_net_init(struct net *net)
 {
-        net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
-        return PTR_ERR_OR_ZERO(net->xt.broute_table);
+        return ebt_register_table(net, &broute_table, NULL,
+                                  &net->xt.broute_table);
 }
 
 static void __net_exit broute_net_exit(struct net *net)
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
 
 static int __net_init frame_filter_net_init(struct net *net)
 {
-        net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter);
-        return PTR_ERR_OR_ZERO(net->xt.frame_filter);
+        return ebt_register_table(net, &frame_filter, ebt_ops_filter,
+                                  &net->xt.frame_filter);
 }
 
 static void __net_exit frame_filter_net_exit(struct net *net)
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
 
 static int __net_init frame_nat_net_init(struct net *net)
 {
-        net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat);
-        return PTR_ERR_OR_ZERO(net->xt.frame_nat);
+        return ebt_register_table(net, &frame_nat, ebt_ops_nat,
+                                  &net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
         kfree(table);
 }
 
-struct ebt_table *
-ebt_register_table(struct net *net, const struct ebt_table *input_table,
-                   const struct nf_hook_ops *ops)
+int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+                       const struct nf_hook_ops *ops, struct ebt_table **res)
 {
         struct ebt_table_info *newinfo;
         struct ebt_table *t, *table;
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
             repl->entries == NULL || repl->entries_size == 0 ||
             repl->counters != NULL || input_table->private != NULL) {
                 BUGPRINT("Bad table data for ebt_register_table!!!\n");
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
         }
 
         /* Don't add one table to multiple lists. */
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
         list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
         mutex_unlock(&ebt_mutex);
 
+        WRITE_ONCE(*res, table);
+
         if (!ops)
-                return table;
+                return 0;
 
         ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
         if (ret) {
                 __ebt_unregister_table(net, table);
-                return ERR_PTR(ret);
+                *res = NULL;
         }
 
-        return table;
+        return ret;
 free_unlock:
         mutex_unlock(&ebt_mutex);
 free_chainstack:
@@ -1276,7 +1277,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
 free_table:
         kfree(table);
 out:
-        return ERR_PTR(ret);
+        return ret;
 }
 
 void ebt_unregister_table(struct net *net, struct ebt_table *table,
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
         if (synproxy == NULL)
                 return NF_ACCEPT;
 
-        if (nf_is_loopback_packet(skb))
+        if (nf_is_loopback_packet(skb) ||
+            ip_hdr(skb)->protocol != IPPROTO_TCP)
                 return NF_ACCEPT;
 
         thoff = ip_hdrlen(skb);
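Fix 11: the SYNPROXY hooks ran their TCP handling on every packet that reached them, so a non-TCP packet had the bytes at its transport offset misread as a TCP header. The guard added above (and, in the next hunk, in the IPv6 hook) accepts non-TCP packets untouched. A hedged user-space sketch of the same guard follows, with a hand-rolled header type since this lives outside the kernel.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PROTO_TCP  6
#define PROTO_UDP 17

/* minimal stand-in for the IPv4 header; only the fields the check needs */
struct ip4h {
        uint8_t  ver_ihl, tos;
        uint16_t tot_len, id, frag_off;
        uint8_t  ttl, protocol;
        uint16_t check;
        uint32_t saddr, daddr;
};

/* returns the TCP header offset, or -1 when the packet must be passed
 * through untouched -- the check the hooks were missing */
static int tcp_offset(const uint8_t *pkt, size_t len)
{
        const struct ip4h *iph = (const void *)pkt;

        if (len < sizeof(*iph) || iph->protocol != PROTO_TCP)
                return -1;
        return (iph->ver_ihl & 0x0f) * 4;       /* IHL in 32-bit words */
}

int main(void)
{
        uint8_t pkt[40] = {0};
        struct ip4h iph = { .ver_ihl = 0x45, .protocol = PROTO_UDP };

        memcpy(pkt, &iph, sizeof(iph));
        printf("UDP packet -> %s\n",
               tcp_offset(pkt, sizeof(pkt)) < 0 ?
               "accepted, not parsed as TCP" : "parsed as TCP (the old bug)");
        return 0;
}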
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
         nexthdr = ipv6_hdr(skb)->nexthdr;
         thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
                                  &frag_off);
-        if (thoff < 0)
+        if (thoff < 0 || nexthdr != IPPROTO_TCP)
                 return NF_ACCEPT;
 
         th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
               from->family == to->family))
                 return -IPSET_ERR_TYPE_MISMATCH;
 
-        if (from->ref_netlink || to->ref_netlink)
+        write_lock_bh(&ip_set_ref_lock);
+
+        if (from->ref_netlink || to->ref_netlink) {
+                write_unlock_bh(&ip_set_ref_lock);
                 return -EBUSY;
+        }
 
         strncpy(from_name, from->name, IPSET_MAXNAMELEN);
         strncpy(from->name, to->name, IPSET_MAXNAMELEN);
         strncpy(to->name, from_name, IPSET_MAXNAMELEN);
 
-        write_lock_bh(&ip_set_ref_lock);
         swap(from->ref, to->ref);
         ip_set(inst, from_id) = to;
         ip_set(inst, to_id) = from;
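Fix 5: the old swap path tested ref_netlink before taking ip_set_ref_lock, so a netlink dump could start between the test and the swap and later trip a BUG_ON against a set whose slot had changed underneath it. Moving the test inside the write lock makes the -EBUSY answer reliable. A small sketch of the check-then-act rule, using a pthread rwlock as a stand-in for the kernel lock and illustrative variables:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
static int ref_netlink;         /* nonzero while a dump is in flight */
static int set_a = 1, set_b = 2;

static int swap_sets(void)
{
        pthread_rwlock_wrlock(&ref_lock);
        if (ref_netlink) {              /* checked under the lock ...   */
                pthread_rwlock_unlock(&ref_lock);
                return -1;              /* ... so "busy" cannot go stale */
        }
        int tmp = set_a;                /* act under the same lock       */
        set_a = set_b;
        set_b = tmp;
        pthread_rwlock_unlock(&ref_lock);
        return 0;
}

int main(void)
{
        printf("swap: %d (a=%d b=%d)\n", swap_sets(), set_a, set_b);
        return 0;
}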
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = {
 static int __init
 ip_set_init(void)
 {
-        int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+        int ret = register_pernet_subsys(&ip_set_net_ops);
 
-        if (ret != 0) {
-                pr_err("ip_set: cannot register with nfnetlink.\n");
+        if (ret) {
+                pr_err("ip_set: cannot register pernet_subsys.\n");
+                return ret;
+        }
+
+        ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+        if (ret != 0) {
+                pr_err("ip_set: cannot register with nfnetlink.\n");
+                unregister_pernet_subsys(&ip_set_net_ops);
                 return ret;
         }
+
         ret = nf_register_sockopt(&so_set);
         if (ret != 0) {
                 pr_err("SO_SET registry failed: %d\n", ret);
                 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+                unregister_pernet_subsys(&ip_set_net_ops);
                 return ret;
         }
-        ret = register_pernet_subsys(&ip_set_net_ops);
-        if (ret) {
-                pr_err("ip_set: cannot register pernet_subsys.\n");
-                nf_unregister_sockopt(&so_set);
-                nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-                return ret;
-        }
+
         pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
         return 0;
 }
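Fix 4: the old ip_set_fini() tore down the pernet data first while the nfnetlink subsystem it serves was still registered, so a late netlink request could touch freed per-net state. The reordered init above acquires the pernet subsystem first, and the fini hunk below releases it last: teardown as the exact mirror of setup. A sketch of that discipline with the usual goto-unwind error handling; subsystem names are illustrative, not the kernel's:

#include <stdio.h>

static int reg_a(void)  { puts("register A (pernet)");  return 0; }
static int reg_b(void)  { puts("register B (netlink)"); return 0; }
static int reg_c(void)  { puts("register C (sockopt)"); return 0; }
static void unreg_a(void) { puts("unregister A"); }
static void unreg_b(void) { puts("unregister B"); }
static void unreg_c(void) { puts("unregister C"); }

static int init(void)
{
        int ret;

        ret = reg_a();
        if (ret)
                return ret;
        ret = reg_b();
        if (ret)
                goto out_a;
        ret = reg_c();
        if (ret)
                goto out_b;
        return 0;

out_b:  unreg_b();              /* unwind in reverse on failure too */
out_a:  unreg_a();
        return ret;
}

static void fini(void)
{
        /* strict reverse of init(): A, registered first, goes last */
        unreg_c();
        unreg_b();
        unreg_a();
}

int main(void)
{
        if (!init())
                fini();
        return 0;
}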
@@ -2098,9 +2104,10 @@ ip_set_init(void)
 static void __exit
 ip_set_fini(void)
 {
-        unregister_pernet_subsys(&ip_set_net_ops);
         nf_unregister_sockopt(&so_set);
         nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+
+        unregister_pernet_subsys(&ip_set_net_ops);
         pr_debug("these are the famous last words\n");
 }
 
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                 return ret;
 
         ip &= ip_set_hostmask(h->netmask);
+        e.ip = htonl(ip);
+        if (e.ip == 0)
+                return -IPSET_ERR_HASH_ELEM;
 
-        if (adt == IPSET_TEST) {
-                e.ip = htonl(ip);
-                if (e.ip == 0)
-                        return -IPSET_ERR_HASH_ELEM;
+        if (adt == IPSET_TEST)
                 return adtfn(set, &e, &ext, &ext, flags);
-        }
 
         ip_to = ip;
         if (tb[IPSET_ATTR_IP_TO]) {
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
         hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
-        if (retried)
+        if (retried) {
                 ip = ntohl(h->next.ip);
-        for (; !before(ip_to, ip); ip += hosts) {
                 e.ip = htonl(ip);
                 if (e.ip == 0)
                         return -IPSET_ERR_HASH_ELEM;
+        }
+        for (; ip <= ip_to;) {
                 ret = adtfn(set, &e, &ext, &ext, flags);
-
                 if (ret && !ip_set_eexist(ret, flags))
                         return ret;
 
+                ip += hosts;
+                e.ip = htonl(ip);
+                if (e.ip == 0)
+                        return 0;
+
                 ret = 0;
         }
         return ret;
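Fix 3, visible in this hunk and in every hash-type hunk that follows: the range loops compared addresses with the TCP-sequence helpers before()/after(), which subtract as signed 32-bit values, so a range covering 2^31 or more addresses looks "negative" and the add loop exits before inserting a single element. The rewritten loops use plain unsigned comparisons and terminate through the e.ip == 0 wrap check. A runnable demonstration of the arithmetic; the addresses and the hosts stride are chosen for illustration, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* same trick as the kernel's before() helper from net/tcp.h */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        uint32_t from  = 0x00000001;    /* 0.0.0.1 */
        uint32_t to    = 0x80000001;    /* 128.0.0.1: 2^31 + 1 addresses */
        uint32_t hosts = 1u << 24;      /* stride, as with an /8 netmask */
        uint32_t ip;
        unsigned long old_iters = 0, new_iters = 0;

        /* old loop: (int32_t)(to - from) is already negative, so the
         * predicate is false on entry and nothing is ever added */
        for (ip = from; !before(to, ip); ip += hosts)
                old_iters++;

        /* new loop: plain unsigned comparison, with an explicit stop
         * when the address counter wraps past 255.255.255.255 */
        for (ip = from; ip <= to; ) {
                new_iters++;
                ip += hosts;
                if (ip == 0)
                        break;
        }

        printf("old predicate: %lu iterations\n", old_iters);  /* 0   */
        printf("new predicate: %lu iterations\n", new_iters);  /* 129 */
        return 0;
}

With a 2^31 + 1 address range the old predicate rejects the loop on entry, which is exactly the bogus bail-out the changelog describes; the unsigned comparison visits all 129 strides.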
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
         if (retried)
                 ip = ntohl(h->next.ip);
-        for (; !before(ip_to, ip); ip++) {
+        for (; ip <= ip_to; ip++) {
                 e.ip = htonl(ip);
                 ret = adtfn(set, &e, &ext, &ext, flags);
 
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
         if (retried)
                 ip = ntohl(h->next.ip);
-        for (; !before(ip_to, ip); ip++) {
+        for (; ip <= ip_to; ip++) {
                 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                        : port;
                 for (; p <= port_to; p++) {
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
         if (retried)
                 ip = ntohl(h->next.ip);
-        for (; !before(ip_to, ip); ip++) {
+        for (; ip <= ip_to; ip++) {
                 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                        : port;
                 for (; p <= port_to; p++) {
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
         if (retried)
                 ip = ntohl(h->next.ip);
-        for (; !before(ip_to, ip); ip++) {
+        for (; ip <= ip_to; ip++) {
                 e.ip = htonl(ip);
                 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                        : port;
|
@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
|
|||
ip == ntohl(h->next.ip) &&
|
||||
p == ntohs(h->next.port)
|
||||
? ntohl(h->next.ip2) : ip2_from;
|
||||
while (!after(ip2, ip2_to)) {
|
||||
while (ip2 <= ip2_to) {
|
||||
e.ip2 = htonl(ip2);
|
||||
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
|
||||
&cidr);
|
||||
|
|
|
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
         }
         if (retried)
                 ip = ntohl(h->next.ip);
-        while (!after(ip, ip_to)) {
+        while (ip <= ip_to) {
                 e.ip = htonl(ip);
                 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                 ret = adtfn(set, &e, &ext, &ext, flags);
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
         if (retried)
                 ip = ntohl(h->next.ip);
-        while (!after(ip, ip_to)) {
+        while (ip <= ip_to) {
                 e.ip = htonl(ip);
                 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                 ret = adtfn(set, &e, &ext, &ext, flags);
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
         if (retried)
                 ip = ntohl(h->next.ip[0]);
 
-        while (!after(ip, ip_to)) {
+        while (ip <= ip_to) {
                 e.ip[0] = htonl(ip);
                 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
                 ip2 = (retried &&
                        ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
                                                    : ip2_from;
-                while (!after(ip2, ip2_to)) {
+                while (ip2 <= ip2_to) {
                         e.ip[1] = htonl(ip2);
                         last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
                         ret = adtfn(set, &e, &ext, &ext, flags);
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
         if (retried)
                 ip = ntohl(h->next.ip);
-        while (!after(ip, ip_to)) {
+        while (ip <= ip_to) {
                 e.ip = htonl(ip);
                 last = ip_set_range_to_cidr(ip, ip_to, &cidr);
                 e.cidr = cidr - 1;
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
         if (retried)
                 ip = ntohl(h->next.ip[0]);
 
-        while (!after(ip, ip_to)) {
+        while (ip <= ip_to) {
                 e.ip[0] = htonl(ip);
                 ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
                 p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                 ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
                        p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
                                                  : ip2_from;
-                while (!after(ip2, ip2_to)) {
+                while (ip2 <= ip2_to) {
                         e.ip[1] = htonl(ip2);
                         ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
                                                         &e.cidr[1]);
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 {
         struct sk_buff *new_skb = NULL;
         struct iphdr *old_iph = NULL;
+        __u8 old_dsfield;
 #ifdef CONFIG_IP_VS_IPV6
         struct ipv6hdr *old_ipv6h = NULL;
 #endif
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
                         *payload_len =
                                 ntohs(old_ipv6h->payload_len) +
                                 sizeof(*old_ipv6h);
-                *dsfield = ipv6_get_dsfield(old_ipv6h);
+                old_dsfield = ipv6_get_dsfield(old_ipv6h);
                 *ttl = old_ipv6h->hop_limit;
                 if (df)
                         *df = 0;
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 
                 /* fix old IP header checksum */
                 ip_send_check(old_iph);
-                *dsfield = ipv4_get_dsfield(old_iph);
+                old_dsfield = ipv4_get_dsfield(old_iph);
                 *ttl = old_iph->ttl;
                 if (payload_len)
                         *payload_len = ntohs(old_iph->tot_len);
         }
 
+        /* Implement full-functionality option for ECN encapsulation */
+        *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
+
         return skb;
 error:
         kfree_skb(skb);
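Fix 1: the tunnel path wrote the inner ToS byte straight through *dsfield into the outer header, replicating the ECN bits verbatim instead of applying the RFC 6040 encapsulation rules, which led to the packet drops the changelog mentions. The patch records the inner value in old_dsfield and derives the outer byte with INET_ECN_encapsulate(). A user-space restatement of that mapping, assuming the kernel helper's RFC 6040 behaviour; the macro names below are local stand-ins, not the kernel's:

#include <stdio.h>
#include <stdint.h>

#define ECN_MASK   0x03u
#define ECN_ECT_0  0x02u
#define ECN_CE     0x03u

/* mirrors INET_ECN_encapsulate(): the outer byte keeps its DSCP bits,
 * inherits the inner ECN bits, except that CE is degraded to ECT(0) */
static uint8_t ecn_encapsulate(uint8_t outer, uint8_t inner)
{
        outer &= (uint8_t)~ECN_MASK;
        outer |= (inner & ECN_MASK) == ECN_CE ? ECN_ECT_0
                                              : (inner & ECN_MASK);
        return outer;
}

int main(void)
{
        static const char *names[] = { "Not-ECT", "ECT(1)", "ECT(0)", "CE" };
        unsigned int inner;

        for (inner = 0; inner <= ECN_CE; inner++)
                printf("inner %-7s -> outer %s\n", names[inner],
                       names[ecn_encapsulate(0, (uint8_t)inner) & ECN_MASK]);
        return 0;
}

Only the CE row changes under this mapping; the DSCP bits (zero in this sketch) pass through untouched.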
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                 if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
                         goto nla_put_failure;
 
-                if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+                if (basechain->stats && nft_dump_stats(skb, basechain->stats))
                         goto nla_put_failure;
         }
 
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 
                 chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
                                                 genmask);
-                if (IS_ERR(chain2))
-                        return PTR_ERR(chain2);
+                if (!IS_ERR(chain2))
+                        return -EEXIST;
         }
 
         if (nla[NFTA_CHAIN_COUNTERS]) {
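Fix 6: the rename check was inverted. A failed lookup of the new name (an ERR_PTR, meaning the name is free) aborted the rename, while a successful lookup (the name is already taken) let it proceed and produced a duplicate chain. With the fix, only an existing chain under the target name stops the rename, with -EEXIST. As a hedged illustration of the user-visible effect, a command like `nft rename chain ip filter input input2` should now succeed when input2 is free and fail only when a chain input2 already exists in that table.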
@@ -2741,8 +2741,10 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
         list_for_each_entry(i, &ctx->table->sets, list) {
                 if (!nft_is_active_next(ctx->net, i))
                         continue;
-                if (!strcmp(set->name, i->name))
+                if (!strcmp(set->name, i->name)) {
+                        kfree(set->name);
                         return -ENFILE;
+                }
         }
         return 0;
 }
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                 if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
                         return ERR_PTR(-EFAULT);
 
-                strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+                memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
                 info->num_counters = compat_tmp.num_counters;
                 user += sizeof(compat_tmp);
         } else
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                 if (copy_from_user(info, user, sizeof(*info)) != 0)
                         return ERR_PTR(-EFAULT);
 
-                info->name[sizeof(info->name) - 1] = '\0';
                 user += sizeof(*info);
         }
+        info->name[sizeof(info->name) - 1] = '\0';
 
         size = sizeof(struct xt_counters);
         size *= info->num_counters;
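Fix 10: compat_tmp.name arrives verbatim from user space and need not be NUL-terminated, and strlcpy() begins with strlen() on its source, reading past the end of the field when no terminator is present. The patch copies a bounded sizeof() - 1 with memcpy() and, in the second hunk, terminates info->name exactly once for both branches. The safe pattern as a runnable sketch, with an illustrative 8-byte field rather than the real structure:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8

static void copy_name(char dst[NAME_LEN], const char src[NAME_LEN])
{
        memcpy(dst, src, NAME_LEN - 1); /* never reads beyond the field */
        dst[NAME_LEN - 1] = '\0';       /* unconditional termination    */
}

int main(void)
{
        /* deliberately unterminated 8-byte source, as user space may send */
        char src[NAME_LEN] = { 'f', 'i', 'l', 't', 'e', 'r', 'X', 'Y' };
        char dst[NAME_LEN];

        copy_name(dst, src);
        printf("%s\n", dst);            /* "filterX": bounded, terminated */
        return 0;
}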
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/syscalls.h>
 #include <linux/skbuff.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
         return 0;
 }
 
+static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
+{
+        mm_segment_t oldfs = get_fs();
+        int retval, fd;
+
+        set_fs(KERNEL_DS);
+        fd = bpf_obj_get_user(path);
+        set_fs(oldfs);
+        if (fd < 0)
+                return fd;
+
+        retval = __bpf_mt_check_fd(fd, ret);
+        sys_close(fd);
+        return retval;
+}
+
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
         struct xt_bpf_info *info = par->matchinfo;
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
                 return __bpf_mt_check_bytecode(info->bpf_program,
                                                info->bpf_program_num_elem,
                                                &info->filter);
-        else if (info->mode == XT_BPF_MODE_FD_PINNED ||
-                 info->mode == XT_BPF_MODE_FD_ELF)
+        else if (info->mode == XT_BPF_MODE_FD_ELF)
                 return __bpf_mt_check_fd(info->fd, &info->filter);
+        else if (info->mode == XT_BPF_MODE_PATH_PINNED)
+                return __bpf_mt_check_path(info->path, &info->filter);
         else
                 return -EINVAL;
 }
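Fix 12: revision 1 resolved the pinned-program mode into a file descriptor at rule-insertion time, but iptables applies every change by dumping the active ruleset and re-installing it, and a dumped fd number is meaningless on re-insert, so any later rule addition failed. The restored XT_BPF_MODE_PATH_PINNED handling keeps the pinned path in the rule and re-opens the object inside the kernel via bpf_obj_get_user(), hence the set_fs(KERNEL_DS) dance in the helper above. As a hedged usage illustration, assuming a program pinned under /sys/fs/bpf: `iptables -A OUTPUT -m bpf --object-pinned /sys/fs/bpf/dropper -j DROP`, followed by any ordinary `iptables -A`, should now load cleanly.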
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                 transparent = nf_sk_is_transparent(sk);
 
                 if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-                    transparent)
+                    transparent && sk_fullsock(sk))
                         pskb->mark = sk->sk_mark;
 
                 if (sk != skb->sk)
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
                 transparent = nf_sk_is_transparent(sk);
 
                 if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-                    transparent)
+                    transparent && sk_fullsock(sk))
                         pskb->mark = sk->sk_mark;
 
                 if (sk != skb->sk)