Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-06-07

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix several bugs in riscv64 JIT code emission where the high 32 bits were
   not cleared for alu32 ops, from Björn and Luke, with selftests covering all
   relevant BPF alu ops from Björn and Jiong.

2) Two fixes for UDP BPF reuseport that avoid calling the program in the case
   of __udp6_lib_err and UDP GRO, which broke reuseport_select_sock()'s
   assumption that skb->data points to the transport header, from Martin.

3) Two fixes for BPF sockmap: a use-after-free caused by sleeping in the
   psock's backlog workqueue, and a missing restore of sk_write_space when
   the psock gets dropped, from Jakub and John.

4) Fix the unconnected UDP sendmsg hook API, which is insufficient as-is since
   it breaks standard applications like DNS if the reverse NAT is not performed
   upon receive, from Daniel.

5) Fix an out-of-bounds read in __bpf_skc_lookup which, in the case of AF_INET6,
   fails to verify that the tuple is long enough, from Lorenz.

6) Fix libbpf's libbpf__probe_raw_btf to return an fd instead of 0/1 (for an
   {un,}successful probe), since the result is propagated as an fd to
   load_sk_storage_btf() and the wrong descriptor would be closed otherwise,
   from Michal.

7) Fix bpftool's JSON output for the case when a lookup fails, from Krzesimir.

8) Minor misc fixes in docs, samples and selftests, from various others.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

@ -316,16 +316,16 @@ A: When a netdev of a physical NIC is initialized, Linux usually
all the traffic, you can force the netdev to only have 1 queue, queue
id 0, and then bind to queue 0. You can use ethtool to do this::
sudo ethtool -L <interface> combined 1
If you want to only see part of the traffic, you can program the
NIC through ethtool to filter out your traffic to a single queue id
that you can bind your XDP socket to. Here is one example in which
UDP traffic to and from port 4242 are sent to queue 2::
sudo ethtool -N <interface> rx-flow-hash udp4 fn
sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \
4242 action 2
A number of other ways are possible, all up to the capabilities of
the NIC you have.


@ -751,22 +751,32 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
emit(rv_and(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
emit(rv_or(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
emit(rv_xor(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
@ -789,14 +799,20 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
/* dst = -dst */
@ -804,6 +820,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU64 | BPF_NEG:
emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
rv_subw(rd, RV_REG_ZERO, rd), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
/* dst = BSWAP##imm(dst) */
@ -958,14 +976,20 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
if (!is64)
emit_zext_32(rd, ctx);
break;
/* JUMP off */
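
For context on the JIT hunks above: BPF 32-bit ALU instructions define the full
64-bit destination register with the upper half zeroed. The riscv64 JIT emitted
only the 32-bit RISC-V operation and could leave stale upper bits behind, which
is what the added emit_zext_32() calls fix. A minimal user-space sketch of the
required semantics (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Model of a BPF alu32 add: compute in 32 bits, then zero-extend the
 * result into the 64-bit register, the effect emit_zext_32() enforces. */
static uint64_t bpf_alu32_add(uint64_t dst, uint64_t src)
{
	uint32_t lo = (uint32_t)dst + (uint32_t)src;

	return (uint64_t)lo;	/* upper 32 bits are guaranteed zero */
}

int main(void)
{
	uint64_t dst = 0xdeadbeef00000001ULL;

	/* A JIT without the zero extension could leave 0xdeadbeef in the
	 * upper half; the correct BPF result is plain 0x2. */
	printf("0x%llx\n", (unsigned long long)bpf_alu32_add(dst, 1));
	return 0;
}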


@ -238,6 +238,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
@ -339,6 +345,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })


@ -351,6 +351,8 @@ static inline void sk_psock_update_proto(struct sock *sk,
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
sk->sk_write_space = psock->saved_write_space;
if (psock->sk_proto) {
sk->sk_prot = psock->sk_proto;
psock->sk_proto = NULL;
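
The one-line change above restores the socket's original write-space callback
alongside its proto ops. A toy user-space model of the save/restore pattern
(names are illustrative stand-ins, not the kernel's types):

#include <stdio.h>

struct sock_model {
	const char *prot;
	void (*write_space)(void);
};

struct psock_model {
	const char *saved_proto;
	void (*saved_write_space)(void);
};

static void orig_write_space(void) { puts("original write_space"); }
static void psock_write_space(void) { puts("psock write_space"); }

/* Teardown must undo everything the psock saved when it took the socket
 * over; before the fix, write_space kept pointing at the psock's handler. */
static void psock_restore(struct sock_model *sk, struct psock_model *psock)
{
	sk->write_space = psock->saved_write_space;
	if (psock->saved_proto) {
		sk->prot = psock->saved_proto;
		psock->saved_proto = NULL;
	}
}

int main(void)
{
	struct sock_model sk = { "tcp", orig_write_space };
	struct psock_model psock = { sk.prot, sk.write_space };

	sk.write_space = psock_write_space;	/* psock takes over */
	psock_restore(&sk, &psock);		/* psock gets dropped */
	sk.write_space();			/* back to the original handler */
	return 0;
}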


@ -192,6 +192,8 @@ enum bpf_attach_type {
BPF_LIRC_MODE2,
BPF_FLOW_DISSECTOR,
BPF_CGROUP_SYSCTL,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
__MAX_BPF_ATTACH_TYPE
};


@ -1581,6 +1581,8 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
case BPF_CGROUP_UDP4_RECVMSG:
case BPF_CGROUP_UDP6_RECVMSG:
return 0;
default:
return -EINVAL;
@ -1875,6 +1877,8 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
case BPF_CGROUP_UDP4_RECVMSG:
case BPF_CGROUP_UDP6_RECVMSG:
ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
break;
case BPF_CGROUP_SOCK_OPS:
@ -1960,6 +1964,8 @@ static int bpf_prog_detach(const union bpf_attr *attr)
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
case BPF_CGROUP_UDP4_RECVMSG:
case BPF_CGROUP_UDP6_RECVMSG:
ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
break;
case BPF_CGROUP_SOCK_OPS:
@ -2011,6 +2017,8 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
case BPF_CGROUP_UDP4_RECVMSG:
case BPF_CGROUP_UDP6_RECVMSG:
case BPF_CGROUP_SOCK_OPS:
case BPF_CGROUP_DEVICE:
case BPF_CGROUP_SYSCTL:


@ -5361,9 +5361,12 @@ static int check_return_code(struct bpf_verifier_env *env)
struct tnum range = tnum_range(0, 1);
switch (env->prog->type) {
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
range = tnum_range(1, 1);
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
@ -5380,16 +5383,17 @@ static int check_return_code(struct bpf_verifier_env *env)
}
if (!tnum_in(range, reg->var_off)) {
char tn_buf[48];
verbose(env, "At program exit the register R0 ");
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "has value %s", tn_buf);
} else {
verbose(env, "has unknown scalar value");
}
verbose(env, " should have been 0 or 1\n");
tnum_strn(tn_buf, sizeof(tn_buf), range);
verbose(env, " should have been in %s\n", tn_buf);
return -EINVAL;
}
return 0;
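
The new case above makes the verifier demand that recvmsg hooks return exactly 1:
they may only rewrite the reported peer address, not filter traffic. A minimal
sketch of such a program; the section name matches the libbpf table added further
down, while the include paths and the hard-coded address are assumptions for
illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* SEC(); assumed include path */
#include <bpf/bpf_endian.h>	/* bpf_htonl()/bpf_htons(); assumed include path */

SEC("cgroup/recvmsg4")
int rewrite_recvmsg4_src(struct bpf_sock_addr *ctx)
{
	/* Rewrite the peer address that recvfrom()/recvmsg() will report,
	 * e.g. to undo a rewrite done earlier by a cgroup/sendmsg4 program. */
	ctx->user_ip4 = bpf_htonl(0x7f000001);	/* 127.0.0.1 */
	ctx->user_port = bpf_htons(53);

	return 1;	/* anything else is rejected by the check above */
}

char _license[] SEC("license") = "GPL";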


@ -5300,7 +5300,13 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct net *net;
int sdif;
family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
if (len == sizeof(tuple->ipv4))
family = AF_INET;
else if (len == sizeof(tuple->ipv6))
family = AF_INET6;
else
return NULL;
if (unlikely(family == AF_UNSPEC || flags ||
!((s32)netns_id < 0 || netns_id <= S32_MAX)))
goto out;
@ -5333,8 +5339,14 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
ifindex, proto, netns_id, flags);
if (sk)
if (sk) {
sk = sk_to_full_sk(sk);
if (!sk_fullsock(sk)) {
if (!sock_flag(sk, SOCK_RCU_FREE))
sock_gen_put(sk);
return NULL;
}
}
return sk;
}
@ -5365,8 +5377,14 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
flags);
if (sk)
if (sk) {
sk = sk_to_full_sk(sk);
if (!sk_fullsock(sk)) {
if (!sock_flag(sk, SOCK_RCU_FREE))
sock_gen_put(sk);
return NULL;
}
}
return sk;
}
@ -6726,6 +6744,7 @@ static bool sock_addr_is_valid_access(int off, int size,
case BPF_CGROUP_INET4_BIND:
case BPF_CGROUP_INET4_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP4_RECVMSG:
break;
default:
return false;
@ -6736,6 +6755,7 @@ static bool sock_addr_is_valid_access(int off, int size,
case BPF_CGROUP_INET6_BIND:
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP6_SENDMSG:
case BPF_CGROUP_UDP6_RECVMSG:
break;
default:
return false;
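
The first part of this hunk is the out-of-bounds fix from the summary: the old
code treated every tuple length other than sizeof(tuple->ipv4) as IPv6 and then
read a full IPv6 tuple from a possibly shorter buffer. A small user-space model
of the corrected selection (the structs are simplified stand-ins, not the
kernel's struct bpf_sock_tuple):

#include <stdio.h>
#include <stddef.h>

struct tuple_ipv4 { unsigned int saddr, daddr; unsigned short sport, dport; };
struct tuple_ipv6 { unsigned int saddr[4], daddr[4]; unsigned short sport, dport; };

/* Old: family = (len == sizeof(ipv4)) ? AF_INET : AF_INET6, so any length
 * in between fell through to the IPv6 path and was over-read.
 * New: only exact matches are accepted; everything else is rejected. */
static int pick_family(size_t len)
{
	if (len == sizeof(struct tuple_ipv4))
		return 4;
	if (len == sizeof(struct tuple_ipv6))
		return 6;
	return -1;	/* lookup now bails out instead of over-reading */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_family(sizeof(struct tuple_ipv4)),
	       pick_family(sizeof(struct tuple_ipv4) + 4),	/* rejected now */
	       pick_family(sizeof(struct tuple_ipv6)));
	return 0;
}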


@ -2337,6 +2337,7 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
kv.iov_base = skb->data + offset;
kv.iov_len = slen;
memset(&msg, 0, sizeof(msg));
msg.msg_flags = MSG_DONTWAIT;
ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
if (ret <= 0)


@ -498,7 +498,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport)
{
return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
const struct iphdr *iph = ip_hdr(skb);
return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
iph->daddr, dport, inet_iif(skb),
inet_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
@ -1773,6 +1777,10 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
if (cgroup_bpf_enabled)
BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
(struct sockaddr *)sin);
}
if (udp_sk(sk)->gro_enabled)
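
The hook invocation added above runs right after the kernel fills in the address
that recvfrom()/recvmsg() reports. It matters because an unconnected UDP client,
a stub resolver being the classic case from the summary, usually verifies that a
reply came from the address it queried; if a cgroup/sendmsg4 program rewrote the
destination on the way out, that check fails unless a recvmsg hook rewrites the
source back. A hedged user-space sketch of the client-side pattern (function and
parameter names are made up for illustration):

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Send a request to 'srv' and only accept a reply that appears to come
 * from the same address and port. */
static int query(int fd, const struct sockaddr_in *srv,
		 const void *req, size_t req_len, void *resp, size_t resp_len)
{
	struct sockaddr_in from;
	socklen_t flen = sizeof(from);
	ssize_t n;

	if (sendto(fd, req, req_len, 0,
		   (const struct sockaddr *)srv, sizeof(*srv)) < 0)
		return -1;

	n = recvfrom(fd, resp, resp_len, 0, (struct sockaddr *)&from, &flen);
	if (n < 0)
		return -1;

	/* The comparison that breaks without the recvmsg hook. */
	if (from.sin_addr.s_addr != srv->sin_addr.s_addr ||
	    from.sin_port != srv->sin_port)
		return -1;

	return (int)n;
}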


@ -239,7 +239,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
inet6_sdif(skb), &udp_table, skb);
inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
@ -365,6 +365,10 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
inet6_iif(skb));
}
*addr_len = sizeof(*sin6);
if (cgroup_bpf_enabled)
BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
(struct sockaddr *)sin6);
}
if (udp_sk(sk)->gro_enabled)
@ -511,7 +515,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct net *net = dev_net(skb->dev);
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
inet6_iif(skb), inet6_sdif(skb), udptable, skb);
inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
if (!sk) {
/* No socket for error: try tunnels before discarding */
sk = ERR_PTR(-ENOENT);


@ -678,7 +678,7 @@ void read_trace_pipe(void)
static char buf[4096];
ssize_t sz;
sz = read(trace_fd, buf, sizeof(buf));
sz = read(trace_fd, buf, sizeof(buf) - 1);
if (sz > 0) {
buf[sz] = 0;
puts(buf);


@ -216,7 +216,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
{
const char *event_type = "uprobe";
struct perf_event_attr attr = {};
char buf[256], event_alias[256];
char buf[256], event_alias[sizeof("test_1234567890")];
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
int err, res, kfd, efd;


@ -29,7 +29,7 @@ CGROUP COMMANDS
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
| **sendmsg4** | **sendmsg6** | **sysctl** }
| **sendmsg4** | **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
@ -86,6 +86,10 @@ DESCRIPTION
unconnected udp4 socket (since 4.18);
**sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
unconnected udp6 socket (since 4.18);
**recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
an unconnected udp4 socket (since 5.2);
**recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
an unconnected udp6 socket (since 5.2);
**sysctl** sysctl access (since 5.2).
**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*


@ -40,7 +40,7 @@ PROG COMMANDS
| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
| **cgroup/sysctl**
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl**
| }
| *ATTACH_TYPE* := {
| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**


@ -371,6 +371,7 @@ _bpftool()
lirc_mode2 cgroup/bind4 cgroup/bind6 \
cgroup/connect4 cgroup/connect6 \
cgroup/sendmsg4 cgroup/sendmsg6 \
cgroup/recvmsg4 cgroup/recvmsg6 \
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl" -- \
"$cur" ) )
@ -666,7 +667,7 @@ _bpftool()
attach|detach)
local ATTACH_TYPES='ingress egress sock_create sock_ops \
device bind4 bind6 post_bind4 post_bind6 connect4 \
connect6 sendmsg4 sendmsg6 sysctl'
connect6 sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl'
local ATTACH_FLAGS='multi override'
local PROG_TYPE='id pinned tag'
case $prev in
@ -676,7 +677,7 @@ _bpftool()
;;
ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
post_bind4|post_bind6|connect4|connect6|sendmsg4|\
sendmsg6|sysctl)
sendmsg6|recvmsg4|recvmsg6|sysctl)
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
"$cur" ) )
return 0


@ -25,7 +25,8 @@
" ATTACH_TYPE := { ingress | egress | sock_create |\n" \
" sock_ops | device | bind4 | bind6 |\n" \
" post_bind4 | post_bind6 | connect4 |\n" \
" connect6 | sendmsg4 | sendmsg6 | sysctl }"
" connect6 | sendmsg4 | sendmsg6 |\n" \
" recvmsg4 | recvmsg6 | sysctl }"
static const char * const attach_type_strings[] = {
[BPF_CGROUP_INET_INGRESS] = "ingress",
@ -42,6 +43,8 @@ static const char * const attach_type_strings[] = {
[BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4",
[BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6",
[BPF_CGROUP_SYSCTL] = "sysctl",
[BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
[BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
[__MAX_BPF_ATTACH_TYPE] = NULL,
};


@ -716,12 +716,14 @@ static int dump_map_elem(int fd, void *key, void *value,
return 0;
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "key");
print_hex_data_json(key, map_info->key_size);
jsonw_name(json_wtr, "value");
jsonw_start_object(json_wtr);
jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
jsonw_end_object(json_wtr);
jsonw_end_object(json_wtr);
} else {
const char *msg = NULL;


@ -1063,7 +1063,8 @@ static int do_help(int argc, char **argv)
" sk_reuseport | flow_dissector | cgroup/sysctl |\n"
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
" cgroup/recvmsg6 }\n"
" ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
" " HELP_SPEC_OPTIONS "\n"


@ -192,6 +192,8 @@ enum bpf_attach_type {
BPF_LIRC_MODE2,
BPF_FLOW_DISSECTOR,
BPF_CGROUP_SYSCTL,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
__MAX_BPF_ATTACH_TYPE
};


@ -1645,14 +1645,16 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
/* FUNC x */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
};
int res;
int btf_fd;
res = libbpf__probe_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
if (res < 0)
return res;
if (res > 0)
btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
if (btf_fd >= 0) {
obj->caps.btf_func = 1;
close(btf_fd);
return 1;
}
return 0;
}
@ -1670,14 +1672,16 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
};
int res;
int btf_fd;
res = libbpf__probe_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
if (res < 0)
return res;
if (res > 0)
btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
if (btf_fd >= 0) {
obj->caps.btf_datasec = 1;
close(btf_fd);
return 1;
}
return 0;
}
@ -3206,6 +3210,10 @@ static const struct {
BPF_CGROUP_UDP4_SENDMSG),
BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_CGROUP_UDP6_SENDMSG),
BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_CGROUP_UDP4_RECVMSG),
BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_CGROUP_UDP6_RECVMSG),
BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_CGROUP_SYSCTL),
};


@ -34,7 +34,7 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */


@ -133,8 +133,8 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
return errno != EINVAL && errno != EOPNOTSUPP;
}
int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len)
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len)
{
struct btf_header hdr = {
.magic = BTF_MAGIC,
@ -157,14 +157,9 @@ int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
if (btf_fd < 0) {
free(raw_btf);
return 0;
}
close(btf_fd);
free(raw_btf);
return 1;
return btf_fd;
}
static int load_sk_storage_btf(void)
@ -190,7 +185,7 @@ static int load_sk_storage_btf(void)
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};
return libbpf__probe_raw_btf((char *)types, sizeof(types),
return libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
}


@ -21,8 +21,8 @@ LDLIBS += -lcap -lelf -lrt -lpthread
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage test_select_reuseport test_section_names \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl
BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
@ -63,7 +63,8 @@ TEST_PROGS_EXTENDED := with_addr.sh \
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user
include ../lib.mk


@ -3,6 +3,7 @@
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
#define CHECK_FLOW_KEYS(desc, got, expected) \
CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \


@ -119,6 +119,16 @@ static struct sec_name_test tests[] = {
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
{0, BPF_CGROUP_UDP6_SENDMSG},
},
{
"cgroup/recvmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
{0, BPF_CGROUP_UDP4_RECVMSG},
},
{
"cgroup/recvmsg6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
{0, BPF_CGROUP_UDP6_RECVMSG},
},
{
"cgroup/sysctl",
{0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},


@ -76,6 +76,7 @@ struct sock_addr_test {
enum {
LOAD_REJECT,
ATTACH_REJECT,
ATTACH_OKAY,
SYSCALL_EPERM,
SYSCALL_ENOTSUPP,
SUCCESS,
@ -88,9 +89,13 @@ static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
static int recvmsg_allow_prog_load(const struct sock_addr_test *test);
static int recvmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
@ -507,6 +512,92 @@ static struct sock_addr_test tests[] = {
SRC6_REWRITE_IP,
SYSCALL_EPERM,
},
/* recvmsg */
{
"recvmsg4: return code ok",
recvmsg_allow_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_OKAY,
},
{
"recvmsg4: return code !ok",
recvmsg_deny_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"recvmsg6: return code ok",
recvmsg_allow_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_OKAY,
},
{
"recvmsg6: return code !ok",
recvmsg_deny_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"recvmsg4: rewrite IP & port (asm)",
recvmsg4_rw_asm_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SERV4_IP,
SUCCESS,
},
{
"recvmsg6: rewrite IP & port (asm)",
recvmsg6_rw_asm_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SERV6_IP,
SUCCESS,
},
};
static int mk_sockaddr(int domain, const char *ip, unsigned short port,
@ -765,8 +856,8 @@ static int connect6_prog_load(const struct sock_addr_test *test)
return load_path(test, CONNECT6_PROG_PATH);
}
static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
int32_t rc)
static int xmsg_ret_only_prog_load(const struct sock_addr_test *test,
int32_t rc)
{
struct bpf_insn insns[] = {
/* return rc */
@ -778,12 +869,22 @@ static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
{
return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
return xmsg_ret_only_prog_load(test, /*rc*/ 1);
}
static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
{
return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
return xmsg_ret_only_prog_load(test, /*rc*/ 0);
}
static int recvmsg_allow_prog_load(const struct sock_addr_test *test)
{
return xmsg_ret_only_prog_load(test, /*rc*/ 1);
}
static int recvmsg_deny_prog_load(const struct sock_addr_test *test)
{
return xmsg_ret_only_prog_load(test, /*rc*/ 0);
}
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
@ -838,6 +939,47 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in src4_rw_addr;
if (mk_sockaddr(AF_INET, SERV4_IP, SERV4_PORT,
(struct sockaddr *)&src4_rw_addr,
sizeof(src4_rw_addr)) == -1)
return -1;
struct bpf_insn insns[] = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (sk.family == AF_INET && */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, family)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 6),
/* sk.type == SOCK_DGRAM) { */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, type)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 4),
/* user_ip4 = src4_rw_addr.sin_addr */
BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_addr.s_addr),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
offsetof(struct bpf_sock_addr, user_ip4)),
/* user_port = src4_rw_addr.sin_port */
BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_port),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
offsetof(struct bpf_sock_addr, user_port)),
/* } */
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test)
{
return load_path(test, SENDMSG4_PROG_PATH);
@ -901,6 +1043,39 @@ static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_REWRITE_IP);
}
static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in6 src6_rw_addr;
if (mk_sockaddr(AF_INET6, SERV6_IP, SERV6_PORT,
(struct sockaddr *)&src6_rw_addr,
sizeof(src6_rw_addr)) == -1)
return -1;
struct bpf_insn insns[] = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (sk.family == AF_INET6) { */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, family)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET6, 10),
STORE_IPV6(user_ip6, src6_rw_addr.sin6_addr.s6_addr32),
/* user_port = src6_rw_addr.sin6_port */
BPF_MOV32_IMM(BPF_REG_7, src6_rw_addr.sin6_port),
BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
offsetof(struct bpf_sock_addr, user_port)),
/* } */
/* return 1 */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
{
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
@ -1282,13 +1457,13 @@ static int run_connect_test_case(const struct sock_addr_test *test)
return err;
}
static int run_sendmsg_test_case(const struct sock_addr_test *test)
static int run_xmsg_test_case(const struct sock_addr_test *test, int max_cmsg)
{
socklen_t addr_len = sizeof(struct sockaddr_storage);
struct sockaddr_storage expected_src_addr;
struct sockaddr_storage requested_addr;
struct sockaddr_storage expected_addr;
struct sockaddr_storage real_src_addr;
struct sockaddr_storage server_addr;
struct sockaddr_storage sendmsg_addr;
struct sockaddr_storage recvmsg_addr;
int clientfd = -1;
int servfd = -1;
int set_cmsg;
@ -1297,20 +1472,19 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
if (test->type != SOCK_DGRAM)
goto err;
if (init_addrs(test, &requested_addr, &expected_addr,
&expected_src_addr))
if (init_addrs(test, &sendmsg_addr, &server_addr, &expected_addr))
goto err;
/* Prepare server to sendmsg to */
servfd = start_server(test->type, &expected_addr, addr_len);
servfd = start_server(test->type, &server_addr, addr_len);
if (servfd == -1)
goto err;
for (set_cmsg = 0; set_cmsg <= 1; ++set_cmsg) {
for (set_cmsg = 0; set_cmsg <= max_cmsg; ++set_cmsg) {
if (clientfd >= 0)
close(clientfd);
clientfd = sendmsg_to_server(test->type, &requested_addr,
clientfd = sendmsg_to_server(test->type, &sendmsg_addr,
addr_len, set_cmsg, /*flags*/0,
&err);
if (err)
@ -1330,10 +1504,10 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
* specific packet may differ from the one used by default and
* returned by getsockname(2).
*/
if (recvmsg_from_client(servfd, &real_src_addr) == -1)
if (recvmsg_from_client(servfd, &recvmsg_addr) == -1)
goto err;
if (cmp_addr(&real_src_addr, &expected_src_addr, /*cmp_port*/0))
if (cmp_addr(&recvmsg_addr, &expected_addr, /*cmp_port*/0))
goto err;
}
@ -1366,6 +1540,9 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
goto out;
} else if (test->expected_result == ATTACH_REJECT || err) {
goto err;
} else if (test->expected_result == ATTACH_OKAY) {
err = 0;
goto out;
}
switch (test->attach_type) {
@ -1379,7 +1556,11 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
break;
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
err = run_sendmsg_test_case(test);
err = run_xmsg_test_case(test, 1);
break;
case BPF_CGROUP_UDP4_RECVMSG:
case BPF_CGROUP_UDP6_RECVMSG:
err = run_xmsg_test_case(test, 0);
break;
default:
goto err;


@ -0,0 +1,533 @@
/* This file contains sub-register zero extension checks for insns defining
* sub-registers, meaning:
* - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
* forms (BPF_END) could define sub-registers.
* - Narrow direct loads, BPF_B/H/W | BPF_LDX.
* - BPF_LD is not exposed to JIT back-ends, so no need for testing.
*
* "get_prandom_u32" is used to initialize low 32-bit of some registers to
* prevent potential optimizations done by verifier or JIT back-ends which could
* optimize register back into constant when range info shows one register is a
* constant.
*/
{
"add32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"add32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
/* An insn could have no effect on the low 32-bit, for example:
* a = a + 0
* a = a | 0
* a = a & -1
* But, they should still zero high 32-bit.
*/
BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"sub32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"sub32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mul32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mul32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"div32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"div32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"or32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"or32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"and32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"and32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"lsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"lsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"rsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"rsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"neg32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mod32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mod32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"xor32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"xor32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mov32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mov32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end16 (to_le) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end32 (to_le) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end16 (to_be) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end32 (to_be) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_b zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_h zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_w zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},