bpf: sockmap, add selftests
This adds a new test program, test_sockmap, which is the old sample sockmap program. By moving the sample program here we can now run it as part of the selftests suite.

To support this, a populate_progs() routine is added to load programs and maps, which was previously done with load_bpf_file(). This is needed because the selftest libs do not provide a similar routine. We also now use the cgroup_helpers routines to manage the cgroup instead of manually creating one and supplying it to the CLI. Note that we keep the CLI around, though, because it is useful for debugging and specialized testing.

To run, use ./test_sockmap and the result should be:

  Summary 660 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 5d9ffeae5e
commit 16962b2404
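The populate_progs() routine itself lives in the large test_sockmap diff that is suppressed further down, so it is not visible on this page. As a rough, hypothetical sketch (not this commit's actual code), a loader of that kind could be built on libbpf's object API; only the "sock_map" name below matches a map declared in the BPF program at the end of this page, while the function name, array size and error handling are assumptions:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>		/* plain "libbpf.h" when built in-tree */

#define MAX_TEST_PROGS 16

static int prog_fd[MAX_TEST_PROGS];
static int sock_map_fd;

/* Hypothetical populate_progs()-style loader: open and load the object
 * once, then collect program and map fds for later attach/update calls. */
static int populate_progs(const char *bpf_file)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_map *map;
	int i = 0;

	obj = bpf_object__open(bpf_file);
	if (libbpf_get_error(obj))
		return -1;

	if (bpf_object__load(obj)) {
		fprintf(stderr, "failed to load %s\n", bpf_file);
		return -1;
	}

	/* One fd per program (sk_skb, sockops and sk_msg sections). */
	bpf_object__for_each_program(prog, obj) {
		if (i < MAX_TEST_PROGS)
			prog_fd[i++] = bpf_program__fd(prog);
	}

	/* Resolve maps by the names used in the BPF source. */
	map = bpf_object__find_map_by_name(obj, "sock_map");
	if (!map)
		return -1;
	sock_map_fd = bpf_map__fd(map);

	return 0;
}

A loader along these lines would typically be called from main() before the programs are attached (for example with bpf_prog_attach()) and before the cgroup_helpers routines set up the test cgroup.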
@@ -894,6 +894,7 @@ enum bpf_func_id {
/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)
#define BPF_F_SEQ_NUMBER		(1ULL << 3)

/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
@@ -941,4 +941,43 @@ enum {
	IFLA_EVENT_BONDING_OPTIONS,	/* change in bonding options */
};

/* tun section */

enum {
	IFLA_TUN_UNSPEC,
	IFLA_TUN_OWNER,
	IFLA_TUN_GROUP,
	IFLA_TUN_TYPE,
	IFLA_TUN_PI,
	IFLA_TUN_VNET_HDR,
	IFLA_TUN_PERSIST,
	IFLA_TUN_MULTI_QUEUE,
	IFLA_TUN_NUM_QUEUES,
	IFLA_TUN_NUM_DISABLED_QUEUES,
	__IFLA_TUN_MAX,
};

#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1)

/* rmnet section */

#define RMNET_FLAGS_INGRESS_DEAGGREGATION         (1U << 0)
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS          (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4           (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4            (1U << 3)

enum {
	IFLA_RMNET_UNSPEC,
	IFLA_RMNET_MUX_ID,
	IFLA_RMNET_FLAGS,
	__IFLA_RMNET_MAX,
};

#define IFLA_RMNET_MAX	(__IFLA_RMNET_MAX - 1)

struct ifla_rmnet_flags {
	__u32	flags;
	__u32	mask;
};

#endif /* _UAPI_LINUX_IF_LINK_H */
@@ -1961,8 +1961,8 @@ BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);

-static void bpf_program__set_expected_attach_type(struct bpf_program *prog,
-						   enum bpf_attach_type type)
+void bpf_program__set_expected_attach_type(struct bpf_program *prog,
+					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
@@ -193,6 +193,8 @@ int bpf_program__set_sched_act(struct bpf_program *prog);
int bpf_program__set_xdp(struct bpf_program *prog);
int bpf_program__set_perf_event(struct bpf_program *prog);
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type);
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type);

bool bpf_program__is_socket_filter(struct bpf_program *prog);
bool bpf_program__is_tracepoint(struct bpf_program *prog);
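Together with the libbpf.c change above, this hunk exports bpf_program__set_expected_attach_type() so callers outside libbpf can set the expected attach type before loading. A minimal caller-side sketch (an assumed example, not part of this commit) for a connect4-style program:

#include <linux/bpf.h>
#include <bpf/libbpf.h>		/* plain "libbpf.h" when built in-tree */

/* Illustrative only: set both the program type and the expected attach
 * type before bpf_object__load() so the verifier checks the right context. */
static void setup_connect4_prog(struct bpf_program *prog)
{
	bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
	bpf_program__set_expected_attach_type(prog, BPF_CGROUP_INET4_CONNECT);
}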
@@ -24,7 +24,7 @@ urandom_read: urandom_read.c
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
	test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
-	test_sock test_btf
+	test_sock test_btf test_sockmap

TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
	test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
@@ -32,7 +32,7 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
	test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
	sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
	sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
-	test_btf_haskv.o test_btf_nokv.o
+	test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o

# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -56,6 +56,7 @@ $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a
$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
$(OUTPUT)/test_sock: cgroup_helpers.c
$(OUTPUT)/test_sock_addr: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c

.PHONY: force
(File diff suppressed because it is too large.)
@@ -0,0 +1,340 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018 Covalent IO, Inc. http://covalent.io
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

/* Sockmap sample program connects a client and a backend together
 * using cgroups.
 *
 * client:X <---> frontend:80 client:X <---> backend:80
 *
 * For simplicity we hard code values here and bind 1:1. The hard
 * coded values are part of the setup in sockmap.sh script that
 * is associated with this BPF program.
 *
 * The bpf_printk is verbose and prints information as connections
 * are established and verdicts are decided.
 */

#define bpf_printk(fmt, ...)					\
({								\
	char ____fmt[] = fmt;					\
	bpf_trace_printk(____fmt, sizeof(____fmt),		\
			 ##__VA_ARGS__);			\
})

struct bpf_map_def SEC("maps") sock_map = {
	.type = BPF_MAP_TYPE_SOCKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 20,
};

struct bpf_map_def SEC("maps") sock_map_txmsg = {
	.type = BPF_MAP_TYPE_SOCKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 20,
};

struct bpf_map_def SEC("maps") sock_map_redir = {
	.type = BPF_MAP_TYPE_SOCKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 20,
};

struct bpf_map_def SEC("maps") sock_apply_bytes = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 1
};

struct bpf_map_def SEC("maps") sock_cork_bytes = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 1
};

struct bpf_map_def SEC("maps") sock_pull_bytes = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 2
};

struct bpf_map_def SEC("maps") sock_redir_flags = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 1
};

struct bpf_map_def SEC("maps") sock_skb_opts = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 1
};

SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
	return skb->len;
}

SEC("sk_skb2")
int bpf_prog2(struct __sk_buff *skb)
{
	__u32 lport = skb->local_port;
	__u32 rport = skb->remote_port;
	int len, *f, ret, zero = 0;
	__u64 flags = 0;

	if (lport == 10000)
		ret = 10;
	else
		ret = 1;

	len = (__u32)skb->data_end - (__u32)skb->data;
	f = bpf_map_lookup_elem(&sock_skb_opts, &zero);
	if (f && *f) {
		ret = 3;
		flags = *f;
	}

	bpf_printk("sk_skb2: redirect(%iB) flags=%i\n",
		   len, flags);
	return bpf_sk_redirect_map(skb, &sock_map, ret, flags);
}

SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
	__u32 lport, rport;
	int op, err = 0, index, key, ret;


	op = (int) skops->op;

	switch (op) {
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		lport = skops->local_port;
		rport = skops->remote_port;

		if (lport == 10000) {
			ret = 1;
			err = bpf_sock_map_update(skops, &sock_map, &ret,
						  BPF_NOEXIST);
			bpf_printk("passive(%i -> %i) map ctx update err: %d\n",
				   lport, bpf_ntohl(rport), err);
		}
		break;
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
		lport = skops->local_port;
		rport = skops->remote_port;

		if (bpf_ntohl(rport) == 10001) {
			ret = 10;
			err = bpf_sock_map_update(skops, &sock_map, &ret,
						  BPF_NOEXIST);
			bpf_printk("active(%i -> %i) map ctx update err: %d\n",
				   lport, bpf_ntohl(rport), err);
		}
		break;
	default:
		break;
	}

	return 0;
}

SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
	int *bytes, zero = 0, one = 1;
	int *start, *end;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);
	start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);
	return SK_PASS;
}

SEC("sk_msg2")
int bpf_prog5(struct sk_msg_md *msg)
{
	int err1 = -1, err2 = -1, zero = 0, one = 1;
	int *bytes, *start, *end, len1, len2;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		err1 = bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		err2 = bpf_msg_cork_bytes(msg, *bytes);
	len1 = (__u64)msg->data_end - (__u64)msg->data;
	start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
	if (start && end) {
		int err;

		bpf_printk("sk_msg2: pull(%i:%i)\n",
			   start ? *start : 0, end ? *end : 0);
		err = bpf_msg_pull_data(msg, *start, *end, 0);
		if (err)
			bpf_printk("sk_msg2: pull_data err %i\n",
				   err);
		len2 = (__u64)msg->data_end - (__u64)msg->data;
		bpf_printk("sk_msg2: length update %i->%i\n",
			   len1, len2);
	}
	bpf_printk("sk_msg2: data length %i err1 %i err2 %i\n",
		   len1, err1, err2);
	return SK_PASS;
}

SEC("sk_msg3")
int bpf_prog6(struct sk_msg_md *msg)
{
	int *bytes, zero = 0, one = 1, key = 0;
	int *start, *end, *f;
	__u64 flags = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);
	start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);
	f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
	if (f && *f) {
		key = 2;
		flags = *f;
	}
	return bpf_msg_redirect_map(msg, &sock_map_redir, key, flags);
}

SEC("sk_msg4")
int bpf_prog7(struct sk_msg_md *msg)
{
	int err1 = 0, err2 = 0, zero = 0, one = 1, key = 0;
	int *f, *bytes, *start, *end, len1, len2;
	__u64 flags = 0;

	int err;
	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		err1 = bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		err2 = bpf_msg_cork_bytes(msg, *bytes);
	len1 = (__u64)msg->data_end - (__u64)msg->data;
	start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
	if (start && end) {

		bpf_printk("sk_msg2: pull(%i:%i)\n",
			   start ? *start : 0, end ? *end : 0);
		err = bpf_msg_pull_data(msg, *start, *end, 0);
		if (err)
			bpf_printk("sk_msg2: pull_data err %i\n",
				   err);
		len2 = (__u64)msg->data_end - (__u64)msg->data;
		bpf_printk("sk_msg2: length update %i->%i\n",
			   len1, len2);
	}
	f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
	if (f && *f) {
		key = 2;
		flags = *f;
	}
	bpf_printk("sk_msg3: redirect(%iB) flags=%i err=%i\n",
		   len1, flags, err1 ? err1 : err2);
	err = bpf_msg_redirect_map(msg, &sock_map_redir, key, flags);
	bpf_printk("sk_msg3: err %i\n", err);
	return err;
}

SEC("sk_msg5")
int bpf_prog8(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
	void *data = (void *)(long) msg->data;
	int ret = 0, *bytes, zero = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes) {
		ret = bpf_msg_apply_bytes(msg, *bytes);
		if (ret)
			return SK_DROP;
	} else {
		return SK_DROP;
	}
	return SK_PASS;
}
SEC("sk_msg6")
int bpf_prog9(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
	void *data = (void *)(long) msg->data;
	int ret = 0, *bytes, zero = 0;

	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes) {
		if (((__u64)data_end - (__u64)data) >= *bytes)
			return SK_PASS;
		ret = bpf_msg_cork_bytes(msg, *bytes);
		if (ret)
			return SK_DROP;
	}
	return SK_PASS;
}

SEC("sk_msg7")
int bpf_prog10(struct sk_msg_md *msg)
{
	int *bytes, zero = 0, one = 1;
	int *start, *end;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);
	start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);

	return SK_DROP;
}

int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
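The user-space test_sockmap program (its diff is suppressed above) drives the array maps declared here to select the apply/cork/pull behaviour exercised by each test. As an assumed illustration of how one such knob could be set from user space, given a map fd resolved as in the earlier sketch:

#include <linux/bpf.h>
#include <bpf/bpf.h>		/* plain "bpf.h" when built in-tree */

/* Illustrative only: ask the sk_msg programs to apply their verdict to
 * the first 'bytes' bytes by writing into the sock_apply_bytes array. */
static int set_apply_bytes(int sock_apply_bytes_fd, int bytes)
{
	int key = 0;

	return bpf_map_update_elem(sock_apply_bytes_fd, &key, &bytes, BPF_ANY);
}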