selftests: bpf: break up test_progs - tracepoint
Move tracepoint prog tests into separate files.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent 615741d81d
commit 20cb14ff9c
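The split relies on the test_progs convention that each file under prog_tests/ exposes a single void test_<name>(void) entry point, which the harness declares and invokes on its behalf. As a rough illustration of that style of auto-registration, here is a minimal, self-contained X-macro sketch; the macro names and stub bodies are hypothetical, not the actual harness or Makefile mechanism:

#include <stdio.h>

/* Hypothetical test list; the real one is derived from prog_tests/*.c. */
#define TEST_LIST(X)		\
	X(get_stack_raw_tp)	\
	X(task_fd_query_rawtp)	\
	X(task_fd_query_tp)	\
	X(tp_attach_query)

/* Stub bodies stand in for the real tests moved by this commit. */
#define DEFINE_TEST(name) void test_##name(void) { printf("test_%s\n", #name); }
TEST_LIST(DEFINE_TEST)
#undef DEFINE_TEST

int main(void)
{
#define RUN_TEST(name) test_##name();
	TEST_LIST(RUN_TEST)
#undef RUN_TEST
	return 0;
}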
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c (new file)
@@ -0,0 +1,139 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

#define MAX_CNT_RAWTP 10ull
#define MAX_STACK_RAWTP 100
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

static int get_stack_print_output(void *data, int size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}
	if (!good_kern_stack || !good_user_stack)
		return LIBBPF_PERF_EVENT_ERROR;

	if (cnt == MAX_CNT_RAWTP)
		return LIBBPF_PERF_EVENT_DONE;

	return LIBBPF_PERF_EVENT_CONT;
}

void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
	struct perf_event_attr attr = {};
	struct timespec tv = {0, 10};
	__u32 key = 0, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  perfmap_fd, errno))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

	attr.sample_type = PERF_SAMPLE_RAW;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
			 -1/*group_fd*/, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
		  err, errno))
		goto close_prog;

	err = perf_event_mmap(pmu_fd);
	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

	err = perf_event_poller(pmu_fd, get_stack_print_output);
	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}
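For context, the userspace struct above mirrors what the BPF program in test_get_stack_rawtp.o emits through the "perfmap" perf event array. Below is a hedged sketch of that BPF side — close to, but not verbatim, the in-tree progs/test_get_stack_rawtp.c; note the per-CPU array used as scratch space, since the record is far larger than the 512-byte BPF stack:

#include <linux/bpf.h>
#include "bpf_helpers.h"

#define MAX_STACK_RAWTP 100
struct stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

struct bpf_map_def SEC("maps") perfmap = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 2,
};

/* Scratch buffer: the struct is too big for the BPF stack. */
struct bpf_map_def SEC("maps") stackdata_map = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct stack_trace_t),
	.max_entries = 1,
};

SEC("raw_tracepoint/sys_enter")
int bpf_prog1(void *ctx)
{
	int max_len = MAX_STACK_RAWTP * sizeof(__u64);
	int max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
	struct stack_trace_t *data;
	__u32 key = 0;

	data = bpf_map_lookup_elem(&stackdata_map, &key);
	if (!data)
		return 0;

	data->pid = bpf_get_current_pid_tgid();
	data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
					      max_len, 0);
	data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
					      BPF_F_USER_STACK);
	data->user_stack_buildid_size =
		bpf_get_stack(ctx, data->user_stack_buildid, max_buildid_len,
			      BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	bpf_perf_event_output(ctx, &perfmap, BPF_F_CURRENT_CPU,
			      data, sizeof(*data));
	return 0;
}

char _license[] SEC("license") = "GPL";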
tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c (new file)
@@ -0,0 +1,78 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

void test_task_fd_query_rawtp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	int efd, err, prog_fd;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	/* query (getpid(), efd) */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      strcmp(buf, "sys_enter") == 0;
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_prog;

	/* test zero len */
	len = 0;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	/* test empty buffer */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	/* test smaller buffer */
	len = 3;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
		  "err %d errno %d\n", err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter") &&
	      strcmp(buf, "sy") == 0;
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}
tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c (new file)
@@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

static void test_task_fd_query_tp_core(const char *probe_name,
				       const char *tp_name)
{
	const char *file = "./test_tracepoint.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	struct perf_event_attr attr = {};
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
		goto close_prog;

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	/* query (getpid(), pmu_fd) */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_pmu;

	close(pmu_fd);
	goto close_prog_noerr;

close_pmu:
	close(pmu_fd);
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

void test_task_fd_query_tp(void)
{
	test_task_fd_query_tp_core("sched/sched_switch",
				   "sched_switch");
	test_task_fd_query_tp_core("syscalls/sys_enter_read",
				   "sys_enter_read");
}
tools/testing/selftests/bpf/prog_tests/tp_attach_query.c (new file)
@@ -0,0 +1,132 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	i = num_progs - 1;
	for (; i >= 0; i--) {
	cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
	cleanup2:
		close(pmu_fd[i]);
	cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}
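For reference, the loop above sizes its allocation as sizeof(*query) plus room for num_progs ids because PERF_EVENT_IOC_QUERY_BPF takes a flexible-array argument: ids_len is the input capacity and prog_cnt the output count, which is why the "not enough space" case returns ENOSPC yet still reports prog_cnt == 2. The layout, from include/uapi/linux/perf_event.h:

struct perf_event_query_bpf {
	__u32	ids_len;	/* in: capacity of ids[] */
	__u32	prog_cnt;	/* out: number of programs attached */
	__u32	ids[0];		/* out: up to ids_len program ids */
};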
tools/testing/selftests/bpf/test_progs.c
@@ -531,136 +531,6 @@ static void test_obj_name(void)
	}
}

static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	i = num_progs - 1;
	for (; i >= 0; i--) {
	cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
	cleanup2:
		close(pmu_fd[i]);
	cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}

int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
@@ -748,299 +618,6 @@ int extract_build_id(char *build_id, size_t size)
	return -1;
}

#define MAX_CNT_RAWTP 10ull
#define MAX_STACK_RAWTP 100
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

static int get_stack_print_output(void *data, int size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}
	if (!good_kern_stack || !good_user_stack)
		return LIBBPF_PERF_EVENT_ERROR;

	if (cnt == MAX_CNT_RAWTP)
		return LIBBPF_PERF_EVENT_DONE;

	return LIBBPF_PERF_EVENT_CONT;
}

static void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
	struct perf_event_attr attr = {};
	struct timespec tv = {0, 10};
	__u32 key = 0, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  perfmap_fd, errno))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

	attr.sample_type = PERF_SAMPLE_RAW;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
			 -1/*group_fd*/, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
		  err, errno))
		goto close_prog;

	err = perf_event_mmap(pmu_fd);
	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

	err = perf_event_poller(pmu_fd, get_stack_print_output);
	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

static void test_task_fd_query_rawtp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	int efd, err, prog_fd;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	/* query (getpid(), efd) */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      strcmp(buf, "sys_enter") == 0;
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_prog;

	/* test zero len */
	len = 0;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	/* test empty buffer */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	/* test smaller buffer */
	len = 3;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
		  "err %d errno %d\n", err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter") &&
	      strcmp(buf, "sy") == 0;
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

static void test_task_fd_query_tp_core(const char *probe_name,
				       const char *tp_name)
{
	const char *file = "./test_tracepoint.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	struct perf_event_attr attr = {};
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
		goto close_prog;

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	/* query (getpid(), pmu_fd) */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_pmu;

	close(pmu_fd);
	goto close_prog_noerr;

close_pmu:
	close(pmu_fd);
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

static void test_task_fd_query_tp(void)
{
	test_task_fd_query_tp_core("sched/sched_switch",
				   "sched_switch");
	test_task_fd_query_tp_core("syscalls/sys_enter_read",
				   "sys_enter_read");
}

static int libbpf_debug_print(enum libbpf_print_level level,
			      const char *format, va_list args)
{
@@ -1423,10 +1000,6 @@ int main(void)
	test_tcp_estats();
	test_bpf_obj_id();
	test_obj_name();
	test_tp_attach_query();
	test_get_stack_raw_tp();
	test_task_fd_query_rawtp();
	test_task_fd_query_tp();
	test_reference_tracking();
	test_queue_stack_map(QUEUE);
	test_queue_stack_map(STACK);