// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>

struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
};

struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * this is assumed by {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};

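/* Find the next task whose tid in @ns is >= *tid, take a reference on it,
 * and report the tid that was actually found back through *tid.  With
 * @skip_if_dup_files set, non-leader threads that share their files_struct
 * with the group leader are skipped, so the task_file iterator visits each
 * fd table only once.
 */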
static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
					     u32 *tid,
					     bool skip_if_dup_files)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	rcu_read_lock();
retry:
	pid = find_ge_pid(*tid, ns);
	if (pid) {
		*tid = pid_nr_ns(pid, ns);
		task = get_pid_task(pid, PIDTYPE_PID);
		if (!task) {
			++*tid;
			goto retry;
		} else if (skip_if_dup_files && task->tgid != task->pid &&
			   task->files == task->group_leader->files) {
			put_task_struct(task);
			task = NULL;
			++*tid;
			goto retry;
		}
	}
	rcu_read_unlock();

	return task;
}

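/* seq_file callbacks for the "task" target: start/next return a referenced
 * task_struct, show runs the attached BPF program on it, and stop either
 * drops that reference or, once the walk has finished, gives the program a
 * final pass with a NULL task.
 */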
static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(info->common.ns, &info->tid, false);
	if (!task)
		return NULL;

	if (*pos == 0)
		++*pos;
	return task;
}

static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(info->common.ns, &info->tid, false);
	if (!task)
		return NULL;

	return task;
}

struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

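/* BPF programs consume this context by attaching to the "iter/task"
 * section.  A minimal BPF-side sketch (illustrative only; assumes a
 * libbpf-style build, and "dump_task" is a hypothetical program name):
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(seq, "%d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */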
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	meta.seq = seq;
	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}

static void task_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}

static const struct seq_operations task_seq_ops = {
	.start = task_seq_start,
	.next = task_seq_next,
	.stop = task_seq_stop,
	.show = task_seq_show,
};

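/* Private state for the "task_file" target.  task/files/tid/fd record the
 * current position so a walk can be resumed across bpf_seq_read() calls;
 * task and files hold references while they are non-NULL.
 */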
struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * this is assumed by {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct files_struct *files;
	u32 tid;
	u32 fd;
};

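/* Return the next file to show, with references held on the file, its
 * owning task and its files_struct (the latter two are cached in *info).
 * Threads that share the group leader's fd table are filtered out via
 * task_seq_get_next(..., skip_if_dup_files = true), so the same fd table
 * is not dumped once per thread.
 */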
static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
	struct pid_namespace *ns = info->common.ns;
	u32 curr_tid = info->tid, max_fds;
	struct files_struct *curr_files;
	struct task_struct *curr_task;
	int curr_fd = info->fd;

	/* If this function returns a non-NULL file object,
	 * it held a reference to the task/files_struct/file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (info->task) {
		curr_task = info->task;
		curr_files = info->files;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(ns, &curr_tid, true);
		if (!curr_task) {
			info->task = NULL;
			info->files = NULL;
			info->tid = curr_tid;
			return NULL;
		}

		curr_files = get_files_struct(curr_task);
		if (!curr_files) {
			put_task_struct(curr_task);
			curr_tid = curr_tid + 1;
			info->fd = 0;
			goto again;
		}

		info->files = curr_files;
		info->task = curr_task;
		if (curr_tid == info->tid) {
			curr_fd = info->fd;
		} else {
			info->tid = curr_tid;
			curr_fd = 0;
		}
	}

	rcu_read_lock();
	max_fds = files_fdtable(curr_files)->max_fds;
	for (; curr_fd < max_fds; curr_fd++) {
		struct file *f;

		f = fcheck_files(curr_files, curr_fd);
		if (!f)
			continue;
		if (!get_file_rcu(f))
			continue;

		/* set info->fd */
		info->fd = curr_fd;
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_files_struct(curr_files);
	put_task_struct(curr_task);
	info->task = NULL;
	info->files = NULL;
	info->fd = 0;
	curr_tid = ++(info->tid);
	goto again;
}

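/* seq_file callbacks for the "task_file" target.  start clears the cached
 * task/files position before fetching the first file; next advances the fd
 * cursor and releases the previously returned file before fetching the
 * following one.
 */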
static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct file *file;

	info->task = NULL;
	info->files = NULL;
	file = task_file_seq_get_next(info);
	if (file && *pos == 0)
		++*pos;

	return file;
}

static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	return task_file_seq_get_next(info);
}

struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)

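/* BPF programs attach to the "iter/task_file" section to walk every
 * (task, fd, file) triple.  A minimal BPF-side sketch (illustrative only;
 * "dump_task_file" is a hypothetical program name):
 *
 *	SEC("iter/task_file")
 *	int dump_task_file(struct bpf_iter__task_file *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		struct file *file = ctx->file;
 *
 *		if (task && file)
 *			BPF_SEQ_PRINTF(seq, "%8d %8d %lx\n", task->tgid,
 *				       ctx->fd, (long)file->f_op);
 *		return 0;
 *	}
 */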
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}

static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_files_struct(info->files);
		put_task_struct(info->task);
		info->files = NULL;
		info->task = NULL;
	}
}

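/* Both targets share the same seq private-data setup: pin the pid namespace
 * of the current task when the iterator's seq file is set up and release it
 * when the seq file goes away.  priv_data points at the
 * bpf_iter_seq_task_common that sits first in either *_info struct.
 */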
static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	return 0;
}

static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}

static const struct seq_operations task_file_seq_ops = {
	.start = task_file_seq_start,
	.next = task_file_seq_next,
	.stop = task_file_seq_stop,
	.show = task_file_seq_show,
};

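/* Resolve the BTF type IDs of struct task_struct and struct file at build
 * time; task_iter_init() plugs them into the ctx_arg_info entries below so
 * the verifier can type-check the iterator programs' context pointers.
 */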
BTF_ID_LIST(btf_task_file_ids)
BTF_ID(struct, task_struct)
BTF_ID(struct, file)

static const struct bpf_iter_seq_info task_seq_info = {
	.seq_ops = &task_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_info),
};

static struct bpf_iter_reg task_reg_info = {
	.target = "task",
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task, task),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_seq_info,
};

static const struct bpf_iter_seq_info task_file_seq_info = {
	.seq_ops = &task_file_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info),
};

static struct bpf_iter_reg task_file_reg_info = {
	.target = "task_file",
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task_file, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_file, file),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_file_seq_info,
};

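/* Register both iterator targets with the bpf_iter infrastructure at boot. */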
static int __init task_iter_init(void)
{
	int ret;

	task_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
	task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
	return bpf_iter_reg_target(&task_file_reg_info);
}
late_initcall(task_iter_init);