bpf: Introduce bpf_prog ID
This patch generates a unique ID for each BPF_PROG_LOAD-ed prog. It is worth noting that each BPF_PROG_LOAD-ed prog gets a different ID even if the programs have the same bpf instructions.

The ID is generated by the existing idr_alloc_cyclic(). The ID ranges over [1, INT_MAX). It is allocated in a cyclic manner, so an ID only gets reused after roughly 2 billion BPF_PROG_LOAD calls.

bpf_prog_alloc_id() is done after bpf_prog_select_runtime() because the jit process may have allocated a new prog. Hence, we need to ensure the value of the 'prog' pointer will not change any more before storing the prog in prog_idr. After bpf_prog_select_runtime(), the prog is read-only. Hence, the id is stored in 'struct bpf_prog_aux'.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 8ea4fae926
commit dc4bb0e235
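For context, here is a minimal illustrative sketch of the idr_alloc_cyclic() pattern the commit message describes. The example_* names are placeholders invented for this sketch and are not part of the patch; the patch itself uses prog_idr/prog_idr_lock as shown in the diff below.

/*
 * Illustrative only: idr_alloc_cyclic() returns an ID in [start, end)
 * on success, or a negative errno on failure.  It resumes the search
 * after the last ID it handed out, so an ID is only reused once the
 * ID space wraps around.
 */
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_idr_lock);

static int example_alloc_id(void *obj)
{
	int id;

	spin_lock_bh(&example_idr_lock);
	/* GFP_ATOMIC: no sleeping allocations while holding the spinlock. */
	id = idr_alloc_cyclic(&example_idr, obj, 1, INT_MAX, GFP_ATOMIC);
	spin_unlock_bh(&example_idr_lock);

	return id;	/* >= 1 on success, negative errno on failure */
}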
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -172,6 +172,7 @@ struct bpf_prog_aux {
 	u32 used_map_cnt;
 	u32 max_ctx_offset;
 	u32 stack_depth;
+	u32 id;
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
 	const struct bpf_verifier_ops *ops;
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -22,8 +22,11 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 #include <linux/kernel.h>
+#include <linux/idr.h>

 DEFINE_PER_CPU(int, bpf_prog_active);
+static DEFINE_IDR(prog_idr);
+static DEFINE_SPINLOCK(prog_idr_lock);

 int sysctl_unprivileged_bpf_disabled __read_mostly;

@@ -650,6 +653,34 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 	free_uid(user);
 }

+static int bpf_prog_alloc_id(struct bpf_prog *prog)
+{
+	int id;
+
+	spin_lock_bh(&prog_idr_lock);
+	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
+	if (id > 0)
+		prog->aux->id = id;
+	spin_unlock_bh(&prog_idr_lock);
+
+	/* id is in [1, INT_MAX) */
+	if (WARN_ON_ONCE(!id))
+		return -ENOSPC;
+
+	return id > 0 ? 0 : id;
+}
+
+static void bpf_prog_free_id(struct bpf_prog *prog)
+{
+	/* cBPF to eBPF migrations are currently not in the idr store. */
+	if (!prog->aux->id)
+		return;
+
+	spin_lock_bh(&prog_idr_lock);
+	idr_remove(&prog_idr, prog->aux->id);
+	spin_unlock_bh(&prog_idr_lock);
+}
+
 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
@@ -663,6 +694,7 @@ void bpf_prog_put(struct bpf_prog *prog)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		trace_bpf_prog_put_rcu(prog);
+		bpf_prog_free_id(prog);
 		bpf_prog_kallsyms_del(prog);
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
@@ -857,15 +889,21 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err < 0)
 		goto free_used_maps;

+	err = bpf_prog_alloc_id(prog);
+	if (err)
+		goto free_used_maps;
+
 	err = bpf_prog_new_fd(prog);
 	if (err < 0)
 		/* failed to allocate fd */
-		goto free_used_maps;
+		goto free_id;

 	bpf_prog_kallsyms_add(prog);
 	trace_bpf_prog_load(prog, err);
 	return err;

+free_id:
+	bpf_prog_free_id(prog);
 free_used_maps:
 	free_used_maps(prog->aux);
 free_prog: