mirror of https://gitee.com/openkylin/linux.git
Merge branch 'bpf-global-funcs'

Alexei Starovoitov says:

====================
Introduce static vs global functions and function by function verification.
This is another step toward dynamic re-linking (or replacement) of global
functions. See patch 2 for details.

v2->v3:
- cleaned up a check spotted by Song.
- rebased and dropped patch 2 that was trying to improve BTF based on ELF.
- added one more unit test for scalar return value from global func.

v1->v2:
- addressed review comments from Song, Andrii, Yonghong
- fixed memory leak in error path
- added modified ctx check
- added more tests in patch 7
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

This commit is contained in: commit 7a2d070f91
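The selftests added at the end of this diff exercise the new behavior in detail. As a minimal sketch only (a hypothetical program, not part of the series; function and section names are illustrative), the distinction is: a static subprogram is verified inline with each caller, while a global (non-static) subprogram is verified once, independently, against its BTF prototype, and calls to it are only checked for a type match:

```c
// Hypothetical illustration of static vs global BPF subprograms.
// Assumes clang with BTF (-g) and a libbpf that supports BTF_FUNC_GLOBAL.
#include <linux/bpf.h>
#include "bpf_helpers.h"

static __attribute__((noinline))
int sub_static(struct __sk_buff *skb)           /* emitted as BTF_FUNC_STATIC */
{
	return skb->len;
}

__attribute__((noinline))
int sub_global(int val, struct __sk_buff *skb)  /* emitted as BTF_FUNC_GLOBAL */
{
	/* verified once for any scalar 'val' and a valid ctx pointer */
	return val + skb->len;
}

SEC("classifier/example")
int example_cls(struct __sk_buff *skb)
{
	/* the call to sub_global() is only checked for a BTF type match */
	return sub_static(skb) + sub_global(3, skb);
}

char _license[] SEC("license") = "GPL";
```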
@@ -566,6 +566,7 @@ static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
 #endif
 
 struct bpf_func_info_aux {
+	u16 linkage;
 	bool unreliable;
 };
 
@@ -1081,7 +1082,11 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
 			   const char *func_name,
 			   struct btf_func_model *m);
 
-int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
+struct bpf_reg_state;
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+			     struct bpf_reg_state *regs);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
+			  struct bpf_reg_state *reg);
 
 struct bpf_prog *bpf_prog_by_id(u32 id);
 
@@ -304,11 +304,13 @@ struct bpf_insn_aux_data {
 	u64 map_key_state; /* constant (32 bit) key tracking for maps */
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	int sanitize_stack_off; /* stack slot to be cleared */
-	bool seen; /* this insn was processed by the verifier */
+	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
 	bool zext_dst; /* this insn zero extends dst reg */
 	u8 alu_state; /* used in combination with alu_limit */
-	bool prune_point;
+
+	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
+	bool prune_point;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -379,6 +381,7 @@ struct bpf_verifier_env {
 		int *insn_stack;
 		int cur_stack;
 	} cfg;
+	u32 pass_cnt; /* number of times do_check() was called */
 	u32 subprog_cnt;
 	/* number of instructions analyzed by the verifier */
 	u32 prev_insn_processed, insn_processed;
@@ -428,4 +431,7 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
 void
 bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
 
+int check_ctx_reg(struct bpf_verifier_env *env,
+		  const struct bpf_reg_state *reg, int regno);
+
 #endif /* _LINUX_BPF_VERIFIER_H */
@@ -146,6 +146,12 @@ enum {
 	BTF_VAR_GLOBAL_EXTERN = 2,
 };
 
+enum btf_func_linkage {
+	BTF_FUNC_STATIC = 0,
+	BTF_FUNC_GLOBAL = 1,
+	BTF_FUNC_EXTERN = 2,
+};
+
 /* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
  * additional information related to the variable such as its linkage.
  */
175 kernel/bpf/btf.c
@@ -2651,8 +2651,8 @@ static s32 btf_func_check_meta(struct btf_verifier_env *env,
 		return -EINVAL;
 	}
 
-	if (btf_type_vlen(t)) {
-		btf_verifier_log_type(env, t, "vlen != 0");
+	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
+		btf_verifier_log_type(env, t, "Invalid func linkage");
 		return -EINVAL;
 	}
 
@@ -3506,7 +3506,8 @@ static u8 bpf_ctx_convert_map[] = {
 
 static const struct btf_member *
 btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
-		      const struct btf_type *t, enum bpf_prog_type prog_type)
+		      const struct btf_type *t, enum bpf_prog_type prog_type,
+		      int arg)
 {
 	const struct btf_type *conv_struct;
 	const struct btf_type *ctx_struct;
@@ -3527,12 +3528,13 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
 		 * is not supported yet.
 		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
 		 */
-		bpf_log(log, "BPF program ctx type is not a struct\n");
+		if (log->level & BPF_LOG_LEVEL)
+			bpf_log(log, "arg#%d type is not a struct\n", arg);
 		return NULL;
 	}
 	tname = btf_name_by_offset(btf, t->name_off);
 	if (!tname) {
-		bpf_log(log, "BPF program ctx struct doesn't have a name\n");
+		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
 		return NULL;
 	}
 	/* prog_type is valid bpf program type. No need for bounds check. */
@@ -3565,11 +3567,12 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
 				    struct btf *btf,
 				    const struct btf_type *t,
-				    enum bpf_prog_type prog_type)
+				    enum bpf_prog_type prog_type,
+				    int arg)
 {
 	const struct btf_member *prog_ctx_type, *kern_ctx_type;
 
-	prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type);
+	prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
 	if (!prog_ctx_type)
 		return -ENOENT;
 	kern_ctx_type = prog_ctx_type + 1;
@@ -3731,7 +3734,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 	info->reg_type = PTR_TO_BTF_ID;
 
 	if (tgt_prog) {
-		ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type);
+		ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg);
 		if (ret > 0) {
 			info->btf_id = ret;
 			return true;
@@ -4112,11 +4115,16 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
 	return 0;
 }
 
-int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog)
+/* Compare BTF of a function with given bpf_reg_state.
+ * Returns:
+ * EFAULT - there is a verifier bug. Abort verification.
+ * EINVAL - there is a type mismatch or BTF is not available.
+ * 0 - BTF matches with what bpf_reg_state expects.
+ * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
+ */
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+			     struct bpf_reg_state *reg)
 {
-	struct bpf_verifier_state *st = env->cur_state;
-	struct bpf_func_state *func = st->frame[st->curframe];
-	struct bpf_reg_state *reg = func->regs;
 	struct bpf_verifier_log *log = &env->log;
 	struct bpf_prog *prog = env->prog;
 	struct btf *btf = prog->aux->btf;
@@ -4126,27 +4134,30 @@ int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog)
 	const char *tname;
 
 	if (!prog->aux->func_info)
-		return 0;
+		return -EINVAL;
 
 	btf_id = prog->aux->func_info[subprog].type_id;
 	if (!btf_id)
-		return 0;
+		return -EFAULT;
 
 	if (prog->aux->func_info_aux[subprog].unreliable)
-		return 0;
+		return -EINVAL;
 
 	t = btf_type_by_id(btf, btf_id);
 	if (!t || !btf_type_is_func(t)) {
-		bpf_log(log, "BTF of subprog %d doesn't point to KIND_FUNC\n",
+		/* These checks were already done by the verifier while loading
+		 * struct bpf_func_info
+		 */
+		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
 			subprog);
-		return -EINVAL;
+		return -EFAULT;
 	}
 	tname = btf_name_by_offset(btf, t->name_off);
 
 	t = btf_type_by_id(btf, t->type);
 	if (!t || !btf_type_is_func_proto(t)) {
-		bpf_log(log, "Invalid type of func %s\n", tname);
-		return -EINVAL;
+		bpf_log(log, "Invalid BTF of func %s\n", tname);
+		return -EFAULT;
 	}
 	args = (const struct btf_param *)(t + 1);
 	nargs = btf_type_vlen(t);
@@ -4172,25 +4183,127 @@ int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog)
 			bpf_log(log, "R%d is not a pointer\n", i + 1);
 			goto out;
 		}
-		/* If program is passing PTR_TO_CTX into subprogram
-		 * check that BTF type matches.
+		/* If function expects ctx type in BTF check that caller
+		 * is passing PTR_TO_CTX.
 		 */
-		if (reg[i + 1].type == PTR_TO_CTX &&
-		    !btf_get_prog_ctx_type(log, btf, t, prog->type))
-			goto out;
-		/* All other pointers are ok */
-		continue;
+		if (btf_get_prog_ctx_type(log, btf, t, prog->type, i)) {
+			if (reg[i + 1].type != PTR_TO_CTX) {
+				bpf_log(log,
+					"arg#%d expected pointer to ctx, but got %s\n",
+					i, btf_kind_str[BTF_INFO_KIND(t->info)]);
+				goto out;
+			}
+			if (check_ctx_reg(env, &reg[i + 1], i + 1))
+				goto out;
+			continue;
+		}
 		}
-		bpf_log(log, "Unrecognized argument type %s\n",
-			btf_kind_str[BTF_INFO_KIND(t->info)]);
+		bpf_log(log, "Unrecognized arg#%d type %s\n",
+			i, btf_kind_str[BTF_INFO_KIND(t->info)]);
 		goto out;
 	}
 	return 0;
 out:
-	/* LLVM optimizations can remove arguments from static functions. */
-	bpf_log(log,
-		"Type info disagrees with actual arguments due to compiler optimizations\n");
+	/* Compiler optimizations can remove arguments from static functions
+	 * or mismatched type can be passed into a global function.
+	 * In such cases mark the function as unreliable from BTF point of view.
+	 */
 	prog->aux->func_info_aux[subprog].unreliable = true;
 	return -EINVAL;
 }
+
+/* Convert BTF of a function into bpf_reg_state if possible
+ * Returns:
+ * EFAULT - there is a verifier bug. Abort verification.
+ * EINVAL - cannot convert BTF.
+ * 0 - Successfully converted BTF into bpf_reg_state
+ * (either PTR_TO_CTX or SCALAR_VALUE).
+ */
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
+			  struct bpf_reg_state *reg)
+{
+	struct bpf_verifier_log *log = &env->log;
+	struct bpf_prog *prog = env->prog;
+	struct btf *btf = prog->aux->btf;
+	const struct btf_param *args;
+	const struct btf_type *t;
+	u32 i, nargs, btf_id;
+	const char *tname;
+
+	if (!prog->aux->func_info ||
+	    prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
+		bpf_log(log, "Verifier bug\n");
+		return -EFAULT;
+	}
+
+	btf_id = prog->aux->func_info[subprog].type_id;
+	if (!btf_id) {
+		bpf_log(log, "Global functions need valid BTF\n");
+		return -EFAULT;
+	}
+
+	t = btf_type_by_id(btf, btf_id);
+	if (!t || !btf_type_is_func(t)) {
+		/* These checks were already done by the verifier while loading
+		 * struct bpf_func_info
+		 */
+		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
+			subprog);
+		return -EFAULT;
+	}
+	tname = btf_name_by_offset(btf, t->name_off);
+
+	if (log->level & BPF_LOG_LEVEL)
+		bpf_log(log, "Validating %s() func#%d...\n",
+			tname, subprog);
+
+	if (prog->aux->func_info_aux[subprog].unreliable) {
+		bpf_log(log, "Verifier bug in function %s()\n", tname);
+		return -EFAULT;
+	}
+
+	t = btf_type_by_id(btf, t->type);
+	if (!t || !btf_type_is_func_proto(t)) {
+		bpf_log(log, "Invalid type of function %s()\n", tname);
+		return -EFAULT;
+	}
+	args = (const struct btf_param *)(t + 1);
+	nargs = btf_type_vlen(t);
+	if (nargs > 5) {
+		bpf_log(log, "Global function %s() with %d > 5 args. Buggy compiler.\n",
+			tname, nargs);
+		return -EINVAL;
+	}
+	/* check that function returns int */
+	t = btf_type_by_id(btf, t->type);
+	while (btf_type_is_modifier(t))
+		t = btf_type_by_id(btf, t->type);
+	if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
+		bpf_log(log,
+			"Global function %s() doesn't return scalar. Only those are supported.\n",
+			tname);
+		return -EINVAL;
+	}
+	/* Convert BTF function arguments into verifier types.
+	 * Only PTR_TO_CTX and SCALAR are supported atm.
+	 */
+	for (i = 0; i < nargs; i++) {
+		t = btf_type_by_id(btf, args[i].type);
+		while (btf_type_is_modifier(t))
+			t = btf_type_by_id(btf, t->type);
+		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
+			reg[i + 1].type = SCALAR_VALUE;
+			continue;
+		}
+		if (btf_type_is_ptr(t) &&
+		    btf_get_prog_ctx_type(log, btf, t, prog->type, i)) {
+			reg[i + 1].type = PTR_TO_CTX;
+			continue;
+		}
+		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
+			i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
+		return -EINVAL;
+	}
+	return 0;
+}
@@ -1122,10 +1122,6 @@ static void init_reg_state(struct bpf_verifier_env *env,
 	regs[BPF_REG_FP].type = PTR_TO_STACK;
 	mark_reg_known_zero(env, regs, BPF_REG_FP);
 	regs[BPF_REG_FP].frameno = state->frameno;
-
-	/* 1st arg to a function */
-	regs[BPF_REG_1].type = PTR_TO_CTX;
-	mark_reg_known_zero(env, regs, BPF_REG_1);
 }
 
 #define BPF_MAIN_FUNC (-1)
@@ -2739,8 +2735,8 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
 }
 #endif
 
-static int check_ctx_reg(struct bpf_verifier_env *env,
-			 const struct bpf_reg_state *reg, int regno)
+int check_ctx_reg(struct bpf_verifier_env *env,
+		  const struct bpf_reg_state *reg, int regno)
 {
 	/* Access to ctx or passing it to a helper is only allowed in
 	 * its original, unmodified form.
@@ -3956,12 +3952,26 @@ static int release_reference(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static void clear_caller_saved_regs(struct bpf_verifier_env *env,
+				    struct bpf_reg_state *regs)
+{
+	int i;
+
+	/* after the call registers r0 - r5 were scratched */
+	for (i = 0; i < CALLER_SAVED_REGS; i++) {
+		mark_reg_not_init(env, regs, caller_saved[i]);
+		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+	}
+}
+
 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			   int *insn_idx)
 {
 	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_func_info_aux *func_info_aux;
 	struct bpf_func_state *caller, *callee;
 	int i, err, subprog, target_insn;
+	bool is_global = false;
 
 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
 		verbose(env, "the call stack of %d frames is too deep\n",
@@ -3984,6 +3994,32 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EFAULT;
 	}
 
+	func_info_aux = env->prog->aux->func_info_aux;
+	if (func_info_aux)
+		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
+	err = btf_check_func_arg_match(env, subprog, caller->regs);
+	if (err == -EFAULT)
+		return err;
+	if (is_global) {
+		if (err) {
+			verbose(env, "Caller passes invalid args into func#%d\n",
+				subprog);
+			return err;
+		} else {
+			if (env->log.level & BPF_LOG_LEVEL)
+				verbose(env,
+					"Func#%d is global and valid. Skipping.\n",
+					subprog);
+			clear_caller_saved_regs(env, caller->regs);
+
+			/* All global functions return SCALAR_VALUE */
+			mark_reg_unknown(env, caller->regs, BPF_REG_0);
+
+			/* continue with next insn after call */
+			return 0;
+		}
+	}
+
 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
 	if (!callee)
 		return -ENOMEM;
@@ -4010,18 +4046,11 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
 		callee->regs[i] = caller->regs[i];
 
-	/* after the call registers r0 - r5 were scratched */
-	for (i = 0; i < CALLER_SAVED_REGS; i++) {
-		mark_reg_not_init(env, caller->regs, caller_saved[i]);
-		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
-	}
+	clear_caller_saved_regs(env, caller->regs);
 
 	/* only increment it after check_reg_arg() finished */
 	state->curframe++;
 
-	if (btf_check_func_arg_match(env, subprog))
-		return -EINVAL;
-
 	/* and go analyze first insn of the callee */
 	*insn_idx = target_insn;
 
@@ -6771,12 +6800,13 @@ static int check_btf_func(struct bpf_verifier_env *env,
 
 		/* check type_id */
 		type = btf_type_by_id(btf, krecord[i].type_id);
-		if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
+		if (!type || !btf_type_is_func(type)) {
 			verbose(env, "invalid type id %d in func info",
 				krecord[i].type_id);
 			ret = -EINVAL;
 			goto err_free;
 		}
+		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
 		prev_offset = krecord[i].insn_off;
 		urecord += urec_size;
 	}
@@ -7756,35 +7786,13 @@ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
 
 static int do_check(struct bpf_verifier_env *env)
 {
-	struct bpf_verifier_state *state;
+	struct bpf_verifier_state *state = env->cur_state;
 	struct bpf_insn *insns = env->prog->insnsi;
 	struct bpf_reg_state *regs;
 	int insn_cnt = env->prog->len;
 	bool do_print_state = false;
 	int prev_insn_idx = -1;
 
-	env->prev_linfo = NULL;
-
-	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
-	if (!state)
-		return -ENOMEM;
-	state->curframe = 0;
-	state->speculative = false;
-	state->branches = 1;
-	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
-	if (!state->frame[0]) {
-		kfree(state);
-		return -ENOMEM;
-	}
-	env->cur_state = state;
-	init_func_state(env, state->frame[0],
-			BPF_MAIN_FUNC /* callsite */,
-			0 /* frameno */,
-			0 /* subprogno, zero == main subprog */);
-
-	if (btf_check_func_arg_match(env, 0))
-		return -EINVAL;
-
 	for (;;) {
 		struct bpf_insn *insn;
 		u8 class;
@@ -7862,7 +7870,7 @@ static int do_check(struct bpf_verifier_env *env)
 		}
 
 		regs = cur_regs(env);
-		env->insn_aux_data[env->insn_idx].seen = true;
+		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
 		prev_insn_idx = env->insn_idx;
 
 		if (class == BPF_ALU || class == BPF_ALU64) {
@@ -8082,7 +8090,7 @@ static int do_check(struct bpf_verifier_env *env)
 					return err;
 
 				env->insn_idx++;
-				env->insn_aux_data[env->insn_idx].seen = true;
+				env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
 			} else {
 				verbose(env, "invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -8095,7 +8103,6 @@ static int do_check(struct bpf_verifier_env *env)
 		env->insn_idx++;
 	}
 
-	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
 	return 0;
 }
 
@@ -8372,7 +8379,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 	memcpy(new_data + off + cnt - 1, old_data + off,
 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
 	for (i = off; i < off + cnt - 1; i++) {
-		new_data[i].seen = true;
+		new_data[i].seen = env->pass_cnt;
 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
 	}
 	env->insn_aux_data = new_data;
@@ -9484,6 +9491,7 @@ static void free_states(struct bpf_verifier_env *env)
 		kfree(sl);
 		sl = sln;
 	}
+	env->free_list = NULL;
 
 	if (!env->explored_states)
 		return;
@@ -9497,11 +9505,159 @@ static void free_states(struct bpf_verifier_env *env)
 			kfree(sl);
 			sl = sln;
 		}
+		env->explored_states[i] = NULL;
 	}
-
-	kvfree(env->explored_states);
 }
 
+/* The verifier is using insn_aux_data[] to store temporary data during
+ * verification and to store information for passes that run after the
+ * verification like dead code sanitization. do_check_common() for subprogram N
+ * may analyze many other subprograms. sanitize_insn_aux_data() clears all
+ * temporary data after do_check_common() finds that subprogram N cannot be
+ * verified independently. pass_cnt counts the number of times
+ * do_check_common() was run and insn->aux->seen tells the pass number
+ * insn_aux_data was touched. These variables are compared to clear temporary
+ * data from failed pass. For testing and experiments do_check_common() can be
+ * run multiple times even when prior attempt to verify is unsuccessful.
+ */
+static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
+{
+	struct bpf_insn *insn = env->prog->insnsi;
+	struct bpf_insn_aux_data *aux;
+	int i, class;
+
+	for (i = 0; i < env->prog->len; i++) {
+		class = BPF_CLASS(insn[i].code);
+		if (class != BPF_LDX && class != BPF_STX)
+			continue;
+		aux = &env->insn_aux_data[i];
+		if (aux->seen != env->pass_cnt)
+			continue;
+		memset(aux, 0, offsetof(typeof(*aux), orig_idx));
+	}
+}
+
+static int do_check_common(struct bpf_verifier_env *env, int subprog)
+{
+	struct bpf_verifier_state *state;
+	struct bpf_reg_state *regs;
+	int ret, i;
+
+	env->prev_linfo = NULL;
+	env->pass_cnt++;
+
+	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+	state->curframe = 0;
+	state->speculative = false;
+	state->branches = 1;
+	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
+	if (!state->frame[0]) {
+		kfree(state);
+		return -ENOMEM;
+	}
+	env->cur_state = state;
+	init_func_state(env, state->frame[0],
+			BPF_MAIN_FUNC /* callsite */,
+			0 /* frameno */,
+			subprog);
+
+	regs = state->frame[state->curframe]->regs;
+	if (subprog) {
+		ret = btf_prepare_func_args(env, subprog, regs);
+		if (ret)
+			goto out;
+		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
+			if (regs[i].type == PTR_TO_CTX)
+				mark_reg_known_zero(env, regs, i);
+			else if (regs[i].type == SCALAR_VALUE)
+				mark_reg_unknown(env, regs, i);
+		}
+	} else {
+		/* 1st arg to a function */
+		regs[BPF_REG_1].type = PTR_TO_CTX;
+		mark_reg_known_zero(env, regs, BPF_REG_1);
+		ret = btf_check_func_arg_match(env, subprog, regs);
+		if (ret == -EFAULT)
+			/* unlikely verifier bug. abort.
+			 * ret == 0 and ret < 0 are sadly acceptable for
+			 * main() function due to backward compatibility.
+			 * Like socket filter program may be written as:
+			 * int bpf_prog(struct pt_regs *ctx)
+			 * and never dereference that ctx in the program.
+			 * 'struct pt_regs' is a type mismatch for socket
+			 * filter that should be using 'struct __sk_buff'.
+			 */
+			goto out;
+	}
+
+	ret = do_check(env);
+out:
+	free_verifier_state(env->cur_state, true);
+	env->cur_state = NULL;
+	while (!pop_stack(env, NULL, NULL));
+	free_states(env);
+	if (ret)
+		/* clean aux data in case subprog was rejected */
+		sanitize_insn_aux_data(env);
+	return ret;
+}
+
+/* Verify all global functions in a BPF program one by one based on their BTF.
+ * All global functions must pass verification. Otherwise the whole program is rejected.
+ * Consider:
+ * int bar(int);
+ * int foo(int f)
+ * {
+ *    return bar(f);
+ * }
+ * int bar(int b)
+ * {
+ *    ...
+ * }
+ * foo() will be verified first for R1=any_scalar_value. During verification it
+ * will be assumed that bar() already verified successfully and call to bar()
+ * from foo() will be checked for type match only. Later bar() will be verified
+ * independently to check that it's safe for R1=any_scalar_value.
+ */
+static int do_check_subprogs(struct bpf_verifier_env *env)
+{
+	struct bpf_prog_aux *aux = env->prog->aux;
+	int i, ret;
+
+	if (!aux->func_info)
+		return 0;
+
+	for (i = 1; i < env->subprog_cnt; i++) {
+		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
+			continue;
+		env->insn_idx = env->subprog_info[i].start;
+		WARN_ON_ONCE(env->insn_idx == 0);
+		ret = do_check_common(env, i);
+		if (ret) {
+			return ret;
+		} else if (env->log.level & BPF_LOG_LEVEL) {
+			verbose(env,
+				"Func#%d is safe for any args that match its prototype\n",
+				i);
+		}
+	}
+	return 0;
+}
+
+static int do_check_main(struct bpf_verifier_env *env)
+{
+	int ret;
+
+	env->insn_idx = 0;
+	ret = do_check_common(env, 0);
+	if (!ret)
+		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
+	return ret;
+}
+
+
 static void print_verification_stats(struct bpf_verifier_env *env)
 {
 	int i;
@@ -9849,18 +10005,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
 	if (ret < 0)
 		goto skip_full_check;
 
-	ret = do_check(env);
-	if (env->cur_state) {
-		free_verifier_state(env->cur_state, true);
-		env->cur_state = NULL;
-	}
+	ret = do_check_subprogs(env);
+	ret = ret ?: do_check_main(env);
 
 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
 		ret = bpf_prog_offload_finalize(env);
 
 skip_full_check:
-	while (!pop_stack(env, NULL, NULL));
-	free_states(env);
+	kvfree(env->explored_states);
 
 	if (ret == 0)
 		ret = check_max_stack_depth(env);
@@ -146,6 +146,12 @@ enum {
 	BTF_VAR_GLOBAL_EXTERN = 2,
 };
 
+enum btf_func_linkage {
+	BTF_FUNC_STATIC = 0,
+	BTF_FUNC_GLOBAL = 1,
+	BTF_FUNC_EXTERN = 2,
+};
+
 /* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
  * additional information related to the variable such as its linkage.
  */
@@ -173,6 +173,8 @@ struct bpf_capabilities {
 	__u32 btf_datasec:1;
 	/* BPF_F_MMAPABLE is supported for arrays */
 	__u32 array_mmap:1;
+	/* BTF_FUNC_GLOBAL is supported */
+	__u32 btf_func_global:1;
 };
 
 enum reloc_type {
@@ -2209,13 +2211,14 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
 
 static void bpf_object__sanitize_btf(struct bpf_object *obj)
 {
+	bool has_func_global = obj->caps.btf_func_global;
 	bool has_datasec = obj->caps.btf_datasec;
 	bool has_func = obj->caps.btf_func;
 	struct btf *btf = obj->btf;
 	struct btf_type *t;
 	int i, j, vlen;
 
-	if (!obj->btf || (has_func && has_datasec))
+	if (!obj->btf || (has_func && has_datasec && has_func_global))
 		return;
 
 	for (i = 1; i <= btf__get_nr_types(btf); i++) {
@@ -2263,6 +2266,9 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
 		} else if (!has_func && btf_is_func(t)) {
 			/* replace FUNC with TYPEDEF */
 			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
+		} else if (!has_func_global && btf_is_func(t)) {
+			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
+			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
 		}
 	}
 }
@@ -3205,6 +3211,32 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
 	return 0;
 }
 
+static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
+{
+	static const char strs[] = "\0int\0x\0a";
+	/* static void x(int a) {} */
+	__u32 types[] = {
+		/* int */
+		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+		/* FUNC_PROTO */                                /* [2] */
+		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+		BTF_PARAM_ENC(7, 1),
+		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
+		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
+	};
+	int btf_fd;
+
+	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+				      strs, sizeof(strs));
+	if (btf_fd >= 0) {
+		obj->caps.btf_func_global = 1;
+		close(btf_fd);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
 {
 	static const char strs[] = "\0x\0.data";
@@ -3260,6 +3292,7 @@ bpf_object__probe_caps(struct bpf_object *obj)
 		bpf_object__probe_name,
 		bpf_object__probe_global_data,
 		bpf_object__probe_btf_func,
+		bpf_object__probe_btf_func_global,
 		bpf_object__probe_btf_datasec,
 		bpf_object__probe_array_mmap,
 	};
@@ -48,6 +48,8 @@ void test_bpf_verif_scale(void)
 		{ "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS },
 		{ "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS },
 
+		{ "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+
 		/* full unroll by llvm */
 		{ "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
 		{ "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
@@ -98,6 +98,7 @@ static void test_target_yes_callees(void)
 		"fexit/test_pkt_access",
 		"fexit/test_pkt_access_subprog1",
 		"fexit/test_pkt_access_subprog2",
+		"fexit/test_pkt_access_subprog3",
 	};
 	test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
 				  "./test_pkt_access.o",
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <test_progs.h>
+
+const char *err_str;
+bool found;
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+			      const char *format, va_list args)
+{
+	char *log_buf;
+
+	if (level != LIBBPF_WARN ||
+	    strcmp(format, "libbpf: \n%s\n")) {
+		vprintf(format, args);
+		return 0;
+	}
+
+	log_buf = va_arg(args, char *);
+	if (!log_buf)
+		goto out;
+	if (strstr(log_buf, err_str) == 0)
+		found = true;
+out:
+	printf(format, log_buf);
+	return 0;
+}
+
+extern int extra_prog_load_log_flags;
+
+static int check_load(const char *file)
+{
+	struct bpf_prog_load_attr attr;
+	struct bpf_object *obj = NULL;
+	int err, prog_fd;
+
+	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
+	attr.file = file;
+	attr.prog_type = BPF_PROG_TYPE_UNSPEC;
+	attr.log_level = extra_prog_load_log_flags;
+	attr.prog_flags = BPF_F_TEST_RND_HI32;
+	found = false;
+	err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+	bpf_object__close(obj);
+	return err;
+}
+
+struct test_def {
+	const char *file;
+	const char *err_str;
+};
+
+void test_test_global_funcs(void)
+{
+	struct test_def tests[] = {
+		{ "test_global_func1.o", "combined stack size of 4 calls is 544" },
+		{ "test_global_func2.o" },
+		{ "test_global_func3.o" , "the call stack of 8 frames" },
+		{ "test_global_func4.o" },
+		{ "test_global_func5.o" , "expected pointer to ctx, but got PTR" },
+		{ "test_global_func6.o" , "modified ctx ptr R2" },
+		{ "test_global_func7.o" , "foo() doesn't return scalar" },
+	};
+	libbpf_print_fn_t old_print_fn = NULL;
+	int err, i, duration = 0;
+
+	old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		const struct test_def *test = &tests[i];
+
+		if (!test__start_subtest(test->file))
+			continue;
+
+		err_str = test->err_str;
+		err = check_load(test->file);
+		CHECK_FAIL(!!err ^ !!err_str);
+		if (err_str)
+			CHECK(found, "", "expected string '%s'", err_str);
+	}
+	libbpf_set_print(old_print_fn);
+}
@@ -79,4 +79,19 @@ int test_subprog2(struct args_subprog2 *ctx)
 	test_result_subprog2 = 1;
 	return 0;
 }
+
+__u64 test_result_subprog3 = 0;
+BPF_TRACE_3("fexit/test_pkt_access_subprog3", test_subprog3,
+	    int, val, struct sk_buff *, skb, int, ret)
+{
+	int len;
+
+	__builtin_preserve_access_index(({
+		len = skb->len;
+	}));
+	if (len != 74 || ret != 74 * val || val != 3)
+		return 0;
+	test_result_subprog3 = 1;
+	return 0;
+}
 char _license[] SEC("license") = "GPL";
@@ -154,7 +154,12 @@ struct {
 	__uint(value_size, sizeof(long long) * 127);
 } stackmap SEC(".maps");
 
-static __always_inline int __on_event(struct pt_regs *ctx)
+#ifdef GLOBAL_FUNC
+__attribute__((noinline))
+#else
+static __always_inline
+#endif
+int __on_event(struct bpf_raw_tracepoint_args *ctx)
 {
 	uint64_t pid_tgid = bpf_get_current_pid_tgid();
 	pid_t pid = (pid_t)(pid_tgid >> 32);

@@ -254,7 +259,7 @@ static __always_inline int __on_event(struct pt_regs *ctx)
 }
 
 SEC("raw_tracepoint/kfree_skb")
-int on_event(struct pt_regs* ctx)
+int on_event(struct bpf_raw_tracepoint_args* ctx)
 {
 	int i, ret = 0;
 	ret |= __on_event(ctx);
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define STACK_MAX_LEN 50
+#define GLOBAL_FUNC
+#include "pyperf.h"
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+#ifndef MAX_STACK
+#define MAX_STACK (512 - 3 * 32 + 8)
+#endif
+
+static __attribute__ ((noinline))
+int f0(int var, struct __sk_buff *skb)
+{
+	return skb->len;
+}
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+	volatile char buf[MAX_STACK] = {};
+
+	return f0(0, skb) + skb->len;
+}
+
+int f3(int, struct __sk_buff *skb, int);
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+	return f1(skb) + f3(val, skb, 1);
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+	volatile char buf[MAX_STACK] = {};
+
+	return skb->ifindex * val * var;
+}
+
+SEC("classifier/test")
+int test_cls(struct __sk_buff *skb)
+{
+	return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
+}
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#define MAX_STACK (512 - 3 * 32)
+#include "test_global_func1.c"
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+	return skb->len;
+}
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+	return f1(skb) + val;
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+	return f2(var, skb) + val;
+}
+
+__attribute__ ((noinline))
+int f4(struct __sk_buff *skb)
+{
+	return f3(1, skb, 2);
+}
+
+__attribute__ ((noinline))
+int f5(struct __sk_buff *skb)
+{
+	return f4(skb);
+}
+
+__attribute__ ((noinline))
+int f6(struct __sk_buff *skb)
+{
+	return f5(skb);
+}
+
+__attribute__ ((noinline))
+int f7(struct __sk_buff *skb)
+{
+	return f6(skb);
+}
+
+#ifndef NO_FN8
+__attribute__ ((noinline))
+int f8(struct __sk_buff *skb)
+{
+	return f7(skb);
+}
+#endif
+
+SEC("classifier/test")
+int test_cls(struct __sk_buff *skb)
+{
+#ifndef NO_FN8
+	return f8(skb);
+#else
+	return f7(skb);
+#endif
+}
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#define NO_FN8
+#include "test_global_func3.c"
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+	return skb->len;
+}
+
+int f3(int, struct __sk_buff *skb);
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+	return f1(skb) + f3(val, (void *)&val); /* type mismatch */
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb)
+{
+	return skb->ifindex * val;
+}
+
+SEC("classifier/test")
+int test_cls(struct __sk_buff *skb)
+{
+	return f1(skb) + f2(2, skb) + f3(3, skb);
+}
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+	return skb->len;
+}
+
+int f3(int, struct __sk_buff *skb);
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+	return f1(skb) + f3(val, skb + 1); /* type mismatch */
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb)
+{
+	return skb->ifindex * val;
+}
+
+SEC("classifier/test")
+int test_cls(struct __sk_buff *skb)
+{
+	return f1(skb) + f2(2, skb) + f3(3, skb);
+}
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+__attribute__ ((noinline))
+void foo(struct __sk_buff *skb)
+{
+	skb->tc_index = 0;
+}
+
+SEC("classifier/test")
+int test_cls(struct __sk_buff *skb)
+{
+	foo(skb);
+	return 0;
+}
@@ -47,6 +47,32 @@ int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
 	return skb->len * val;
 }
 
+#define MAX_STACK (512 - 2 * 32)
+
+__attribute__ ((noinline))
+int get_skb_len(struct __sk_buff *skb)
+{
+	volatile char buf[MAX_STACK] = {};
+
+	return skb->len;
+}
+
+int get_skb_ifindex(int, struct __sk_buff *skb, int);
+
+__attribute__ ((noinline))
+int test_pkt_access_subprog3(int val, struct __sk_buff *skb)
+{
+	return get_skb_len(skb) * get_skb_ifindex(val, skb, 1);
+}
+
+__attribute__ ((noinline))
+int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
+{
+	volatile char buf[MAX_STACK] = {};
+
+	return skb->ifindex * val * var;
+}
+
 SEC("classifier/test_pkt_access")
 int test_pkt_access(struct __sk_buff *skb)
 {

@@ -82,6 +108,8 @@ int test_pkt_access(struct __sk_buff *skb)
 		return TC_ACT_SHOT;
 	if (test_pkt_access_subprog2(2, skb) != skb->len * 2)
 		return TC_ACT_SHOT;
+	if (test_pkt_access_subprog3(3, skb) != skb->len * 3 * skb->ifindex)
+		return TC_ACT_SHOT;
 	if (tcp) {
 		if (((void *)(tcp) + 20) > data_end || proto != 6)
 			return TC_ACT_SHOT;
@@ -86,7 +86,7 @@ u32 jhash(const void *key, u32 length, u32 initval)
 	return c;
 }
 
-static __attribute__ ((noinline))
+__attribute__ ((noinline))
 u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
 {
 	a += initval;

@@ -96,7 +96,7 @@ u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
 	return c;
 }
 
-static __attribute__ ((noinline))
+__attribute__ ((noinline))
 u32 jhash_2words(u32 a, u32 b, u32 initval)
 {
 	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));