selftests/bpf: additional bpf_call tests

Add some additional checks for a few more corner cases.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

@@ -8110,6 +8110,180 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: not on unpriviledged",
.insns = {
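/* bpf-to-bpf call: src_reg is BPF_PSEUDO_CALL (1) and imm is the
 * insn offset of the callee relative to the insn after the call,
 * so imm 2 targets insn 3 here.
 */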
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"calls: overlapping caller/callee",
.insns = {
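/* imm 0 puts the callee entry at the very next insn, so the
 * callee overlaps the caller body.
 */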
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "last insn is not an exit or jmp",
.result = REJECT,
},
{
"calls: wrong recursive calls",
.insns = {
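/* each call's imm of -2 targets the insn immediately before it */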
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: wrong src reg",
.insns = {
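/* src_reg must be 0 (helper call) or BPF_PSEUDO_CALL (1); 2 is reserved */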
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "BPF_CALL uses reserved fields",
.result = REJECT,
},
{
"calls: wrong off value",
.insns = {
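/* the off field of a BPF_CALL insn must be zero */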
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "BPF_CALL uses reserved fields",
.result = REJECT,
},
{
"calls: jump back loop",
.insns = {
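/* imm -1 makes the call target itself */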
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn 0 to 0",
.result = REJECT,
},
{
"calls: conditional call",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
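/* the JEQ below branches into the callee body, crossing the
 * function boundary.
 */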
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: conditional call 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: conditional call 3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn",
.result = REJECT,
},
{
"calls: conditional call 4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -5),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: conditional call 5",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn",
.result = REJECT,
},
{
"calls: conditional call 6",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
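/* off -2 loops back to the call at insn 0, a disallowed back-edge */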
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn",
.result = REJECT,
},
{
"calls: using r0 returned by callee",
.insns = {
@@ -8121,6 +8295,17 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: using uninit r0 from callee",
.insns = {
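/* callee is a bare exit that never writes r0 */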
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "!read_ok",
.result = REJECT,
},
{
"calls: callee is using r1",
.insns = {
@@ -8223,6 +8408,71 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: calls with stack arith",
.insns = {
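/* r2 walks down the caller's stack across two nested calls,
 * subtracting 64 in each frame before the final store.
 */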
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: calls with misaligned stack access",
.insns = {
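/* the -63/-61 adjustments leave r2 unaligned for the 8-byte
 * store at the end.
 */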
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
.errstr = "misaligned stack access",
.result = REJECT,
},
{
"calls: calls control flow, jump test",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 43),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: calls control flow, jump test 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 43),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "jump out of range from insn 1 to 4",
.result = REJECT,
},
{
"calls: two calls with bad jump",
.insns = {
@@ -8297,6 +8547,18 @@ static struct bpf_test tests[] = {
.errstr = "invalid destination",
.result = REJECT,
},
{
"calls: invalid call 2",
.insns = {
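/* imm 0x7fffffff points far beyond the last insn */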
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "invalid destination",
.result = REJECT,
},
{
"calls: jumping across function bodies. test1",
.insns = {
@@ -8366,6 +8628,30 @@ static struct bpf_test tests[] = {
.errstr = "last insn",
.result = REJECT,
},
{
"calls: ld_abs with changing ctx data in callee",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_EXIT_INSN(),
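/* subprog 1: the helper call changes packet data underneath LD_ABS */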
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_vlan_push),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
.result = REJECT,
},
{
"calls: two calls with bad fallthrough",
.insns = {
@@ -8459,6 +8745,36 @@ static struct bpf_test tests[] = {
.errstr = "cannot spill",
.result = REJECT,
},
{
"calls: write into caller stack frame",
.insns = {
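/* pass a pointer to the caller's stack (fp-8) down to the callee in r1 */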
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"calls: write into callee stack frame",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_EXIT_INSN(),
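/* subprog 1: tries to hand its own stack pointer back in r0 */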
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "cannot return stack pointer",
.result = REJECT,
},
{
"calls: two calls with stack write and void return",
.insns = {
@@ -9056,6 +9372,287 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"calls: pkt_ptr spill into caller stack 2",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
/* Marking is still kept, but not in all cases safe. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 3",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Marking is still kept and safe here. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: pkt_ptr spill into caller stack 4",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Check marking propagated. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: pkt_ptr spill into caller stack 5",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "same insn cannot be used with different",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
},
{
"calls: pkt_ptr spill into caller stack 8",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: pkt_ptr spill into caller stack 9",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
},
{
"calls: caller stack init to zero or map_value_or_null",
.insns = {