linux/tools/testing/selftests/bpf/test_verifier.c

/*
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <sys/capability.h>
#include <sys/resource.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
#include "bpf_sys.h"
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
#define MAX_INSNS 512
#define MAX_FIXUPS 8
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
int fixup_map1[MAX_FIXUPS];
int fixup_map2[MAX_FIXUPS];
int fixup_prog[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
enum {
UNDEF,
ACCEPT,
REJECT
} result, result_unpriv;
enum bpf_prog_type prog_type;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
* actually the end of the structure.
*/
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
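/* Each entry below is one verifier test case: the insns array is loaded as an
 * eBPF program, the fixup_map1/fixup_map2/fixup_prog arrays list instruction
 * indices whose imm field the test runner patches with the fd of a map it
 * creates before loading, errstr/errstr_unpriv are strings expected to show
 * up in the verifier log, and result/result_unpriv say whether the load is
 * expected to be accepted or rejected for privileged and unprivileged users.
 */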
static struct bpf_test tests[] = {
{
"add+sub+mul",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_2, 3),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"unreachable",
.insns = {
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"unreachable2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"out of range jump",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"out of range jump2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -2),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"test1 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
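/* taking the +1 branch lands on the pseudo second half of the
 * first ld_imm64 below, which decodes as an invalid BPF_LD_IMM
 */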
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test2 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test3 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
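/* lone ld_imm64 first half: the next insn must have code 0,
 * but here it is the start of another ld_imm64
 */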
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test4 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test5 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"no bpf_exit",
.insns = {
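/* no BPF_EXIT: falling off the end of the program is
 * reported as a jump out of range
 */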
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"loop (back-edge)",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -1),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"loop2 (back-edge)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"conditional loop",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"read uninitialized register",
.insns = {
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"read invalid register",
.insns = {
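/* -1 in the 4-bit src_reg field reads back as R15 */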
BPF_MOV64_REG(BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"program doesn't init R0 before exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.result = REJECT,
},
{
"program doesn't init R0 before exit in all branches",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"stack out of bounds",
.insns = {
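/* R10+8 is above the frame pointer and outside the program's stack */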
BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack",
.result = REJECT,
},
{
"invalid call insn1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid call insn2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid function call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
BPF_EXIT_INSN(),
},
.errstr = "invalid func unknown#1234567",
.result = REJECT,
},
{
"uninitialized stack1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
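/* R2 points to a stack slot that was never written,
 * yet is passed as the lookup key
 */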
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 2 },
.errstr = "invalid indirect read from stack",
.result = REJECT,
},
{
"uninitialized stack2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.errstr = "invalid read from stack",
.result = REJECT,
},
{
"invalid argument register",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
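/* the call above clobbers R1-R5, so the second call
 * below reads an uninitialized R1
 */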
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"non-invalid argument register",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"check valid spill/fill",
.insns = {
/* spill R1(ctx) into stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* fill it back into R2 */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
/* should be able to access R0 = *(R2 + 8) */
/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
},
{
"check valid spill/fill, skb mark",
.insns = {
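/* keep the ctx (skb) pointer in R6, spill it, fill it
 * back into R0 and load skb->mark through it
 */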
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = ACCEPT,
},
{
"check corrupted spill/fill",
.insns = {
/* spill R1(ctx) into stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* mess up with R1 pointer on stack */
BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
/* fill back into R0 should fail */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.errstr = "corrupted spill",
.result = REJECT,
},
{
"invalid src register in STX",
.insns = {
BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"invalid dst register in STX",
.insns = {
BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid dst register in ST",
.insns = {
BPF_ST_MEM(BPF_B, 14, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid src register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
BPF_EXIT_INSN(),
},
.errstr = "R12 is invalid",
.result = REJECT,
},
{
"invalid dst register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr = "R11 is invalid",
.result = REJECT,
},
{
"junk insn",
.insns = {
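/* opcode 0 decodes as BPF_LD | BPF_W | BPF_IMM, which is not a valid load */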
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM",
.result = REJECT,
},
{
"junk insn2",
.insns = {
BPF_RAW_INSN(1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_LDX uses reserved fields",
.result = REJECT,
},
{
"junk insn3",
.insns = {
BPF_RAW_INSN(-1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_ALU opcode f0",
.result = REJECT,
},
{
"junk insn4",
.insns = {
BPF_RAW_INSN(-1, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_ALU opcode f0",
.result = REJECT,
},
{
"junk insn5",
.insns = {
BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ALU uses reserved fields",
.result = REJECT,
},
{
"misaligned read from stack",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"invalid map_fd for function call",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
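/* no fixup_map entry in this test, so fd 0 reaches the map helper unmodified */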
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.errstr = "fd 0 is not pointing to valid bpf_map",
.result = REJECT,
},
{
"don't check return value before access",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
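/* R0 may still be NULL (map_value_or_null); the store
 * below happens before any NULL check
 */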
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 invalid mem access 'map_value_or_null'",
.result = REJECT,
},
{
"access memory with incorrect alignment",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "misaligned access",
.result = REJECT,
},
{
"sometimes access memory with incorrect alignment",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
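/* the store below is only reached when R0 == NULL:
 * the JEQ above jumps here on the NULL path
 */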
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 invalid mem access",
bpf: add unprivileged bpf tests Add new tests samples/bpf/test_verifier: unpriv: return pointer checks that pointer cannot be returned from the eBPF program unpriv: add const to pointer unpriv: add pointer to pointer unpriv: neg pointer checks that pointer arithmetic is disallowed unpriv: cmp pointer with const unpriv: cmp pointer with pointer checks that comparison of pointers is disallowed Only one case allowed 'void *value = bpf_map_lookup_elem(..); if (value == 0) ...' unpriv: check that printk is disallowed since bpf_trace_printk is not available to unprivileged unpriv: pass pointer to helper function checks that pointers cannot be passed to functions that expect integers If function expects a pointer the verifier allows only that type of pointer. Like 1st argument of bpf_map_lookup_elem() must be pointer to map. (applies to non-root as well) unpriv: indirectly pass pointer on stack to helper function checks that pointer stored into stack cannot be used as part of key passed into bpf_map_lookup_elem() unpriv: mangle pointer on stack 1 unpriv: mangle pointer on stack 2 checks that writing into stack slot that already contains a pointer is disallowed unpriv: read pointer from stack in small chunks checks that < 8 byte read from stack slot that contains a pointer is disallowed unpriv: write pointer into ctx checks that storing pointers into skb->fields is disallowed unpriv: write pointer into map elem value checks that storing pointers into element values is disallowed For example: int bpf_prog(struct __sk_buff *skb) { u32 key = 0; u64 *value = bpf_map_lookup_elem(&map, &key); if (value) *value = (u64) skb; } will be rejected. unpriv: partial copy of pointer checks that doing 32-bit register mov from register containing a pointer is disallowed unpriv: pass pointer to tail_call checks that passing pointer as an index into bpf_tail_call is disallowed unpriv: cmp map pointer with zero checks that comparing map pointer with constant is disallowed unpriv: write into frame pointer checks that frame pointer is read-only (applies to root too) unpriv: cmp of frame pointer checks that R10 cannot be using in comparison unpriv: cmp of stack pointer checks that Rx = R10 - imm is ok, but comparing Rx is not unpriv: obfuscate stack pointer checks that Rx = R10 - imm is ok, but Rx -= imm is not Signed-off-by: Alexei Starovoitov <ast@plumgrid.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2015-10-08 13:23:23 +08:00
.errstr_unpriv = "R0 leaks addr",
.result = REJECT,
},
{
"jump test 1",
.insns = {
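/* the JEQ checks below compare R1 (the ctx pointer) against immediates,
 * which unprivileged loads reject as "R1 pointer comparison"
 */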
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 14),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 5),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 3",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 19),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_JMP_IMM(BPF_JA, 0, 0, 15),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
BPF_JMP_IMM(BPF_JA, 0, 0, 7),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
BPF_JMP_IMM(BPF_JA, 0, 0, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 24 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 4",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
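/*
 * "jump test 5": R2 and R3 are both copies of the frame pointer and the
 * same store/branch pattern repeats five times.  The stores to fp-8 are
 * legal stack writes; only the JGE test on R1 makes the unprivileged load
 * fail with "R1 pointer comparison".
 */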
{
"jump test 5",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
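/*
 * "access skb fields ok" loads every 32-bit __sk_buff member exercised
 * here (len, mark, pkt_type, queue_mapping, protocol, vlan_present,
 * vlan_tci) and must be accepted.  The "bad" variants that follow are
 * rejected: bad1 reads at a negative context offset, while bad2-bad4
 * arrange for R1 to hold either the context or a map value at the point
 * of the context access, which the verifier reports as
 * "different pointers".
 */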
{
"access skb fields ok",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, queue_mapping)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, protocol)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_present)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_tci)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"access skb fields bad1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"access skb fields bad2",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad3",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -12),
},
.fixup_map1 = { 6 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad4",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -13),
},
.fixup_map1 = { 7 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
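/*
 * skb->mark and skb->tc_index are readable but not writable here: the
 * store is "invalid bpf_context access" for the privileged run, and the
 * unprivileged run additionally flags "R1 leaks addr" because the value
 * being stored is the context pointer itself.
 */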
{
"check skb->mark is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
{
"check skb->tc_index is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
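/*
 * skb->cb[] is a 20-byte scratch area (five u32 slots).  The following
 * group stores and re-loads every byte, half-word, word and double-word
 * inside it and must be accepted.  The "oob" variants step one access
 * past either end ("invalid bpf_context access"), the "unaligned"
 * variants break natural alignment ("misaligned access"), and the
 * "wrong type" variants issue the same store from a
 * BPF_PROG_TYPE_CGROUP_SOCK program, which does not allow this access.
 */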
{
"check cb access: byte",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: byte, oob 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: byte, oob 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) - 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: byte, oob 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: byte, oob 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) - 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: byte, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: half",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: half, unaligned",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: half, oob 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: half, oob 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) - 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: half, oob 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: half, oob 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) - 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: half, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: word",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: word, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: word, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: word, unaligned 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: word, unaligned 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: double",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: double, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: double, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned access",
.result = REJECT,
},
{
"check cb access: double, oob 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 8),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) - 8),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 8),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) - 8),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check out of range skb->cb access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 256),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
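/*
 * "write skb fields from socket prog" stores the R1 context pointer into
 * cb[0]/cb[2]; that is fine for a privileged load but rejected
 * unprivileged with "R1 leaks addr".  "write skb fields from tc_cls_act
 * prog" shows that a BPF_PROG_TYPE_SCHED_CLS program may additionally
 * write skb->mark and skb->tc_index, which the socket tests above could
 * not.
 */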
{
"write skb fields from socket prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.errstr_unpriv = "R1 leaks addr",
.result_unpriv = REJECT,
},
{
"write skb fields from tc_cls_act prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "",
.result_unpriv = REJECT,
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
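/*
 * PTR_TO_STACK tests: the register offset and the instruction offset add
 * up, so fp-10 accessed at +2 is a valid, aligned fp-8 slot.  The
 * rejected variants land on fp-6 and fp-2 ("misaligned access"), far
 * below the stack ("invalid stack off=-79992 size=8") or at fp+0
 * ("invalid stack off=0 size=8").
 */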
{
"PTR_TO_STACK store/load",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"PTR_TO_STACK store/load - bad alignment on off",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "misaligned access off -6 size 8",
},
{
"PTR_TO_STACK store/load - bad alignment on reg",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "misaligned access off -2 size 8",
},
{
"PTR_TO_STACK store/load - out of bounds low",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off=-79992 size=8",
},
{
"PTR_TO_STACK store/load - out of bounds high",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off=0 size=8",
},
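/*
 * The "unpriv:" tests cover cases that behave differently once the test
 * binary drops privileges: the programs shown here are accepted for a
 * privileged loader but must be rejected unprivileged because they could
 * expose a kernel address (returning the frame pointer, pointer
 * arithmetic or negation, comparing a pointer with a constant or another
 * pointer) or call a helper that is not offered to the unprivileged run
 * ("unknown func bpf_trace_printk#6").
 */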
{
"unpriv: return pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr",
},
{
"unpriv: add const to pointer",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic",
},
{
"unpriv: add pointer to pointer",
.insns = {
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic",
},
{
"unpriv: neg pointer",
.insns = {
BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic",
},
{
"unpriv: cmp pointer with const",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer comparison",
},
{
"unpriv: cmp pointer with pointer",
.insns = {
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R10 pointer comparison",
},
{
"unpriv: check that printk is disallowed",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_trace_printk),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "unknown func bpf_trace_printk#6",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: pass pointer to helper function",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_update_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R4 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: indirectly pass pointer on stack to helper function",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "invalid indirect read from stack off -8+0 size 8",
.result = REJECT,
},
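/* The next three tests spill the frame pointer to the stack slot at -8
 * and then misuse that slot: overwriting part of a spilled pointer is
 * refused for unprivileged programs ("attempt to corrupt spilled"), and
 * reading it back in chunks smaller than 8 bytes is refused for
 * everyone ("invalid size").
 */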
{
"unpriv: mangle pointer on stack 1",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: mangle pointer on stack 2",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: read pointer from stack in small chunks",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid size",
.result = REJECT,
},
{
"unpriv: write pointer into ctx",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 leaks addr",
.result_unpriv = REJECT,
.errstr = "invalid bpf_context access",
.result = REJECT,
},
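/* Spilling the ctx pointer (R1) to the stack and filling it back is
 * allowed and preserves its type.  Filling R1 from a slot that was
 * overwritten with the frame pointer or touched by an XADD must fail
 * before a ctx-expecting helper runs, and the same fill instruction
 * may not observe different pointer types on different paths.
 */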
{
"unpriv: spill/fill of ctx",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"unpriv: spill/fill of ctx 2",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of ctx 3",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=fp expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of ctx 4",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
BPF_REG_0, -8, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of different pointers stx",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 42),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
offsetof(struct __sk_buff, mark)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "same insn cannot be used with different pointers",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of different pointers ldx",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
-(__s32)offsetof(struct bpf_perf_event_data,
sample_period) - 8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
offsetof(struct bpf_perf_event_data,
sample_period)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "same insn cannot be used with different pointers",
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
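/* The next test looks up a map element and stores the returned element
 * pointer into the value itself.  For an unprivileged loader this would
 * leak a kernel address into memory readable from user space, so it is
 * rejected ("R0 leaks addr") while root is still allowed.  A roughly
 * equivalent restricted-C sketch (names are illustrative only):
 *
 *	u32 key = 0;
 *	u64 *value = bpf_map_lookup_elem(&map, &key);
 *
 *	if (value)
 *		*value = (u64)value;
 */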
{
"unpriv: write pointer into map elem value",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
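/* A 32-bit move out of a pointer register truncates the address and
 * would leak part of it, hence "R10 partial copy" for unprivileged.
 */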
{
"unpriv: partial copy of pointer",
.insns = {
BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 partial copy",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: pass pointer to tail_call",
.insns = {
BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.errstr_unpriv = "R3 leaks addr into helper",
.result_unpriv = REJECT,
.result = ACCEPT,
},
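/* Comparing a map pointer against a constant would let an unprivileged
 * program probe kernel addresses, so it is flagged as a pointer
 * comparison.  Writing into R10, whether directly or via a spill/fill,
 * is refused for everyone: the frame pointer is read only.
 */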
{
"unpriv: cmp map pointer with zero",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: write into frame pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "frame pointer is read only",
.result = REJECT,
},
{
"unpriv: spill/fill frame pointer",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "frame pointer is read only",
.result = REJECT,
},
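/* Stack addresses may be formed as Rx = R10 - imm, but comparing R10 or
 * such a derived pointer, or applying further arithmetic to it, is
 * reported as pointer comparison/arithmetic for unprivileged loaders
 * while remaining acceptable for root.
 */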
{
"unpriv: cmp of frame pointer",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: cmp of stack pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R2 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: obfuscate stack pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R2 pointer arithmetic",
.result_unpriv = REJECT,
.result = ACCEPT,
},
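/* The raw_stack tests hand a stack buffer to bpf_skb_load_bytes().
 * The helper is allowed to write into uninitialized stack, so no
 * explicit init of the buffer is required, but the buffer must lie
 * entirely within the 512-byte stack and have a positive length;
 * without the call the slot stays uninitialized and may not be read.
 * Roughly what the accepted cases do, as a restricted-C sketch:
 *
 *	u64 buf;
 *
 *	if (bpf_skb_load_bytes(skb, 4, &buf, sizeof(buf)) < 0)
 *		return 0;
 *	return buf;
 */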
{
"raw_stack: no skb_load_bytes",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
/* Call to skb_load_bytes() omitted. */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid read from stack off -8+0 size 8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, negative len",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, negative len 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, ~0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, zero len",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, no init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs around bounds",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs corruption",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs corruption 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
offsetof(struct __sk_buff, pkt_type)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R3 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs + data",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-513 access_size=8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-1 access_size=8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-1 access_size=-1",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-512 access_size=0",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, large access",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 512),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
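/* Direct packet access: loads and stores through skb->data are only
 * permitted once the verifier has seen, on that path, a bounds check of
 * the access end against skb->data_end, and only for program types that
 * expose data/data_end at all (a socket filter gets "invalid
 * bpf_context access").  The canonical pattern as a restricted-C
 * sketch:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + 8 > data_end)
 *		return 0;
 *	return *(u8 *)data;
 */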
{
"direct packet access: test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access off=76",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
},
{
"direct packet access: test4 (write)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test5 (pkt_end >= reg, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test6 (pkt_end >= reg, bad access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test7 (pkt_end >= reg, both accesses)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test8 (double test, variant 1)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test9 (double test, variant 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test10 (write invalid)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
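/* test11 through test14 run a known constant through ALU ops (shift,
 * and, branches) before adding it to a packet pointer; the verifier is
 * expected to keep tracking the result as a constant so the earlier
 * data_end check still covers the access.
 */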
bpf: enable verifier to better track const alu ops William reported couple of issues in relation to direct packet access. Typical scheme is to check for data + [off] <= data_end, where [off] can be either immediate or coming from a tracked register that contains an immediate, depending on the branch, we can then access the data. However, in case of calculating [off] for either the mentioned test itself or for access after the test in a more "complex" way, then the verifier will stop tracking the CONST_IMM marked register and will mark it as UNKNOWN_VALUE one. Adding that UNKNOWN_VALUE typed register to a pkt() marked register, the verifier then bails out in check_packet_ptr_add() as it finds the registers imm value below 48. In the first below example, that is due to evaluate_reg_imm_alu() not handling right shifts and thus marking the register as UNKNOWN_VALUE via helper __mark_reg_unknown_value() that resets imm to 0. In the second case the same happens at the time when r4 is set to r4 &= r5, where it transitions to UNKNOWN_VALUE from evaluate_reg_imm_alu(). Later on r4 we shift right by 3 inside evaluate_reg_alu(), where the register's imm turns into 3. That is, for registers with type UNKNOWN_VALUE, imm of 0 means that we don't know what value the register has, and for imm > 0 it means that the value has [imm] upper zero bits. F.e. when shifting an UNKNOWN_VALUE register by 3 to the right, no matter what value it had, we know that the 3 upper most bits must be zero now. This is to make sure that ALU operations with unknown registers don't overflow. Meaning, once we know that we have more than 48 upper zero bits, or, in other words cannot go beyond 0xffff offset with ALU ops, such an addition will track the target register as a new pkt() register with a new id, but 0 offset and 0 range, so for that a new data/data_end test will be required. Is the source register a CONST_IMM one that is to be added to the pkt() register, or the source instruction is an add instruction with immediate value, then it will get added if it stays within max 0xffff bounds. >From there, pkt() type, can be accessed should reg->off + imm be within the access range of pkt(). [...] from 28 to 30: R0=imm1,min_value=1,max_value=1 R1=pkt(id=0,off=0,r=22) R2=pkt_end R3=imm144,min_value=144,max_value=144 R4=imm0,min_value=0,max_value=0 R5=inv48,min_value=2054,max_value=2054 R10=fp 30: (bf) r5 = r3 31: (07) r5 += 23 32: (77) r5 >>= 3 33: (bf) r6 = r1 34: (0f) r6 += r5 cannot add integer value with 0 upper zero bits to ptr_to_packet [...] from 52 to 80: R0=imm1,min_value=1,max_value=1 R1=pkt(id=0,off=0,r=34) R2=pkt_end R3=inv R4=imm272 R5=inv56,min_value=17,max_value=17 R6=pkt(id=0,off=26,r=34) R10=fp 80: (07) r4 += 71 81: (18) r5 = 0xfffffff8 83: (5f) r4 &= r5 84: (77) r4 >>= 3 85: (0f) r1 += r4 cannot add integer value with 3 upper zero bits to ptr_to_packet Thus to get above use-cases working, evaluate_reg_imm_alu() has been extended for further ALU ops. This is fine, because we only operate strictly within realm of CONST_IMM types, so here we don't care about overflows as they will happen in the simulated but also real execution and interaction with pkt() in check_packet_ptr_add() will check actual imm value once added to pkt(), but it's irrelevant before. 
With regards to 06c1c049721a ("bpf: allow helpers access to variable memory") that works on UNKNOWN_VALUE registers, the verifier becomes now a bit smarter as it can better resolve ALU ops, so we need to adapt two test cases there, as min/max bound tracking only becomes necessary when registers were spilled to stack. So while mask was set before to track upper bound for UNKNOWN_VALUE case, it's now resolved directly as CONST_IMM, and such contructs are only necessary when f.e. registers are spilled. For commit 6b17387307ba ("bpf: recognize 64bit immediate loads as consts") that initially enabled dw load tracking only for nfp jit/ analyzer, I did couple of tests on large, complex programs and we don't increase complexity badly (my tests were in ~3% range on avg). I've added a couple of tests similar to affected code above, and it works fine with verifier now. Reported-by: William Tu <u9012063@gmail.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Gianluca Borello <g.borello@gmail.com> Cc: William Tu <u9012063@gmail.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
2017-01-24 08:06:30 +08:00
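As a rough source-level illustration of the pattern the commit above makes
verifiable (not part of this test file; the program name, the concrete
offsets and the omitted SEC() annotation are made up for the sketch, assuming
a clang BPF target and the usual __sk_buff data/data_end convention), the
compiled form of something like the following is what test11/test12 below
encode as raw instructions:

	#include <linux/bpf.h>

	/* Sketch only: the constant offset is derived via add + right shift,
	 * mirroring "test11 (shift, good access)" right below. With the
	 * commit above, the verifier keeps tracking it as CONST_IMM instead
	 * of degrading it to UNKNOWN_VALUE.
	 */
	int cls_shift_example(struct __sk_buff *skb)
	{
		void *data = (void *)(long)skb->data;
		void *data_end = (void *)(long)skb->data_end;
		unsigned long off = 144;

		if (data + 22 > data_end)
			return 0;

		off = (off + 23) >> 3;	/* 167 >> 3 == 20, still within the checked 22 bytes */

		return *((unsigned char *)data + off);
	}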
{
"direct packet access: test11 (shift, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
BPF_MOV64_IMM(BPF_REG_3, 144),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test12 (and, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
BPF_MOV64_IMM(BPF_REG_3, 144),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test13 (branches, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_MOV64_IMM(BPF_REG_4, 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
BPF_MOV64_IMM(BPF_REG_3, 14),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_3, 24),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
BPF_MOV64_IMM(BPF_REG_5, 12),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test1, valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_update_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 5 },
.result_unpriv = ACCEPT,
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test2, unchecked packet_ptr",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test3, variable add",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 11 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test4, packet_ptr with bad range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 7 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test5, packet_ptr with too short range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 6 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"helper access to packet: test6, cls valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_update_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 5 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test7, cls unchecked packet_ptr",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test8, cls variable add",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 11 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test9, cls packet_ptr with bad range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 7 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test10, cls packet_ptr with too short range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 6 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test11, cls unsuitable helper 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_4, 42),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_store_bytes),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "helper access to the packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test12, cls unsuitable helper 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_4, 4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "helper access to the packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test13, cls helper ok",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test14, cls helper fail sub",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "type=inv expected=fp",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test15, cls helper fail range 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test16, cls helper fail range 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, -9),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test17, cls helper fail range 3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, ~0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test18, cls helper fail range zero",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test19, pkt end as input",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=pkt_end expected=fp",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to packet: test20, wrong reg",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_diff),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"valid map access into an array with a constant",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"valid map access into an array with a register",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"valid map access into an array with a variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"valid map access into an array with a signed variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"invalid map access into an array with a constant",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=48 size=8",
.result = REJECT,
},
{
"invalid map access into an array with a register",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is outside of the array range",
.result_unpriv = REJECT,
.result = REJECT,
},
{
"invalid map access into an array with a variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
},
{
"invalid map access into an array with no floor check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
},
{
"invalid map access into an array with a invalid max check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid access to map value, value_size=48 off=44 size=8",
.result_unpriv = REJECT,
.result = REJECT,
},
{
"invalid map access into an array with a invalid max check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3, 11 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result_unpriv = REJECT,
.result = REJECT,
},
{
"multiple registers share map_lookup_elem result",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
"invalid memory access with multiple map_lookup_elem calls",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = REJECT,
.errstr = "R4 !read_ok",
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
"valid indirect map_lookup_elem access with 2nd lookup in branch",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_2, 10),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
bpf: fix regression on verifier pruning wrt map lookups

Commit 57a09bf0a416 ("bpf: Detect identical PTR_TO_MAP_VALUE_OR_NULL
registers") introduced a regression where existing programs stopped loading
due to reaching the verifier's maximum complexity limit, whereas prior to
this commit they were loading just fine; the affected program has roughly 2k
instructions. What was found is that state pruning couldn't be performed
effectively anymore due to mismatches of the verifier's register state, in
particular in the id tracking. It doesn't mean that 57a09bf0a416 is incorrect
per se, but rather that the verifier needs to perform a lot more work for the
same program with regards to the involved map lookups.

Since commit 57a09bf0a416 is only about tracking registers with type
PTR_TO_MAP_VALUE_OR_NULL, the id is only needed to follow registers until
they are promoted through pattern matching with a NULL check to either
PTR_TO_MAP_VALUE or UNKNOWN_VALUE type. After that point, the id becomes
irrelevant for the transitioned types. For UNKNOWN_VALUE, the id is already
reset to 0 via mark_reg_unknown_value(), but not so for PTR_TO_MAP_VALUE,
where the id becomes stale. It's even transferred further into other types
that don't make use of it. Among others, one example is where UNKNOWN_VALUE
is set on function call return with a RET_INTEGER return type. states_equal()
will then fall through to the memcmp() on register state; note that the
second memcmp() uses offsetofend(), so the id is part of that since
d2a4dd37f6b4 ("bpf: fix state equivalence"). But the bisect already pointed
to 57a09bf0a416, where we really go beyond the complexity limit.

What I found was that states_equal() often failed in this case due to id
mismatches in spilled regs with registers of type PTR_TO_MAP_VALUE. Unlike
non-spilled regs, spilled regs just perform a memcmp() on their reg state and
don't have any other optimizations in place, therefore the id was also
relevant in this case for making a pruning decision. We can safely reset the
id to 0 as well when converting to PTR_TO_MAP_VALUE. For the affected
program, this resulted in a ~17 fold reduction of complexity and let the
program load fine again. The selftest suite also runs fine. The only other
place where env->id_gen is currently used is through direct packet access,
but for these cases the id is long living, thus a different scenario.

Also, the current logic in mark_map_regs() is not fully correct when marking
the NULL branch with UNKNOWN_VALUE. We need to cache the destination reg's id
in any case. Otherwise, once we marked that reg as UNKNOWN_VALUE, its id is
reset and any subsequent registers that hold the original id and are of type
PTR_TO_MAP_VALUE_OR_NULL won't be marked UNKNOWN_VALUE anymore, since
mark_map_reg() reuses the uncached regs[regno].id that was just overridden.
Note, we don't need to cache it outside of mark_map_regs(), since it's called
once on this_branch and the other time on other_branch, which are two
independent verifier states. A test case for this is added here, too.

Fixes: 57a09bf0a416 ("bpf: Detect identical PTR_TO_MAP_VALUE_OR_NULL registers")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Thomas Graf <tgraf@suug.ch>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-12-15 08:30:06 +08:00
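The tests that follow exercise this id sharing at the raw instruction level.
As a rough source-level sketch of the same pattern (not part of this test
file; the map definition, section name and the bpf_helpers.h-style helper
declarations are assumptions in samples/bpf style), a single NULL check is
expected to promote every copy of the lookup result:

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* assumed: samples/bpf-style SEC() and helper declarations */

	struct bpf_map_def SEC("maps") some_map = {
		.type = BPF_MAP_TYPE_ARRAY,
		.key_size = sizeof(__u32),
		.value_size = sizeof(__u64),
		.max_entries = 1,
	};

	SEC("classifier")
	int share_lookup(struct __sk_buff *skb)
	{
		__u32 key = 0;
		__u64 *a = bpf_map_lookup_elem(&some_map, &key);
		__u64 *b = a;	/* b shares a's id while both are PTR_TO_MAP_VALUE_OR_NULL */

		if (!a)		/* one NULL check promotes both a and b */
			return 0;

		*b = 42;	/* only valid because b was promoted to PTR_TO_MAP_VALUE too */
		return 0;
	}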
{
"multiple registers share map_lookup_elem bad reg type",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
BPF_MOV64_IMM(BPF_REG_1, 3),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.result = REJECT,
.errstr = "R3 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
"invalid map access from else condition",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
.result = REJECT,
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result_unpriv = REJECT,
},
{
"constant register |= constant should keep constant type",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant should not bypass stack boundary checks",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-48 access_size=58",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant register should keep constant type",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_MOV64_IMM(BPF_REG_4, 13),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant register should not bypass stack boundary checks",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_MOV64_IMM(BPF_REG_4, 24),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-48 access_size=58",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"invalid direct packet write for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "cannot write into packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"invalid direct packet write for LWT_OUT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "cannot write into packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_OUT,
},
{
"direct packet write for LWT_XMIT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
{
"direct packet read for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"direct packet read for LWT_OUT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_OUT,
},
{
"direct packet read for LWT_XMIT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
{
"invalid access of tc_classid for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
{
"invalid access of tc_classid for LWT_OUT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
{
"invalid access of tc_classid for LWT_XMIT",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
bpf: allow helpers access to map element values

Enable helpers to directly access a map element value by passing a register
type PTR_TO_MAP_VALUE (or PTR_TO_MAP_VALUE_ADJ) to helper arguments
ARG_PTR_TO_STACK or ARG_PTR_TO_RAW_STACK.

This enables several use cases. For example, a typical tracing program might
want to capture pathnames passed to sys_open() with:

	struct trace_data {
		char pathname[PATHLEN];
	};

	SEC("kprobe/sys_open")
	void bpf_sys_open(struct pt_regs *ctx)
	{
		struct trace_data data;
		bpf_probe_read(data.pathname, sizeof(data.pathname), ctx->di);

		/* consume data.pathname, for example via
		 * bpf_trace_printk() or bpf_perf_event_output()
		 */
	}

Such a program could easily hit the stack limit in case PATHLEN needs to be
large or more local variables need to exist, both of which are quite common
scenarios. Allowing direct helper access to map element values, one could do:

	struct bpf_map_def SEC("maps") scratch_map = {
		.type = BPF_MAP_TYPE_PERCPU_ARRAY,
		.key_size = sizeof(u32),
		.value_size = sizeof(struct trace_data),
		.max_entries = 1,
	};

	SEC("kprobe/sys_open")
	int bpf_sys_open(struct pt_regs *ctx)
	{
		int id = 0;
		struct trace_data *p = bpf_map_lookup_elem(&scratch_map, &id);

		if (!p)
			return;

		bpf_probe_read(p->pathname, sizeof(p->pathname), ctx->di);

		/* consume p->pathname, for example via
		 * bpf_trace_printk() or bpf_perf_event_output()
		 */
	}

And wouldn't risk exhausting the stack.

Code changes are loosely modeled after commit 6841de8b0d03 ("bpf: allow
helpers access the packet directly"). Unlike with PTR_TO_PACKET, these
changes just work with ARG_PTR_TO_STACK and ARG_PTR_TO_RAW_STACK (not
ARG_PTR_TO_MAP_KEY, ARG_PTR_TO_MAP_VALUE, ...): adding those would be
trivial, but since there is not currently a use case for that, it's
reasonable to limit the set of changes.

Also, add new tests to make sure accesses to map element values from helpers
never go out of boundary, even when adjusted.

Signed-off-by: Gianluca Borello <g.borello@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-01-10 02:19:47 +08:00
{
"helper access to map: full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: out-of-bound range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=56",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to map: negative range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=-8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): out-of-bound range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo) + 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=52",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): negative range (> adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const imm): negative range (< adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): out-of-bound range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo) + 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=52",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): negative range (> adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via const reg): negative range (< adjustment)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3,
offsetof(struct test_val, foo)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, -1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): partial range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): empty range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): no max check",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to adjusted map (via variable): wrong max check",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2,
sizeof(struct test_val) -
offsetof(struct test_val, foo) + 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=45",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"map element value is preserved across register spilling",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
},
{
"map element value (adjusted) is preserved across register spilling",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
offsetof(struct test_val, foo)),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.result = ACCEPT,
.result_unpriv = REJECT,
},
bpf: allow helpers access to variable memory

Currently, helpers that read and write from/to the stack can do so using a
pair of arguments of type ARG_PTR_TO_STACK and ARG_CONST_STACK_SIZE.
ARG_CONST_STACK_SIZE accepts a constant register of type CONST_IMM, so that
the verifier can safely check the memory access. However, requiring the
argument to be a constant can be limiting in some circumstances.

Since the current logic keeps track of the minimum and maximum value of a
register throughout the simulated execution, ARG_CONST_STACK_SIZE can be
changed to also accept an UNKNOWN_VALUE register in case its boundaries have
been set and the range doesn't cause invalid memory accesses.

One common situation when this is useful:

	int len;
	char buf[BUFSIZE]; /* BUFSIZE is 128 */

	if (some_condition)
		len = 42;
	else
		len = 84;

	some_helper(..., buf, len & (BUFSIZE - 1));

The compiler can often decide to assign the constant values 42 or 84 into a
variable on the stack, instead of keeping it in a register. When the variable
is then read back from the stack into the register in order to be passed to
the helper, the verifier will not be able to recognize the register as
constant (the verifier is not currently tracking all constant writes into
memory), and the program won't be valid.

However, by allowing the helper to accept an UNKNOWN_VALUE register, this
program will work because the bitwise AND operation will set the range of
possible values for the UNKNOWN_VALUE register to [0, BUFSIZE), so the
verifier can guarantee the helper call will be safe (assuming the argument is
of type ARG_CONST_STACK_SIZE_OR_ZERO, otherwise one more check against 0
would be needed). Custom ranges can be set not only with ALU operations, but
also by explicitly comparing the UNKNOWN_VALUE register with constants.

Another very common example happens when intercepting system call arguments
and accessing user-provided data of variable size using bpf_probe_read(). One
can load at runtime the user-provided length into an UNKNOWN_VALUE register,
and then read that exact amount of data up to a compile-time determined limit
in order to fit into the proper local storage allocated on the stack, without
having to guess a suboptimal access size at compile time.

Also, in case the helpers accepting the UNKNOWN_VALUE register operate in raw
mode, disable the raw mode so that the program is required to initialize all
memory, since there is no guarantee the helper will fill it completely,
leaving possibilities for data leak (just relevant when the memory used by
the helper is the stack, not when using a pointer to map element value or
packet). In other words, ARG_PTR_TO_RAW_STACK will be treated as
ARG_PTR_TO_STACK.

Signed-off-by: Gianluca Borello <g.borello@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-01-10 02:19:49 +08:00
{
"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, bitwise AND, zero included",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=65",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP (signed), correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, bounds + offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=65",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=65",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, no max check",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R2 unbounded memory access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP, no min check",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-64 access_size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: stack, JMP (signed), no min check",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R2 min value is negative",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
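/* The same JMP-based bound checks as above, but with a pointer into a
 * map value (optionally advanced by a constant offset) instead of a
 * stack buffer; the proven upper bound of the size must stay within the
 * value size of struct test_val.
 */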
{
"helper access to variable memory: map, JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val), 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: map, JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) + 1, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=49",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: map adjusted, JMP, correct bounds",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) - 20, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: map adjusted, JMP, wrong max",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
sizeof(struct test_val) - 19, 4),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
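/* NULL / zero-size combinations for bpf_csum_diff(): a possibly non-zero
 * size together with a NULL pointer, and a zero size together with a
 * valid stack pointer, must both be rejected.
 */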
{
"helper access to variable memory: size > 0 not allowed on NULL",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
.errstr = "R1 type=imm expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size = 0 not allowed on != NULL",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-8 access_size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
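/* Because the size is variable, the helper may not overwrite the whole
 * buffer, so the stack region must be fully initialized up front: the
 * first test below leaves the qword at fp-32 uninitialized and is
 * rejected, the second zeroes all 64 bytes and is accepted.
 */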
{
"helper access to variable memory: 8 bytes leak",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack off -64+32 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: 8 bytes no leak (init memory)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
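/* The final two tests check that value range tracking catches index
 * arithmetic whose minimum value may be negative (AND with a negative
 * immediate, 32-bit MOD/MUL games) before it is added to a map value
 * pointer.
 */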
{
"invalid and of negative number",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 6),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"invalid range check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
BPF_MOV32_IMM(BPF_REG_3, 1),
BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
.result_unpriv = REJECT,
}
};
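/* Determine the length of a test program by scanning backwards for the
 * last non-zero instruction in the fixed-size insns array.
 */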
static int probe_filter_length(const struct bpf_insn *fp)
{
int len;
for (len = MAX_INSNS - 1; len > 0; --len)
if (fp[len].code != 0 || fp[len].imm != 0)
break;
return len + 1;
}
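/* Create a hash map with an 8-byte key; the caller picks the value size
 * and element count. Used to patch up tests that reference fixup_map1
 * and fixup_map2.
 */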
static int create_map(uint32_t size_value, uint32_t max_elem)
{
int fd;
fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long),
size_value, max_elem, BPF_F_NO_PREALLOC);
if (fd < 0)
printf("Failed to create hash map '%s'!\n", strerror(errno));
return fd;
}
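/* Create a small prog array (4 slots) for tests that exercise tail
 * calls via fixup_prog.
 */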
static int create_prog_array(void)
{
int fd;
fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), 4, 0);
if (fd < 0)
printf("Failed to create prog array '%s'!\n", strerror(errno));
return fd;
}
static char bpf_vlog[32768];
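/* Patch the BPF_LD_MAP_FD placeholders listed in the test's fixup arrays
 * with the file descriptors of freshly created maps.
 */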
static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
int *fd_f1, int *fd_f2, int *fd_f3)
{
int *fixup_map1 = test->fixup_map1;
int *fixup_map2 = test->fixup_map2;
int *fixup_prog = test->fixup_prog;
/* Allocating hash maps with a single element is fine here, since we
* only exercise the verifier and never do a runtime lookup, so the
* only thing that really matters is the value size.
*/
if (*fixup_map1) {
*fd_f1 = create_map(sizeof(long long), 1);
do {
prog[*fixup_map1].imm = *fd_f1;
fixup_map1++;
} while (*fixup_map1);
}
if (*fixup_map2) {
*fd_f2 = create_map(sizeof(struct test_val), 1);
do {
prog[*fixup_map2].imm = *fd_f2;
fixup_map2++;
} while (*fixup_map2);
}
if (*fixup_prog) {
*fd_f3 = create_prog_array();
do {
prog[*fixup_prog].imm = *fd_f3;
fixup_prog++;
} while (*fixup_prog);
}
}
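/* Load a single test program and compare the verifier's verdict and log
 * against the expected (un)privileged result and error string.
 */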
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
struct bpf_insn *prog = test->insns;
int prog_len = probe_filter_length(prog);
int prog_type = test->prog_type;
int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
int fd_prog, expected_ret;
const char *expected_err;
do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
prog, prog_len, "GPL", 0, bpf_vlog,
sizeof(bpf_vlog));
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
if (expected_ret == ACCEPT) {
if (fd_prog < 0) {
printf("FAIL\nFailed to load prog '%s'!\n",
strerror(errno));
goto fail_log;
}
} else {
if (fd_prog >= 0) {
printf("FAIL\nUnexpected success to load!\n");
goto fail_log;
}
if (!strstr(bpf_vlog, expected_err)) {
printf("FAIL\nUnexpected error message!\n");
goto fail_log;
}
}
(*passes)++;
printf("OK\n");
close_fds:
close(fd_prog);
close(fd_f1);
close(fd_f2);
close(fd_f3);
sched_yield();
return;
fail_log:
(*errors)++;
printf("%s", bpf_vlog);
goto close_fds;
}
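/* Report whether CAP_SYS_ADMIN is currently in the effective capability
 * set; this decides whether the privileged test variants can run.
 */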
static bool is_admin(void)
{
cap_t caps;
cap_flag_value_t sysadmin = CAP_CLEAR;
const cap_value_t cap_val = CAP_SYS_ADMIN;
if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
perror("cap_get_flag");
return false;
}
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
return false;
}
if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
perror("cap_get_flag");
if (cap_free(caps))
perror("cap_free");
return (sysadmin == CAP_SET);
}
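/* Raise or drop CAP_SYS_ADMIN in the effective set so that a root run
 * can also exercise the unprivileged code paths.
 */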
static int set_admin(bool admin)
{
cap_t caps;
const cap_value_t cap_val = CAP_SYS_ADMIN;
int ret = -1;
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
return -1;
}
if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
admin ? CAP_SET : CAP_CLEAR)) {
perror("cap_set_flag");
goto out;
}
if (cap_set_proc(caps)) {
perror("cap_set_proc");
goto out;
}
ret = 0;
out:
if (cap_free(caps))
perror("cap_free");
return ret;
}
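/* Run the selected range of tests. Tests without an explicit prog_type
 * (i.e. socket filters) are additionally run with privileges dropped.
 */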
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
int i, passes = 0, errors = 0;
for (i = from; i < to; i++) {
struct bpf_test *test = &tests[i];
/* Program types that are not supported under non-root are
* skipped for the unprivileged run.
*/
if (!test->prog_type) {
if (!unpriv)
set_admin(false);
printf("#%d/u %s ", i, test->descr);
do_test_single(test, true, &passes, &errors);
if (!unpriv)
set_admin(true);
}
if (!unpriv) {
printf("#%d/p %s ", i, test->descr);
do_test_single(test, false, &passes, &errors);
}
}
printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
return errors ? -errors : 0;
}
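/* An optional single argument selects one test, two arguments select an
 * inclusive range. The memlock rlimit is lifted for privileged runs and
 * capped at 1 MiB otherwise.
 */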
int main(int argc, char **argv)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
struct rlimit rlim = { 1 << 20, 1 << 20 };
unsigned int from = 0, to = ARRAY_SIZE(tests);
bool unpriv = !is_admin();
if (argc == 3) {
unsigned int l = atoi(argv[argc - 2]);
unsigned int u = atoi(argv[argc - 1]);
if (l < to && u < to) {
from = l;
to = u + 1;
}
} else if (argc == 2) {
unsigned int t = atoi(argv[argc - 1]);
if (t < to) {
from = t;
to = t + 1;
}
}
setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
return do_test(unpriv, from, to);
}