bpf: Add support for BTF pointers to interpreter

A pointer to a BTF object is a pointer to a kernel object or NULL.
Memory accesses through such pointers in the interpreter therefore have
to go through probe_kernel_read(), so that a fault on a bad or NULL
pointer is handled instead of crashing the kernel.
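
As an illustrative sketch (not part of the patch), the difference between
the two load flavours in the interpreter looks roughly like this, where DST
and SRC stand for the interpreter's pseudo-registers:

	/* regular BPF_LDX | BPF_MEM load: oopses if SRC is a bad kernel pointer */
	DST = *(u32 *)(unsigned long) SRC;

	/* BPF_LDX | BPF_PROBE_MEM load: copies under exception handling;
	 * on a fault DST is zeroed and -EFAULT returned instead of crashing
	 */
	bpf_probe_read(&DST, sizeof(u32), (const void *)(long) SRC);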

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20191016032505.2089704-9-ast@kernel.org
commit 2a02759ef5 (parent ac4414b5ca)
Alexei Starovoitov, 2019-10-15 20:25:02 -07:00, committed by Daniel Borkmann
3 changed files with 30 additions and 0 deletions

include/linux/filter.h

@@ -65,6 +65,9 @@ struct ctl_table_header;
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0
/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM 0x20
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS	0xe0
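
For reference (an illustration, not part of the diff): BPF_PROBE_MEM reuses
the mode value 0x20, which is BPF_ABS and is only meaningful for BPF_LD in
eBPF, so the encoding stays unambiguous for BPF_LDX. A probing 4-byte load
is then encoded as:

	/* BPF_LDX = 0x01 (class), BPF_PROBE_MEM = 0x20 (mode), BPF_W = 0x00 (size) */
	insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_W;	/* opcode 0x21 */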

kernel/bpf/core.c

@@ -1291,6 +1291,11 @@ bool bpf_opcode_in_insntable(u8 code)
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u64 __weak bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
{
	memset(dst, 0, size);
	return -EFAULT;
}
/**
 * __bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
@@ -1310,6 +1315,10 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u6
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
@@ -1542,6 +1551,16 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u6
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
#define LDX_PROBE(SIZEOP, SIZE)					\
	LDX_PROBE_MEM_##SIZEOP:						\
		bpf_probe_read(&DST, SIZE, (const void *)(long) SRC);	\
		CONT;
	LDX_PROBE(B, 1)
	LDX_PROBE(H, 2)
	LDX_PROBE(W, 4)
	LDX_PROBE(DW, 8)
#undef LDX_PROBE

	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
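
To make the macro easier to read (expansion shown for illustration, not part
of the patch), LDX_PROBE(W, 4) expands to the interpreter label below: it
copies 4 bytes from the kernel address in SRC into DST via bpf_probe_read(),
which zeroes DST on a fault, and CONT then advances to the next instruction:

	LDX_PROBE_MEM_W:
		bpf_probe_read(&DST, 4, (const void *)(long) SRC);
		CONT;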

kernel/bpf/verifier.c

@@ -7581,6 +7581,7 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
	case PTR_TO_TCP_SOCK:
	case PTR_TO_TCP_SOCK_OR_NULL:
	case PTR_TO_XDP_SOCK:
	case PTR_TO_BTF_ID:
		return false;
	default:
		return true;
@@ -8722,6 +8723,13 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
		case PTR_TO_XDP_SOCK:
			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
			break;
		case PTR_TO_BTF_ID:
			if (type == BPF_WRITE) {
				verbose(env, "Writes through BTF pointers are not allowed\n");
				return -EINVAL;
			}

			insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
			continue;
		default:
			continue;
		}
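
To illustrate the rewrite convert_ctx_accesses() performs above (a sketch,
not part of the patch): a regular load through a register the verifier has
typed as PTR_TO_BTF_ID keeps its size, registers and offset; only the mode
bits of the opcode change:

	/* before: plain 8-byte load, r1 carries a PTR_TO_BTF_ID pointer;
	 * insn->code == BPF_LDX | BPF_MEM | BPF_DW
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0)

	/* after convert_ctx_accesses(): mode switched to BPF_PROBE_MEM, so the
	 * interpreter knows to use the fault-tolerant load;
	 * insn->code == BPF_LDX | BPF_PROBE_MEM | BPF_DW, dst/src/off unchanged
	 */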