bpf: fix context access in tracing progs on 32 bit archs
Wang reported that all of the test cases for the BPF_PROG_TYPE_PERF_EVENT
program type in test_verifier fail on x86_32 with the following errors:

  172/p unpriv: spill/fill of different pointers ldx FAIL
  Unexpected error message!
  0: (bf) r6 = r10
  1: (07) r6 += -8
  2: (15) if r1 == 0x0 goto pc+3
  R1=ctx(id=0,off=0,imm=0) R6=fp-8,call_-1 R10=fp0,call_-1
  3: (bf) r2 = r10
  4: (07) r2 += -76
  5: (7b) *(u64 *)(r6 +0) = r2
  6: (55) if r1 != 0x0 goto pc+1
  R1=ctx(id=0,off=0,imm=0) R2=fp-76,call_-1 R6=fp-8,call_-1 R10=fp0,call_-1 fp-8=fp
  7: (7b) *(u64 *)(r6 +0) = r1
  8: (79) r1 = *(u64 *)(r6 +0)
  9: (79) r1 = *(u64 *)(r1 +68)
  invalid bpf_context access off=68 size=8

  378/p check bpf_perf_event_data->sample_period byte load permitted FAIL
  Failed to load prog 'Permission denied'!
  0: (b7) r0 = 0
  1: (71) r0 = *(u8 *)(r1 +68)
  invalid bpf_context access off=68 size=1

  379/p check bpf_perf_event_data->sample_period half load permitted FAIL
  Failed to load prog 'Permission denied'!
  0: (b7) r0 = 0
  1: (69) r0 = *(u16 *)(r1 +68)
  invalid bpf_context access off=68 size=2

  380/p check bpf_perf_event_data->sample_period word load permitted FAIL
  Failed to load prog 'Permission denied'!
  0: (b7) r0 = 0
  1: (61) r0 = *(u32 *)(r1 +68)
  invalid bpf_context access off=68 size=4

  381/p check bpf_perf_event_data->sample_period dword load permitted FAIL
  Failed to load prog 'Permission denied'!
  0: (b7) r0 = 0
  1: (79) r0 = *(u64 *)(r1 +68)
  invalid bpf_context access off=68 size=8

The reason is that struct pt_regs on x86_32 is 68 bytes in size and thus
does not fully align to an 8 byte boundary. bpf_ctx_narrow_access_ok()
therefore bails out because off & (size_default - 1), that is 68 & 7, does
not align cleanly for the sample_period access from struct
bpf_perf_event_data, so the verifier wrongly assumes an unaligned access
even though the underlying arch can handle it just fine. Adjust the default
size down to the machine word size, and check and rewrite the offset for
narrow accesses on that basis. The corresponding pe_prog_is_valid_access()
also needs fixing, since the first and last test hit the check for
off % size != 0 (e.g. 68 % 8 -> 4). With that in place, tracing programs
work on x86_32.

Reported-by: Wang YanQing <udknight@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: Wang YanQing <udknight@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent b3bbba3570
commit bc23105ca0
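As an illustration of the alignment arithmetic described in the commit
message, here is a minimal user-space sketch (not kernel code; the helper
name adjust_machine is made up, and the printed results assume a 32-bit
build where sizeof(unsigned long) == 4):

#include <stdio.h>

/* Clamp an access size to the machine word, mirroring the approach
 * described above; on a 32-bit build this turns 8 into 4. */
static unsigned int adjust_machine(unsigned int size)
{
        const unsigned int size_machine = sizeof(unsigned long);

        if (size > size_machine && size % size_machine == 0)
                size = size_machine;
        return size;
}

int main(void)
{
        unsigned int off = 68, size_default = 8;

        /* Old check: 68 & 7 == 4, so the access is rejected. */
        printf("old: %s\n",
               (off & (size_default - 1)) == 0 ? "ok" : "rejected");
        /* Adjusted check on a 32-bit build: 68 & (4 - 1) == 0, so it is
         * accepted. On a 64-bit build size_default stays 8 and nothing
         * changes. */
        printf("new: %s\n",
               (off & (adjust_machine(size_default) - 1)) == 0 ? "ok" : "rejected");
        return 0;
}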
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -639,16 +639,34 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
 	return prog->type == BPF_PROG_TYPE_UNSPEC;
 }
 
-static inline bool
-bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
+static inline u32 bpf_ctx_off_adjust_machine(u32 size)
+{
+	const u32 size_machine = sizeof(unsigned long);
+
+	if (size > size_machine && size % size_machine == 0)
+		size = size_machine;
+
+	return size;
+}
+
+static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
+					   u32 size_default)
 {
-	bool off_ok;
+	size_default = bpf_ctx_off_adjust_machine(size_default);
+	size_access  = bpf_ctx_off_adjust_machine(size_access);
+
 #ifdef __LITTLE_ENDIAN
-	off_ok = (off & (size_default - 1)) == 0;
+	return (off & (size_default - 1)) == 0;
 #else
-	off_ok = (off & (size_default - 1)) + size == size_default;
+	return (off & (size_default - 1)) + size_access == size_default;
 #endif
-	return off_ok && size <= size_default && (size & (size - 1)) == 0;
+}
+
+static inline bool
+bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
+{
+	return bpf_ctx_narrow_align_ok(off, size, size_default) &&
+	       size <= size_default && (size & (size - 1)) == 0;
 }
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
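For reference, here is how the new helpers behave in the failing case from
the commit message, assuming x86_32 where sizeof(unsigned long) is 4:
bpf_ctx_off_adjust_machine(8) returns 4 because 8 is larger than and
divisible by the machine word, so the little-endian alignment test becomes
(68 & 3) == 0, which holds, and the byte/half/word/dword loads of
sample_period at offset 68 now pass bpf_ctx_narrow_access_ok(). On 64-bit
architectures sizeof(unsigned long) already equals the default field size
of 8, so the checks behave exactly as before.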
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5349,6 +5349,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		 */
 		is_narrower_load = size < ctx_field_size;
 		if (is_narrower_load) {
+			u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
 			u32 off = insn->off;
 			u8 size_code;
 
@@ -5363,7 +5364,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			else if (ctx_field_size == 8)
 				size_code = BPF_DW;
 
-			insn->off = off & ~(ctx_field_size - 1);
+			insn->off = off & ~(size_default - 1);
 			insn->code = BPF_LDX | BPF_MEM | size_code;
 		}
 
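A worked example of the rewrite, again assuming the x86_32 offsets from the
commit message: for a one-byte load of sample_period at off=68,
ctx_field_size is 8, so size_code stays BPF_DW, but size_default becomes
bpf_ctx_off_adjust_machine(8) == 4 and the widened load keeps
insn->off = 68 & ~3 = 68. With the old mask, 68 & ~(8 - 1) = 64 would have
pointed the rewritten load into the preceding pt_regs bytes instead of at
sample_period, which is why the offset mask has to use the same
machine-word-adjusted size as the narrow-access check.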
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -880,8 +880,14 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
 		return false;
 	if (type != BPF_READ)
 		return false;
-	if (off % size != 0)
-		return false;
+	if (off % size != 0) {
+		if (sizeof(unsigned long) != 4)
+			return false;
+		if (size != 8)
+			return false;
+		if (off % size != 4)
+			return false;
+	}
 
 	switch (off) {
 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
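The relaxed off % size check can be exercised in isolation. Below is a
stand-alone sketch (not kernel code; the function name off_size_ok is made
up) of the logic added above:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the off/size checks added above: a misaligned access is only
 * tolerated on a 32-bit build, and only for an 8-byte read that is
 * 4-byte aligned (the sample_period case at offset 68 on x86_32). */
static bool off_size_ok(int off, int size)
{
        if (off % size != 0) {
                if (sizeof(unsigned long) != 4)
                        return false;
                if (size != 8)
                        return false;
                if (off % size != 4)
                        return false;
        }
        return true;
}

int main(void)
{
        /* Accepted on a 32-bit build; on 64-bit kernels sample_period sits
         * at an 8-byte aligned offset anyway, so this branch is never
         * needed there. */
        printf("off=68 size=8 -> %d\n", off_size_ok(68, 8));
        /* Still rejected everywhere: an unaligned half-word read. */
        printf("off=3 size=2 -> %d\n", off_size_ok(3, 2));
        return 0;
}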