bpf: Rename btf_member accessors.
Rename btf_member_bit_offset() and btf_member_bitfield_size() to avoid conflicts with similarly named helpers in libbpf's btf.h. Rename the kernel helpers, since the libbpf helpers are part of uapi.

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211201181040.23337-3-alexei.starovoitov@gmail.com
This commit is contained in:
parent
74753e1462
commit
8293eb995f
|
@ -194,14 +194,14 @@ static inline bool btf_type_kflag(const struct btf_type *t)
|
|||
return BTF_INFO_KFLAG(t->info);
|
||||
}
|
||||
|
||||
static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
|
||||
static inline u32 __btf_member_bit_offset(const struct btf_type *struct_type,
|
||||
const struct btf_member *member)
|
||||
{
|
||||
return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
|
||||
: member->offset;
|
||||
}
|
||||
|
||||
static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
|
||||
static inline u32 __btf_member_bitfield_size(const struct btf_type *struct_type,
|
||||
const struct btf_member *member)
|
||||
{
|
||||
return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
|
||||
|
|
|
@ -165,7 +165,7 @@ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
|
|||
break;
|
||||
}
|
||||
|
||||
if (btf_member_bitfield_size(t, member)) {
|
||||
if (__btf_member_bitfield_size(t, member)) {
|
||||
pr_warn("bit field member %s in struct %s is not supported\n",
|
||||
mname, st_ops->name);
|
||||
break;
|
||||
|
@ -296,7 +296,7 @@ static int check_zero_holes(const struct btf_type *t, void *data)
|
|||
const struct btf_type *mtype;
|
||||
|
||||
for_each_member(i, t, member) {
|
||||
moff = btf_member_bit_offset(t, member) / 8;
|
||||
moff = __btf_member_bit_offset(t, member) / 8;
|
||||
if (moff > prev_mend &&
|
||||
memchr_inv(data + prev_mend, 0, moff - prev_mend))
|
||||
return -EINVAL;
|
||||
|
@ -387,7 +387,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
|||
struct bpf_prog *prog;
|
||||
u32 moff;
|
||||
|
||||
moff = btf_member_bit_offset(t, member) / 8;
|
||||
moff = __btf_member_bit_offset(t, member) / 8;
|
||||
ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
|
||||
if (ptype == module_type) {
|
||||
if (*(void **)(udata + moff))
|
||||
|
|
|
@ -2969,7 +2969,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
offset = btf_member_bit_offset(t, member);
|
||||
offset = __btf_member_bit_offset(t, member);
|
||||
if (is_union && offset) {
|
||||
btf_verifier_log_member(env, t, member,
|
||||
"Invalid member bits_offset");
|
||||
|
@ -3094,7 +3094,7 @@ static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t
|
|||
if (off != -ENOENT)
|
||||
/* only one such field is allowed */
|
||||
return -E2BIG;
|
||||
off = btf_member_bit_offset(t, member);
|
||||
off = __btf_member_bit_offset(t, member);
|
||||
if (off % 8)
|
||||
/* valid C code cannot generate such BTF */
|
||||
return -EINVAL;
|
||||
|
@ -3184,8 +3184,8 @@ static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
|
|||
|
||||
btf_show_start_member(show, member);
|
||||
|
||||
member_offset = btf_member_bit_offset(t, member);
|
||||
bitfield_size = btf_member_bitfield_size(t, member);
|
||||
member_offset = __btf_member_bit_offset(t, member);
|
||||
bitfield_size = __btf_member_bitfield_size(t, member);
|
||||
bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
|
||||
bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
|
||||
if (bitfield_size) {
|
||||
|
@ -5060,7 +5060,7 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
|
|||
if (array_elem->nelems != 0)
|
||||
goto error;
|
||||
|
||||
moff = btf_member_bit_offset(t, member) / 8;
|
||||
moff = __btf_member_bit_offset(t, member) / 8;
|
||||
if (off < moff)
|
||||
goto error;
|
||||
|
||||
|
@ -5083,14 +5083,14 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
|
|||
|
||||
for_each_member(i, t, member) {
|
||||
/* offset of the field in bytes */
|
||||
moff = btf_member_bit_offset(t, member) / 8;
|
||||
moff = __btf_member_bit_offset(t, member) / 8;
|
||||
if (off + size <= moff)
|
||||
/* won't find anything, field is already too far */
|
||||
break;
|
||||
|
||||
if (btf_member_bitfield_size(t, member)) {
|
||||
u32 end_bit = btf_member_bit_offset(t, member) +
|
||||
btf_member_bitfield_size(t, member);
|
||||
if (__btf_member_bitfield_size(t, member)) {
|
||||
u32 end_bit = __btf_member_bit_offset(t, member) +
|
||||
__btf_member_bitfield_size(t, member);
|
||||
|
||||
/* off <= moff instead of off == moff because clang
|
||||
* does not generate a BTF member for anonymous
|
||||
|
|
|
@ -169,7 +169,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog)
|
|||
t = bpf_tcp_congestion_ops.type;
|
||||
m = &btf_type_member(t)[midx];
|
||||
|
||||
return btf_member_bit_offset(t, m) / 8;
|
||||
return __btf_member_bit_offset(t, m) / 8;
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto *
|
||||
|
@ -244,7 +244,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
|
|||
utcp_ca = (const struct tcp_congestion_ops *)udata;
|
||||
tcp_ca = (struct tcp_congestion_ops *)kdata;
|
||||
|
||||
moff = btf_member_bit_offset(t, member) / 8;
|
||||
moff = __btf_member_bit_offset(t, member) / 8;
|
||||
switch (moff) {
|
||||
case offsetof(struct tcp_congestion_ops, flags):
|
||||
if (utcp_ca->flags & ~TCP_CONG_MASK)
|
||||
|
@ -274,7 +274,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
|
|||
static int bpf_tcp_ca_check_member(const struct btf_type *t,
|
||||
const struct btf_member *member)
|
||||
{
|
||||
if (is_unsupported(btf_member_bit_offset(t, member) / 8))
|
||||
if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
|
||||
return -ENOTSUPP;
|
||||
return 0;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue