mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2021-03-04

The following pull-request contains BPF updates for your *net* tree.

We've added 7 non-merge commits during the last 4 day(s) which contain
a total of 9 files changed, 128 insertions(+), 40 deletions(-).

The main changes are:

1) Fix 32-bit cmpxchg, from Brendan.

2) Fix atomic+fetch logic, from Ilya.

3) Fix usage of bpf_csum_diff in selftests, from Yauheni.
====================
commit 638526bb41
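For context on fix 1): BPF's 32-bit BPF_CMPXCHG always loads the old memory value into R0, zero-extended to 64 bits, even though some architectures' native instruction only performs that load when the comparison succeeds. A minimal C model of the required semantics (illustrative only, not code from this commit):

#include <stdint.h>

/* R0 holds the compare value on entry; on exit it holds the old memory
 * value with the upper 32 bits cleared, whether or not the swap happened.
 */
static uint64_t bpf_cmpxchg32_model(uint64_t r0, uint32_t *addr, uint32_t src)
{
	uint32_t old = *addr;             /* the old value is always loaded */

	if (old == (uint32_t)r0)          /* compare against R0's low 32 bits */
		*addr = src;              /* exchange only on success */
	return (uint64_t)old;             /* new R0: zero-extended old value */
}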
include/linux/netdevice.h
@@ -3959,8 +3959,6 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
 
-int xdp_umem_query(struct net_device *dev, u16 queue_id);
-
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);

kernel/bpf/core.c
@@ -2344,6 +2344,10 @@ bool __weak bpf_helper_changes_pkt_data(void *func)
 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
  * analysis code and wants explicit zero extension inserted by verifier.
  * Otherwise, return FALSE.
+ *
+ * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
+ * you don't override this. JITs that don't want these extra insns can detect
+ * them using insn_is_zext.
  */
 bool __weak bpf_jit_needs_zext(void)
 {

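The new comment refers to insn_is_zext(); that helper lives in include/linux/filter.h and keys off the marker the verifier puts on its synthetic mov32 (quoted from memory, so treat the exact details as an assumption):

static inline bool insn_is_zext(const struct bpf_insn *insn)
{
	/* Verifier-inserted zero extensions are a 32-bit mov reg,reg with the
	 * otherwise-unused imm field set to 1 (see BPF_ZEXT_REG).
	 */
	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}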
kernel/bpf/verifier.c
@@ -504,6 +504,13 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id)
 	       func_id == BPF_FUNC_skc_to_tcp_request_sock;
 }
 
+static bool is_cmpxchg_insn(const struct bpf_insn *insn)
+{
+	return BPF_CLASS(insn->code) == BPF_STX &&
+	       BPF_MODE(insn->code) == BPF_ATOMIC &&
+	       insn->imm == BPF_CMPXCHG;
+}
+
 /* string representation of 'enum bpf_reg_type' */
 static const char * const reg_type_str[] = {
 	[NOT_INIT]		= "?",

@@ -1703,7 +1710,11 @@ static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	}
 
 	if (class == BPF_STX) {
-		if (reg->type != SCALAR_VALUE)
+		/* BPF_STX (including atomic variants) has multiple source
+		 * operands, one of which is a ptr. Check whether the caller is
+		 * asking about it.
+		 */
+		if (t == SRC_OP && reg->type != SCALAR_VALUE)
 			return true;
 		return BPF_SIZE(code) == BPF_DW;
 	}

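Context for the is_reg64() change (an aside, not part of the diff): the t argument says which role is being queried — SRC_OP for a register the insn reads, DST_OP (or DST_OP_NO_MARK) for the value it defines. BPF_STX reads dst_reg as a pointer, so the "non-scalar means 64-bit" shortcut is only valid for SRC_OP queries; for the value an atomic-with-fetch defines, the width is decided by BPF_SIZE(code) like any other load.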
@@ -1735,22 +1746,38 @@ static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	return true;
 }
 
-/* Return TRUE if INSN doesn't have explicit value define. */
-static bool insn_no_def(struct bpf_insn *insn)
+/* Return the regno defined by the insn, or -1. */
+static int insn_def_regno(const struct bpf_insn *insn)
 {
-	u8 class = BPF_CLASS(insn->code);
-
-	return (class == BPF_JMP || class == BPF_JMP32 ||
-		class == BPF_STX || class == BPF_ST);
+	switch (BPF_CLASS(insn->code)) {
+	case BPF_JMP:
+	case BPF_JMP32:
+	case BPF_ST:
+		return -1;
+	case BPF_STX:
+		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+		    (insn->imm & BPF_FETCH)) {
+			if (insn->imm == BPF_CMPXCHG)
+				return BPF_REG_0;
+			else
+				return insn->src_reg;
+		} else {
+			return -1;
+		}
+	default:
+		return insn->dst_reg;
+	}
 }
 
 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
-	if (insn_no_def(insn))
+	int dst_reg = insn_def_regno(insn);
+
+	if (dst_reg == -1)
 		return false;
 
-	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
+	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
 }
 
 static void mark_insn_zext(struct bpf_verifier_env *env,

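How the new helper is meant to be read (illustrative only, not code from the commit; BPF_ATOMIC_OP is the macro also used by the selftests further down):

	/* Sketch: expected insn_def_regno() results for atomic variants. */
	struct bpf_insn fetch_add =
		BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8);
	struct bpf_insn cmpxchg =
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8);
	struct bpf_insn plain_add =
		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_2, -8);

	insn_def_regno(&fetch_add); /* BPF_REG_2: old value lands in src_reg */
	insn_def_regno(&cmpxchg);   /* BPF_REG_0: cmpxchg always loads into R0 */
	insn_def_regno(&plain_add); /* -1: without BPF_FETCH nothing is defined */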
@@ -11006,9 +11033,10 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 	for (i = 0; i < len; i++) {
 		int adj_idx = i + delta;
 		struct bpf_insn insn;
-		u8 load_reg;
+		int load_reg;
 
 		insn = insns[adj_idx];
+		load_reg = insn_def_regno(&insn);
 		if (!aux[adj_idx].zext_dst) {
 			u8 code, class;
 			u32 imm_rnd;

@@ -11018,14 +11046,14 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 
 			code = insn.code;
 			class = BPF_CLASS(code);
-			if (insn_no_def(&insn))
+			if (load_reg == -1)
 				continue;
 
 			/* NOTE: arg "reg" (the fourth one) is only used for
-			 * BPF_STX which has been ruled out in above
-			 * check, it is safe to pass NULL here.
+			 * BPF_STX + SRC_OP, so it is safe to pass NULL
+			 * here.
 			 */
-			if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
+			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
 				if (class == BPF_LD &&
 				    BPF_MODE(code) == BPF_IMM)
 					i++;

@@ -11040,31 +11068,28 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 			imm_rnd = get_random_int();
 			rnd_hi32_patch[0] = insn;
 			rnd_hi32_patch[1].imm = imm_rnd;
-			rnd_hi32_patch[3].dst_reg = insn.dst_reg;
+			rnd_hi32_patch[3].dst_reg = load_reg;
 			patch = rnd_hi32_patch;
 			patch_len = 4;
 			goto apply_patch_buffer;
 		}
 
-		if (!bpf_jit_needs_zext())
+		/* Add in an zero-extend instruction if a) the JIT has requested
+		 * it or b) it's a CMPXCHG.
+		 *
+		 * The latter is because: BPF_CMPXCHG always loads a value into
+		 * R0, therefore always zero-extends. However some archs'
+		 * equivalent instruction only does this load when the
+		 * comparison is successful. This detail of CMPXCHG is
+		 * orthogonal to the general zero-extension behaviour of the
+		 * CPU, so it's treated independently of bpf_jit_needs_zext.
+		 */
+		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
 			continue;
 
-		/* zext_dst means that we want to zero-extend whatever register
-		 * the insn defines, which is dst_reg most of the time, with
-		 * the notable exception of BPF_STX + BPF_ATOMIC + BPF_FETCH.
-		 */
-		if (BPF_CLASS(insn.code) == BPF_STX &&
-		    BPF_MODE(insn.code) == BPF_ATOMIC) {
-			/* BPF_STX + BPF_ATOMIC insns without BPF_FETCH do not
-			 * define any registers, therefore zext_dst cannot be
-			 * set.
-			 */
-			if (WARN_ON(!(insn.imm & BPF_FETCH)))
-				return -EINVAL;
-			load_reg = insn.imm == BPF_CMPXCHG ? BPF_REG_0
-							   : insn.src_reg;
-		} else {
-			load_reg = insn.dst_reg;
+		if (WARN_ON(load_reg == -1)) {
+			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
+			return -EFAULT;
 		}
 
 		zext_patch[0] = insn;

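The zext_patch built right after this hunk turns one insn into two: the original, then an explicit zero extension of load_reg. That extension is the special mov32 produced by BPF_ZEXT_REG in include/linux/filter.h (quoted from memory, so treat the exact definition as an assumption):

#define BPF_ZEXT_REG(DST)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = DST,					\
		.off   = 0,					\
		.imm   = 1 })	/* imm == 1: marker for insn_is_zext() */

A 32-bit mov from a register to itself zero-extends by definition, and the imm == 1 marker lets JITs recognize and drop these insns when their hardware already zero-extends.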
samples/bpf/xdpsock_user.c
@@ -1699,5 +1699,7 @@ int main(int argc, char **argv)
 
 	xdpsock_cleanup();
 
+	munmap(bufs, NUM_FRAMES * opt_xsk_frame_size);
+
 	return 0;
 }

tools/lib/bpf/xsk.c
@@ -610,15 +610,16 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
 		if (fd < 0)
 			continue;
 
+		memset(&map_info, 0, map_len);
 		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
 		if (err) {
 			close(fd);
 			continue;
 		}
 
-		if (!strcmp(map_info.name, "xsks_map")) {
+		if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
 			ctx->xsks_map_fd = fd;
-			continue;
+			break;
 		}
 
 		close(fd);

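Background for the strcmp() -> strncmp() switch (an aside; the motivation is my reading, not stated in the diff): struct bpf_map_info reports the map name in a fixed-size char array, so a bounded comparison is the robust form, and once xsks_map is found there is nothing else to scan for, hence break instead of continue. A minimal user-space sketch with libbpf:

#include <bpf/bpf.h>
#include <string.h>

/* Returns 1 if the map behind 'fd' is named "xsks_map". */
static int is_xsks_map(int fd)
{
	struct bpf_map_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, len);  /* don't inspect stale stack bytes */
	if (bpf_obj_get_info_by_fd(fd, &info, &len))
		return 0;
	/* info.name is a fixed-size, NUL-padded array; bound the comparison */
	return !strncmp(info.name, "xsks_map", sizeof(info.name));
}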
tools/testing/selftests/bpf/progs/netif_receive_skb.c
@@ -16,6 +16,13 @@ bool skip = false;
 #define STRSIZE			2048
 #define EXPECTED_STRSIZE	256
 
+#if defined(bpf_target_s390)
+/* NULL points to a readable struct lowcore on s390, so take the last page */
+#define BADPTR			((void *)0xFFFFFFFFFFFFF000ULL)
+#else
+#define BADPTR			0
+#endif
+
 #ifndef ARRAY_SIZE
 #define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
 #endif

@@ -113,11 +120,11 @@ int BPF_PROG(trace_netif_receive_skb, struct sk_buff *skb)
 	}
 
 	/* Check invalid ptr value */
-	p.ptr = 0;
+	p.ptr = BADPTR;
 	__ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), 0);
 	if (__ret >= 0) {
-		bpf_printk("printing NULL should generate error, got (%d)",
-			   __ret);
+		bpf_printk("printing %llx should generate error, got (%d)",
+			   (unsigned long long)BADPTR, __ret);
 		ret = -ERANGE;
 	}
 

tools/testing/selftests/bpf/verifier/array_access.c
@@ -250,12 +250,13 @@
 	BPF_MOV64_IMM(BPF_REG_5, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 		     BPF_FUNC_csum_diff),
+	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	.fixup_map_array_ro = { 3 },
 	.result = ACCEPT,
-	.retval = -29,
+	.retval = 65507,
 },
 {
 	"invalid write map access into a read-only array 1",

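Why the expected value changed (an aside, not part of the diff): the new BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff) masks bpf_csum_diff()'s return down to its low 16 bits, the only part of the partial checksum that is stable across architectures. The old and new expectations are the same number under that mask:

	-29 = 0xFFFFFFE3 (32-bit two's complement)
	0xFFFFFFE3 & 0xFFFF = 0xFFE3 = 65507

So .retval = 65507 is simply the old -29 with the architecture-dependent upper bits masked off.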
tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
@@ -94,3 +94,28 @@
 	.result = REJECT,
 	.errstr = "invalid read from stack",
 },
+{
+	"BPF_W cmpxchg should zero top 32 bits",
+	.insns = {
+		/* r0 = U64_MAX; */
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
+		/* u64 val = r0; */
+		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+		/* r0 = (u32)atomic_cmpxchg((u32 *)&val, r0, 1); */
+		BPF_MOV32_IMM(BPF_REG_1, 1),
+		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
+		/* r1 = 0x00000000FFFFFFFFull; */
+		BPF_MOV64_IMM(BPF_REG_1, 1),
+		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+		/* if (r0 != r1) exit(1); */
+		BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 2),
+		BPF_MOV32_IMM(BPF_REG_0, 1),
+		BPF_EXIT_INSN(),
+		/* exit(0); */
+		BPF_MOV32_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+},

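What this test catches (an aside): r0 starts as U64_MAX, and a BPF_W cmpxchg must leave only the low 32 bits of the fetched value in r0. If a JIT skips the zero extension, r0 stays 0xFFFFFFFFFFFFFFFF, the BPF_JEQ against r1 = 0x00000000FFFFFFFF falls through, and the program exits with 1 instead of 0.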
tools/testing/selftests/bpf/verifier/atomic_or.c
@@ -75,3 +75,28 @@
 	},
 	.result = ACCEPT,
 },
+{
+	"BPF_W atomic_fetch_or should zero top 32 bits",
+	.insns = {
+		/* r1 = U64_MAX; */
+		BPF_MOV64_IMM(BPF_REG_1, 0),
+		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+		/* u64 val = r1; */
+		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+		/* r1 = (u32)atomic_fetch_or((u32 *)&val, 2); */
+		BPF_MOV32_IMM(BPF_REG_1, 2),
+		BPF_ATOMIC_OP(BPF_W, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
+		/* r2 = 0x00000000FFFFFFFF; */
+		BPF_MOV64_IMM(BPF_REG_2, 1),
+		BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+		BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 1),
+		/* if (r2 != r1) exit(1); */
+		BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_1, 2),
+		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+		BPF_EXIT_INSN(),
+		/* exit(0); */
+		BPF_MOV32_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+},