bpf: Rename BPF_XADD and prepare to encode other atomics in .imm
A subsequent patch will add additional atomic operations. These new operations will use the same opcode field as the existing XADD, with the immediate discriminating different operations.

In preparation, rename the instruction mode to BPF_ATOMIC and start calling the zero immediate BPF_ADD.

This is possible (it doesn't break existing valid BPF programs) because the immediate field is currently reserved as must-be-zero, and BPF_ADD is zero.

All uses are removed from the tree, but the BPF_XADD definition is kept around to avoid breaking builds for people including kernel headers.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-5-jackmanb@google.com
Parent: e5f02caccf
Commit: 91c960b005
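The rename is purely nominal at the encoding level. A minimal stand-alone sketch of why it is binary-compatible (the constants match the uapi header changed below; the program itself is illustrative only):

    #include <stdio.h>

    #define BPF_STX    0x03   /* instruction class */
    #define BPF_DW     0x18   /* 64-bit size modifier */
    #define BPF_XADD   0xc0   /* old mode name */
    #define BPF_ATOMIC 0xc0   /* new mode name - same value */
    #define BPF_ADD    0x00   /* atomic op selector carried in insn->imm */

    int main(void)
    {
        /* The old xadd encoding and the new atomic-add encoding are
         * byte-identical: same opcode byte, and imm was reserved-as-zero. */
        unsigned char old_code = BPF_STX | BPF_DW | BPF_XADD;
        unsigned char new_code = BPF_STX | BPF_DW | BPF_ATOMIC;

        printf("old=0x%02x new=0x%02x imm=0x%02x\n", old_code, new_code, BPF_ADD);
        return 0;
    }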
@@ -1006,13 +1006,13 @@ Size modifier is one of ...

 Mode modifier is one of::

   BPF_IMM  0x00  /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
   BPF_ABS  0x20
   BPF_IND  0x40
   BPF_MEM  0x60
   BPF_LEN  0x80  /* classic BPF only, reserved in eBPF */
   BPF_MSH  0xa0  /* classic BPF only, reserved in eBPF */
-  BPF_XADD 0xc0  /* eBPF only, exclusive add */
+  BPF_ATOMIC 0xc0  /* eBPF only, atomic operations */

 eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
 (BPF_IND | <size> | BPF_LD) which are used to access packet data.
@@ -1044,11 +1044,19 @@ Unlike classic BPF instruction set, eBPF has generic load/store operations::
   BPF_MEM | <size> | BPF_STX:  *(size *) (dst_reg + off) = src_reg
   BPF_MEM | <size> | BPF_ST:   *(size *) (dst_reg + off) = imm32
   BPF_MEM | <size> | BPF_LDX:  dst_reg = *(size *) (src_reg + off)
-  BPF_XADD | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
-  BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg

-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
-2 byte atomic increments are not supported.
+Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW.
+
+It also includes atomic operations, which use the immediate field for extra
+encoding::
+
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
+
+Note that 1 and 2 byte atomic operations are not supported.
+
+You may encounter BPF_XADD - this is a legacy name for BPF_ATOMIC, referring to
+the exclusive-add operation encoded when the immediate field is zero.

 eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM which consists
 of two consecutive ``struct bpf_insn`` 8-byte blocks and interpreted as single
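To make the documented encoding concrete, here is the atomic add from the table above built as a struct bpf_insn literal (a sketch; register and offset choices are arbitrary):

    #include <linux/bpf.h>

    /* lock xadd *(u32 *)(r1 + 0) += r2 */
    const struct bpf_insn atomic_add_w = {
        .code    = BPF_STX | BPF_W | BPF_ATOMIC,
        .dst_reg = BPF_REG_1,
        .src_reg = BPF_REG_2,
        .off     = 0,
        .imm     = BPF_ADD,   /* the operation selector lives in imm */
    };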
@@ -1620,10 +1620,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        }
        emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
        break;
-    /* STX XADD: lock *(u32 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_W:
-    /* STX XADD: lock *(u64 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_DW:
+    /* Atomic ops */
+    case BPF_STX | BPF_ATOMIC | BPF_W:
+    case BPF_STX | BPF_ATOMIC | BPF_DW:
        goto notyet;
    /* STX: *(size *)(dst + off) = src */
    case BPF_STX | BPF_MEM | BPF_W:
@@ -875,10 +875,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
        }
        break;

-    /* STX XADD: lock *(u32 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_W:
-    /* STX XADD: lock *(u64 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_DW:
+    case BPF_STX | BPF_ATOMIC | BPF_W:
+    case BPF_STX | BPF_ATOMIC | BPF_DW:
+        if (insn->imm != BPF_ADD) {
+            pr_err_once("unknown atomic op code %02x\n", insn->imm);
+            return -EINVAL;
+        }
+
+        /* STX XADD: lock *(u32 *)(dst + off) += src
+         * and
+         * STX XADD: lock *(u64 *)(dst + off) += src
+         */
+
        if (!off) {
            reg = dst;
        } else {
@@ -1423,8 +1423,8 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
    case BPF_STX | BPF_H | BPF_MEM:
    case BPF_STX | BPF_W | BPF_MEM:
    case BPF_STX | BPF_DW | BPF_MEM:
-    case BPF_STX | BPF_W | BPF_XADD:
-    case BPF_STX | BPF_DW | BPF_XADD:
+    case BPF_STX | BPF_W | BPF_ATOMIC:
+    case BPF_STX | BPF_DW | BPF_ATOMIC:
        if (insn->dst_reg == BPF_REG_10) {
            ctx->flags |= EBPF_SEEN_FP;
            dst = MIPS_R_SP;

@@ -1438,7 +1438,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
        src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
        if (src < 0)
            return src;
-        if (BPF_MODE(insn->code) == BPF_XADD) {
+        if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+            if (insn->imm != BPF_ADD) {
+                pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm);
+                return -EINVAL;
+            }
+
            /*
             * If mem_off does not fit within the 9 bit ll/sc
             * instruction immediate field, use a temp reg.
@@ -683,10 +683,18 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
            break;

        /*
-         * BPF_STX XADD (atomic_add)
+         * BPF_STX ATOMIC (atomic ops)
         */
-        /* *(u32 *)(dst + off) += src */
-        case BPF_STX | BPF_XADD | BPF_W:
+        case BPF_STX | BPF_ATOMIC | BPF_W:
+            if (insn->imm != BPF_ADD) {
+                pr_err_ratelimited(
+                    "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                    code, i);
+                return -ENOTSUPP;
+            }
+
+            /* *(u32 *)(dst + off) += src */
+
            /* Get EA into TMP_REG_1 */
            EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
            tmp_idx = ctx->idx * 4;

@@ -699,8 +707,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
            /* we're done if this succeeded */
            PPC_BCC_SHORT(COND_NE, tmp_idx);
            break;
-        /* *(u64 *)(dst + off) += src */
-        case BPF_STX | BPF_XADD | BPF_DW:
+        case BPF_STX | BPF_ATOMIC | BPF_DW:
+            if (insn->imm != BPF_ADD) {
+                pr_err_ratelimited(
+                    "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                    code, i);
+                return -ENOTSUPP;
+            }
+            /* *(u64 *)(dst + off) += src */
+
            EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
            tmp_idx = ctx->idx * 4;
            EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
@@ -881,7 +881,7 @@ static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
    const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
    const s8 *rs = bpf_get_reg64(src, tmp2, ctx);

-    if (mode == BPF_XADD && size != BPF_W)
+    if (mode == BPF_ATOMIC && size != BPF_W)
        return -1;

    emit_imm(RV_REG_T0, off, ctx);

@@ -899,7 +899,7 @@ static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
    case BPF_MEM:
        emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
        break;
-    case BPF_XADD:
+    case BPF_ATOMIC: /* Only BPF_ADD supported */
        emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
             ctx);
        break;
@@ -1260,7 +1260,6 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
    case BPF_STX | BPF_MEM | BPF_H:
    case BPF_STX | BPF_MEM | BPF_W:
    case BPF_STX | BPF_MEM | BPF_DW:
-    case BPF_STX | BPF_XADD | BPF_W:
        if (BPF_CLASS(code) == BPF_ST) {
            emit_imm32(tmp2, imm, ctx);
            src = tmp2;

@@ -1271,8 +1270,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
            return -1;
        break;

+    case BPF_STX | BPF_ATOMIC | BPF_W:
+        if (insn->imm != BPF_ADD) {
+            pr_info_once(
+                "bpf-jit: not supported: atomic operation %02x ***\n",
+                insn->imm);
+            return -EFAULT;
+        }
+
+        if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+                   BPF_MODE(code)))
+            return -1;
+        break;
+
    /* No hardware support for 8-byte atomics in RV32. */
-    case BPF_STX | BPF_XADD | BPF_DW:
+    case BPF_STX | BPF_ATOMIC | BPF_DW:
        /* Fallthrough. */

 notsupported:
@@ -1027,10 +1027,18 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
        emit_sd(RV_REG_T1, 0, rs, ctx);
        break;
-    /* STX XADD: lock *(u32 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_W:
-    /* STX XADD: lock *(u64 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_DW:
+    case BPF_STX | BPF_ATOMIC | BPF_W:
+    case BPF_STX | BPF_ATOMIC | BPF_DW:
+        if (insn->imm != BPF_ADD) {
+            pr_err("bpf-jit: not supported: atomic operation %02x ***\n",
+                   insn->imm);
+            return -EINVAL;
+        }
+
+        /* atomic_add: lock *(u32 *)(dst + off) += src
+         * atomic_add: lock *(u64 *)(dst + off) += src
+         */
+
        if (off) {
            if (is_12b_int(off)) {
                emit_addi(RV_REG_T1, rd, off, ctx);
@@ -1205,18 +1205,23 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
        jit->seen |= SEEN_MEM;
        break;
    /*
-     * BPF_STX XADD (atomic_add)
+     * BPF_ATOMIC
     */
-    case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
-        /* laal %w0,%src,off(%dst) */
-        EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
-                  dst_reg, off);
-        jit->seen |= SEEN_MEM;
-        break;
-    case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
-        /* laalg %w0,%src,off(%dst) */
-        EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
-                  dst_reg, off);
+    case BPF_STX | BPF_ATOMIC | BPF_DW:
+    case BPF_STX | BPF_ATOMIC | BPF_W:
+        if (insn->imm != BPF_ADD) {
+            pr_err("Unknown atomic operation %02x\n", insn->imm);
+            return -1;
+        }
+
+        /* *(u32/u64 *)(dst + off) += src
+         *
+         * BFW_W:  laal  %w0,%src,off(%dst)
+         * BPF_DW: laalg %w0,%src,off(%dst)
+         */
+        EMIT6_DISP_LH(0xeb000000,
+                  BPF_SIZE(insn->code) == BPF_W ? 0x00fa : 0x00ea,
+                  REG_W0, src_reg, dst_reg, off);
        jit->seen |= SEEN_MEM;
        break;
    /*
@@ -1366,12 +1366,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        break;
    }

-    /* STX XADD: lock *(u32 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_W: {
+    case BPF_STX | BPF_ATOMIC | BPF_W: {
        const u8 tmp = bpf2sparc[TMP_REG_1];
        const u8 tmp2 = bpf2sparc[TMP_REG_2];
        const u8 tmp3 = bpf2sparc[TMP_REG_3];

+        if (insn->imm != BPF_ADD) {
+            pr_err_once("unknown atomic op %02x\n", insn->imm);
+            return -EINVAL;
+        }
+
+        /* lock *(u32 *)(dst + off) += src */
+
        if (insn->dst_reg == BPF_REG_FP)
            ctx->saw_frame_pointer = true;

@@ -1390,11 +1396,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        break;
    }
-    /* STX XADD: lock *(u64 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_DW: {
+    case BPF_STX | BPF_ATOMIC | BPF_DW: {
        const u8 tmp = bpf2sparc[TMP_REG_1];
        const u8 tmp2 = bpf2sparc[TMP_REG_2];
        const u8 tmp3 = bpf2sparc[TMP_REG_3];

+        if (insn->imm != BPF_ADD) {
+            pr_err_once("unknown atomic op %02x\n", insn->imm);
+            return -EINVAL;
+        }
+
        if (insn->dst_reg == BPF_REG_FP)
            ctx->saw_frame_pointer = true;

@@ -795,6 +795,33 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
    *pprog = prog;
 }

+static int emit_atomic(u8 **pprog, u8 atomic_op,
+               u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
+{
+    u8 *prog = *pprog;
+    int cnt = 0;
+
+    EMIT1(0xF0); /* lock prefix */
+
+    maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
+
+    /* emit opcode */
+    switch (atomic_op) {
+    case BPF_ADD:
+        /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
+        EMIT1(simple_alu_opcodes[atomic_op]);
+        break;
+    default:
+        pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+        return -EFAULT;
+    }
+
+    emit_insn_suffix(&prog, dst_reg, src_reg, off);
+
+    *pprog = prog;
+    return 0;
+}
+
 static bool ex_handler_bpf(const struct exception_table_entry *x,
               struct pt_regs *regs, int trapnr,
               unsigned long error_code, unsigned long fault_addr)
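For orientation, a reading of the bytes emit_atomic() chains together for the one currently supported operation; this sketch is not part of the diff, and the register choice (rdi/rsi) is hypothetical:

    /*
     * BPF_ATOMIC | BPF_DW with imm == BPF_ADD:
     *
     *   0xF0            lock prefix              EMIT1(0xF0)
     *   0x48            REX.W for 64-bit         maybe_emit_mod(..., BPF_DW)
     *   0x01            x86 "add r/m, r"         simple_alu_opcodes[BPF_ADD]
     *   ModRM + disp    [rdi + off], rsi         emit_insn_suffix()
     *
     * Net effect: lock add qword ptr [rdi + off], rsi
     */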
@@ -839,6 +866,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
    int i, cnt = 0, excnt = 0;
    int proglen = 0;
    u8 *prog = temp;
+    int err;

    detect_reg_usage(insn, insn_cnt, callee_regs_used,
             &tail_call_seen);

@@ -1250,18 +1278,12 @@ st: if (is_imm8(insn->off))
            }
            break;

-        /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
-    case BPF_STX | BPF_XADD | BPF_W:
-        /* Emit 'lock add dword ptr [rax + off], eax' */
-        if (is_ereg(dst_reg) || is_ereg(src_reg))
-            EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
-        else
-            EMIT2(0xF0, 0x01);
-        goto xadd;
-    case BPF_STX | BPF_XADD | BPF_DW:
-        EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
-xadd:
-        emit_modrm_dstoff(&prog, dst_reg, src_reg, insn->off);
+    case BPF_STX | BPF_ATOMIC | BPF_W:
+    case BPF_STX | BPF_ATOMIC | BPF_DW:
+        err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
+                  insn->off, BPF_SIZE(insn->code));
+        if (err)
+            return err;
+        break;

        /* call */
@@ -2243,10 +2243,8 @@ emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
            return -EFAULT;
        }
        break;
-    /* STX XADD: lock *(u32 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_W:
-    /* STX XADD: lock *(u64 *)(dst + off) += src */
-    case BPF_STX | BPF_XADD | BPF_DW:
+    case BPF_STX | BPF_ATOMIC | BPF_W:
+    case BPF_STX | BPF_ATOMIC | BPF_DW:
        goto notyet;
    case BPF_JMP | BPF_EXIT:
        if (seen_exit) {
@@ -3109,13 +3109,19 @@ mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
    return 0;
 }

-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+    if (meta->insn.imm != BPF_ADD)
+        return -EOPNOTSUPP;
+
    return mem_xadd(nfp_prog, meta, false);
 }

-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+    if (meta->insn.imm != BPF_ADD)
+        return -EOPNOTSUPP;
+
    return mem_xadd(nfp_prog, meta, true);
 }

@@ -3475,8 +3481,8 @@ static const instr_cb_t instr_cb[256] = {
    [BPF_STX | BPF_MEM | BPF_H] =    mem_stx2,
    [BPF_STX | BPF_MEM | BPF_W] =    mem_stx4,
    [BPF_STX | BPF_MEM | BPF_DW] =    mem_stx8,
-    [BPF_STX | BPF_XADD | BPF_W] =    mem_xadd4,
-    [BPF_STX | BPF_XADD | BPF_DW] =    mem_xadd8,
+    [BPF_STX | BPF_ATOMIC | BPF_W] =    mem_atomic4,
+    [BPF_STX | BPF_ATOMIC | BPF_DW] =    mem_atomic8,
    [BPF_ST | BPF_MEM | BPF_B] =    mem_st1,
    [BPF_ST | BPF_MEM | BPF_H] =    mem_st2,
    [BPF_ST | BPF_MEM | BPF_W] =    mem_st4,
@@ -428,9 +428,9 @@ static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
    return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
 }

-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
 {
-    return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+    return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
 }

 static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
@@ -479,7 +479,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
        pr_vlog(env, "map writes not supported\n");
        return -EOPNOTSUPP;
    }
-    if (is_mbpf_xadd(meta)) {
+    if (is_mbpf_atomic(meta)) {
        err = nfp_bpf_map_mark_used(env, meta, reg,
                        NFP_MAP_USE_ATOMIC_CNT);
        if (err)

@@ -523,12 +523,17 @@ nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 }

 static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
-           struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+             struct bpf_verifier_env *env)
 {
    const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
    const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

+    if (meta->insn.imm != BPF_ADD) {
+        pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+        return -EOPNOTSUPP;
+    }
+
    if (dreg->type != PTR_TO_MAP_VALUE) {
        pr_vlog(env, "atomic add not to a map value pointer: %d\n",
            dreg->type);

@@ -655,8 +660,8 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
    if (is_mbpf_store(meta))
        return nfp_bpf_check_store(nfp_prog, meta, env);

-    if (is_mbpf_xadd(meta))
-        return nfp_bpf_check_xadd(nfp_prog, meta, env);
+    if (is_mbpf_atomic(meta))
+        return nfp_bpf_check_atomic(nfp_prog, meta, env);

    if (is_mbpf_alu(meta))
        return nfp_bpf_check_alu(nfp_prog, meta, env);
@@ -259,15 +259,23 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
        .off   = OFF,                    \
        .imm   = 0 })

-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD       *(uint *) (dst_reg + off16) += src_reg
+ */

-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)            \
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)        \
    ((struct bpf_insn) {                    \
-        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,    \
+        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,    \
        .dst_reg = DST,                    \
        .src_reg = SRC,                    \
        .off   = OFF,                    \
-        .imm   = 0 })
+        .imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
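A usage sketch of the reworked macro, mirroring how the test suite below uses it; both lines build byte-identical instructions, which is why the legacy alias is safe:

    struct bpf_insn a = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8);
    struct bpf_insn b = BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8); /* legacy spelling */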
@@ -19,7 +19,8 @@

 /* ld/ldx fields */
 #define BPF_DW        0x18    /* double word (64-bit) */
-#define BPF_XADD    0xc0    /* exclusive add */
+#define BPF_ATOMIC    0xc0    /* atomic memory ops - op type in immediate */
+#define BPF_XADD    0xc0    /* exclusive add - legacy name */

 /* alu/jmp fields */
 #define BPF_MOV        0xb0    /* mov reg to reg */

@@ -2448,7 +2449,7 @@ union bpf_attr {
  *        running simultaneously.
  *
  *        A user should care about the synchronization by himself.
- *        For example, by using the **BPF_STX_XADD** instruction to alter
+ *        For example, by using the **BPF_ATOMIC** instructions to alter
  *        the shared data.
  *    Return
  *        A pointer to the local storage area.
@@ -1309,8 +1309,8 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
    INSN_3(STX, MEM,  H),            \
    INSN_3(STX, MEM,  W),            \
    INSN_3(STX, MEM,  DW),            \
-    INSN_3(STX, XADD, W),            \
-    INSN_3(STX, XADD, DW),            \
+    INSN_3(STX, ATOMIC, W),            \
+    INSN_3(STX, ATOMIC, DW),        \
    /* Immediate based. */            \
    INSN_3(ST, MEM, B),            \
    INSN_3(ST, MEM, H),            \
@@ -1618,13 +1618,25 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
    LDX_PROBE(DW, 8)
 #undef LDX_PROBE

-    STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-        atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-               (DST + insn->off));
+    STX_ATOMIC_W:
+        switch (IMM) {
+        case BPF_ADD:
+            /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+            atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+                   (DST + insn->off));
+        default:
+            goto default_label;
+        }
        CONT;
-    STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-        atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-                 (DST + insn->off));
+    STX_ATOMIC_DW:
+        switch (IMM) {
+        case BPF_ADD:
+            /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+            atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+                     (DST + insn->off));
+        default:
+            goto default_label;
+        }
        CONT;

    default_label:

@@ -1634,7 +1646,8 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
         *
         * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
         */
-        pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+        pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
+            insn->code, insn->imm);
        BUG_ON(1);
        return 0;
 }
@@ -153,14 +153,16 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                insn->dst_reg,
                insn->off, insn->src_reg);
-        else if (BPF_MODE(insn->code) == BPF_XADD)
+        else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+             insn->imm == BPF_ADD) {
            verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
                insn->code,
                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                insn->dst_reg, insn->off,
                insn->src_reg);
-        else
+        } else {
            verbose(cbs->private_data, "BUG_%02x\n", insn->code);
+        }
    } else if (class == BPF_ST) {
        if (BPF_MODE(insn->code) != BPF_MEM) {
            verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
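As an example of the disassembler output this preserves (field values assumed for illustration): for .code = BPF_STX | BPF_DW | BPF_ATOMIC (0x03 | 0x18 | 0xc0 = 0xdb), .dst_reg = 10, .off = -8, .src_reg = 0 and .imm = BPF_ADD, print_bpf_insn() emits:

    (db) lock *(u64 *)(r10 -8) += r0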
@@ -3604,13 +3604,17 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        return err;
 }

-static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
+static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
    int err;

-    if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
-        insn->imm != 0) {
-        verbose(env, "BPF_XADD uses reserved fields\n");
+    if (insn->imm != BPF_ADD) {
+        verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
+        return -EINVAL;
+    }
+
+    if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
+        verbose(env, "invalid atomic operand size\n");
        return -EINVAL;
    }

@@ -3633,19 +3637,19 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
        is_pkt_reg(env, insn->dst_reg) ||
        is_flow_key_reg(env, insn->dst_reg) ||
        is_sk_reg(env, insn->dst_reg)) {
-        verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+        verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
            insn->dst_reg,
            reg_type_str[reg_state(env, insn->dst_reg)->type]);
        return -EACCES;
    }

-    /* check whether atomic_add can read the memory */
+    /* check whether we can read the memory */
    err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                   BPF_SIZE(insn->code), BPF_READ, -1, true);
    if (err)
        return err;

-    /* check whether atomic_add can write into the same memory */
+    /* check whether we can write into the same memory */
    return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
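A sketch of an instruction the new check rejects (BPF_XOR stands in for any imm other than BPF_ADD; nothing else here is from the patch):

    struct bpf_insn bad = {
        .code    = BPF_STX | BPF_W | BPF_ATOMIC,
        .dst_reg = BPF_REG_10,
        .src_reg = BPF_REG_0,
        .off     = -8,
        .imm     = BPF_XOR,   /* fails: "BPF_ATOMIC uses invalid atomic opcode" */
    };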
@@ -9524,8 +9528,8 @@ static int do_check(struct bpf_verifier_env *env)
        } else if (class == BPF_STX) {
            enum bpf_reg_type *prev_dst_type, dst_reg_type;

-            if (BPF_MODE(insn->code) == BPF_XADD) {
-                err = check_xadd(env, env->insn_idx, insn);
+            if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+                err = check_atomic(env, env->insn_idx, insn);
                if (err)
                    return err;
                env->insn_idx++;

@@ -10010,7 +10014,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)

        if (BPF_CLASS(insn->code) == BPF_STX &&
            ((BPF_MODE(insn->code) != BPF_MEM &&
-              BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
+              BPF_MODE(insn->code) != BPF_ATOMIC) || insn->imm != 0)) {
            verbose(env, "BPF_STX uses reserved fields\n");
            return -EINVAL;
        }
@@ -4295,13 +4295,13 @@ static struct bpf_test tests[] = {
        { { 0, 0xffffffff } },
        .stack_depth = 40,
    },
-    /* BPF_STX | BPF_XADD | BPF_W/DW */
+    /* BPF_STX | BPF_ATOMIC | BPF_W/DW */
    {
        "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
        .u.insns_int = {
            BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
            BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-            BPF_STX_XADD(BPF_W, R10, R0, -40),
+            BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
            BPF_LDX_MEM(BPF_W, R0, R10, -40),
            BPF_EXIT_INSN(),
        },

@@ -4316,7 +4316,7 @@ static struct bpf_test tests[] = {
            BPF_ALU64_REG(BPF_MOV, R1, R10),
            BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
            BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-            BPF_STX_XADD(BPF_W, R10, R0, -40),
+            BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
            BPF_ALU64_REG(BPF_MOV, R0, R10),
            BPF_ALU64_REG(BPF_SUB, R0, R1),
            BPF_EXIT_INSN(),

@@ -4331,7 +4331,7 @@ static struct bpf_test tests[] = {
        .u.insns_int = {
            BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
            BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-            BPF_STX_XADD(BPF_W, R10, R0, -40),
+            BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
            BPF_EXIT_INSN(),
        },
        INTERNAL,

@@ -4352,7 +4352,7 @@ static struct bpf_test tests[] = {
        .u.insns_int = {
            BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
            BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-            BPF_STX_XADD(BPF_DW, R10, R0, -40),
+            BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
            BPF_LDX_MEM(BPF_DW, R0, R10, -40),
            BPF_EXIT_INSN(),
        },

@@ -4367,7 +4367,7 @@ static struct bpf_test tests[] = {
            BPF_ALU64_REG(BPF_MOV, R1, R10),
            BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
            BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-            BPF_STX_XADD(BPF_DW, R10, R0, -40),
+            BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
            BPF_ALU64_REG(BPF_MOV, R0, R10),
            BPF_ALU64_REG(BPF_SUB, R0, R1),
            BPF_EXIT_INSN(),

@@ -4382,7 +4382,7 @@ static struct bpf_test tests[] = {
        .u.insns_int = {
            BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
            BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-            BPF_STX_XADD(BPF_DW, R10, R0, -40),
+            BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
            BPF_EXIT_INSN(),
        },
        INTERNAL,
@@ -138,11 +138,11 @@ struct bpf_insn;

 #define BPF_STX_XADD(SIZE, DST, SRC, OFF)            \
    ((struct bpf_insn) {                    \
-        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,    \
+        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,    \
        .dst_reg = DST,                    \
        .src_reg = SRC,                    \
        .off   = OFF,                    \
-        .imm   = 0 })
+        .imm   = BPF_ADD })

 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -147,12 +147,12 @@ static void prog_load(void)
         */
        BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_1, 1),
-        BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-                 offsetof(struct stats, packets)),
+        BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+                  offsetof(struct stats, packets)),
        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
                offsetof(struct __sk_buff, len)),
-        BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-                 offsetof(struct stats, bytes)),
+        BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+                  offsetof(struct stats, bytes)),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
                offsetof(struct __sk_buff, len)),
        BPF_EXIT_INSN(),
@@ -54,7 +54,7 @@ static int test_sock(void)
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+        BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
        BPF_EXIT_INSN(),
    };
@@ -53,7 +53,7 @@ static int prog_load(int map_fd, int verdict)
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+        BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

        /* Count bytes */
        BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */

@@ -64,7 +64,8 @@ static int prog_load(int map_fd, int verdict)
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
-        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+        BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

        BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
        BPF_EXIT_INSN(),
@@ -169,15 +169,22 @@
        .off   = OFF,                    \
        .imm   = 0 })

-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD       *(uint *) (dst_reg + off16) += src_reg
+ */

-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)            \
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)        \
    ((struct bpf_insn) {                    \
-        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,    \
+        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,    \
        .dst_reg = DST,                    \
        .src_reg = SRC,                    \
        .off   = OFF,                    \
-        .imm   = 0 })
+        .imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -19,7 +19,8 @@

 /* ld/ldx fields */
 #define BPF_DW        0x18    /* double word (64-bit) */
-#define BPF_XADD    0xc0    /* exclusive add */
+#define BPF_ATOMIC    0xc0    /* atomic memory ops - op type in immediate */
+#define BPF_XADD    0xc0    /* exclusive add - legacy name */

 /* alu/jmp fields */
 #define BPF_MOV        0xb0    /* mov reg to reg */

@@ -2448,7 +2449,7 @@ union bpf_attr {
  *        running simultaneously.
  *
  *        A user should care about the synchronization by himself.
- *        For example, by using the **BPF_STX_XADD** instruction to alter
+ *        For example, by using the **BPF_ATOMIC** instructions to alter
  *        the shared data.
  *    Return
  *        A pointer to the local storage area.
@@ -45,13 +45,13 @@ static int prog_load_cnt(int verdict, int val)
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
-        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+        BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

        BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
        BPF_MOV64_IMM(BPF_REG_2, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
        BPF_MOV64_IMM(BPF_REG_1, val),
-        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+        BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

        BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
        BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -29,7 +29,7 @@ int main(int argc, char **argv)
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                 BPF_FUNC_get_local_storage),
        BPF_MOV64_IMM(BPF_REG_1, 1),
-        BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+        BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
        BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
@@ -10,14 +10,13 @@
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
 {
-    "context stores via XADD",
+    "context stores via BPF_ATOMIC",
    .insns = {
    BPF_MOV64_IMM(BPF_REG_0, 0),
-    BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
-             BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)),
    BPF_EXIT_INSN(),
    },
-    .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+    .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
    .result = REJECT,
    .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
@@ -333,7 +333,7 @@
    BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
-    BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_4, BPF_REG_5, 0),
    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
    BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),

@@ -488,7 +488,7 @@
    BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
    BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
-    BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_4, -8),
    BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
    BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
@@ -5,7 +5,7 @@
    BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
            offsetof(struct __sk_buff, cb[0])),
    BPF_LD_MAP_FD(BPF_REG_2, 0),
-    BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
-             offsetof(struct __sk_buff, cb[0])),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2,
+              offsetof(struct __sk_buff, cb[0])),
    BPF_EXIT_INSN(),
 },

@@ -13,7 +13,7 @@
    .errstr_unpriv = "R2 leaks addr into mem",
    .result_unpriv = REJECT,
    .result = REJECT,
-    .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+    .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
    "leak pointer into ctx 2",
@@ -21,14 +21,14 @@
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
            offsetof(struct __sk_buff, cb[0])),
-    BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
-             offsetof(struct __sk_buff, cb[0])),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10,
+              offsetof(struct __sk_buff, cb[0])),
    BPF_EXIT_INSN(),
    },
    .errstr_unpriv = "R10 leaks addr into mem",
    .result_unpriv = REJECT,
    .result = REJECT,
-    .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+    .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
    "leak pointer into ctx 3",

@@ -56,7 +56,7 @@
    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
    BPF_MOV64_IMM(BPF_REG_3, 0),
    BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
-    BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0),
    BPF_MOV64_IMM(BPF_REG_0, 0),
    BPF_EXIT_INSN(),
 },
@@ -171,7 +171,7 @@
    BPF_MOV64_IMM(BPF_REG_5, 42),
    BPF_MOV64_IMM(BPF_REG_6, 24),
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-    BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
    BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
    BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
    BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),

@@ -196,7 +196,7 @@
    BPF_MOV64_IMM(BPF_REG_5, 42),
    BPF_MOV64_IMM(BPF_REG_6, 24),
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-    BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
    BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
    BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
    BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
@@ -207,7 +207,8 @@
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    BPF_MOV64_IMM(BPF_REG_0, 1),
-    BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+    BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW,
+             BPF_REG_10, BPF_REG_0, -8, BPF_ADD),
    BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
    BPF_EXIT_INSN(),
@@ -82,7 +82,7 @@
    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
-    BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_3, 0),
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
    BPF_EXIT_INSN(),
@@ -3,7 +3,7 @@
    .insns = {
    BPF_MOV64_IMM(BPF_REG_0, 1),
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-    BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
    BPF_EXIT_INSN(),
    },

@@ -22,7 +22,7 @@
    BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    BPF_EXIT_INSN(),
    BPF_MOV64_IMM(BPF_REG_1, 1),
-    BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
    BPF_EXIT_INSN(),
    },
@@ -45,13 +45,13 @@
    BPF_MOV64_IMM(BPF_REG_0, 1),
    BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
-    BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
-    BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
+    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
    BPF_EXIT_INSN(),
    },
    .result = REJECT,
-    .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+    .errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
    .prog_type = BPF_PROG_TYPE_XDP,
    .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
@@ -62,8 +62,8 @@
    BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
    BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-    BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-    BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+    BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
    BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
    BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),

@@ -82,8 +82,8 @@
    BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
    BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-    BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-    BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
    BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
    BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),