mirror of https://gitee.com/openkylin/qemu.git
accel/tcg/plugin-gen: fix the call signature for inline callbacks
A recent change to the handling of constants in TCG changed the pattern of ops emitted for a constant add. We no longer emit a mov, and the constant can be applied directly to the TCG_op_add arguments. This was causing SEGVs when running the insn plugin with arg=inline. Fix this by updating copy_add_i64 to do the right thing, while also adding a comment at the top of the append section as an aide-mémoire in case something like this happens again. Signed-off-by: Alex Bennée <alex.bennee@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Cc: Emilio G. Cota <cota@braap.org> Message-Id: <20210213130325.14781-10-alex.bennee@linaro.org>
This commit is contained in:
parent
24fa5d669d
commit
0d6e6cb779
|
@ -320,22 +320,6 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
|
|||
return op;
|
||||
}
|
||||
|
||||
/*
 * Rewrite the op(s) at *begin_op that materialize a 64-bit constant so
 * they load @v instead.  On 32-bit hosts a 64-bit constant is built from
 * two i32 movs (low half first, then high half); on 64-bit hosts a single
 * mov_i64 suffices.  Returns the last op emitted so the caller can keep
 * walking/patching the copied op stream.
 *
 * NOTE(review): assumes the empty-callback template emitted exactly this
 * mov pattern at the current position — copy_op() asserts the opcode
 * matches, so a template change trips the assert rather than corrupting
 * the stream (per the commit message this file belongs to).
 */
static TCGOp *copy_const_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32: low 32 bits, then high 32 bits of v */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32(v));
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        /* mov_i64: args[1] is the source operand; point it at constant v */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}
|
||||
|
||||
static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
|
||||
{
|
||||
if (TARGET_LONG_BITS == 32) {
|
||||
|
@ -374,14 +358,17 @@ static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
|
|||
return op;
|
||||
}
|
||||
|
||||
static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op)
|
||||
/*
 * Rewrite the 64-bit add op at *begin_op so its constant addend is @v.
 * Since TCG started folding constants directly into add arguments (no
 * separate mov), the constant must be patched into the add op itself —
 * hence the @v parameter added by this commit.  Returns the last op
 * emitted.
 *
 * Operand layout (grounded in the assignments below):
 *  - add2_i32: args[4]/args[5] receive the low/high 32 bits of v
 *  - add_i64:  args[2] receives v as the second addend
 */
static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}
|
||||
|
@ -431,6 +418,12 @@ static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
|
|||
return op;
|
||||
}
|
||||
|
||||
/*
|
||||
* When we append/replace ops here we are sensitive to changing patterns of
|
||||
* TCGOps generated by the tcg_gen_FOO calls when we generated the
|
||||
* empty callbacks. This will assert very quickly in a debug build as
|
||||
* we assert the ops we are replacing are the correct ones.
|
||||
*/
|
||||
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
|
||||
TCGOp *begin_op, TCGOp *op, int *cb_idx)
|
||||
{
|
||||
|
@ -462,11 +455,8 @@ static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
|
|||
/* ld_i64 */
|
||||
op = copy_ld_i64(&begin_op, op);
|
||||
|
||||
/* const_i64 */
|
||||
op = copy_const_i64(&begin_op, op, cb->inline_insn.imm);
|
||||
|
||||
/* add_i64 */
|
||||
op = copy_add_i64(&begin_op, op);
|
||||
op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
|
||||
|
||||
/* st_i64 */
|
||||
op = copy_st_i64(&begin_op, op);
|
||||
|
|
Loading…
Reference in New Issue