#ifndef GEN_ICOUNT_H
#define GEN_ICOUNT_H

#include "qemu/timer.h"

/* Helpers for instruction counting code generation. */
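
/*
 * Op emitted by gen_tb_start() as a "sub" with a dummy immediate argument;
 * gen_tb_end() patches that argument with the real instruction count once
 * it is known.
 */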
static TCGOp *icount_start_insn;
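
/*
 * Emit code that sets cpu->can_do_io to 1, so that the instruction that
 * follows may perform I/O under -icount; the flag is cleared again at the
 * start of the next translation block (see the comment below).
 */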
static inline void gen_io_start(void)
{
    TCGv_i32 tmp = tcg_const_i32(1);
    tcg_gen_st_i32(tmp, cpu_env,
                   offsetof(ArchCPU, parent_obj.can_do_io) -
                   offsetof(ArchCPU, env));
    tcg_temp_free_i32(tmp);
}

/*
 * cpu->can_do_io is cleared automatically at the beginning of
 * each translation block.  The cost is minimal and only paid
 * for -icount, plus it would be very easy to forget doing it
 * in the translator.  Therefore, target translators only need
 * to call gen_io_start().
 */
static inline void gen_io_end(void)
{
    TCGv_i32 tmp = tcg_const_i32(0);
    tcg_gen_st_i32(tmp, cpu_env,
                   offsetof(ArchCPU, parent_obj.can_do_io) -
                   offsetof(ArchCPU, env));
    tcg_temp_free_i32(tmp);
}
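
/*
 * Usage sketch (illustrative only, not part of this header): a target
 * translator about to emit an instruction that may access I/O under
 * -icount typically does something like
 *
 *     if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
 *         gen_io_start();
 *     }
 *     ... emit the device access ...
 *
 * where "s->base.tb" stands for however that translator reaches its
 * TranslationBlock.  No matching gen_io_end() call is needed there,
 * because gen_tb_start() below clears can_do_io for the next block.
 */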
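
/*
 * Emit the prologue of a translation block: load icount_decr, subtract a
 * (not yet known) instruction count when icount is in use, branch to the
 * exit-request label if the result is negative, write the remaining
 * budget back, and clear can_do_io for the new block.
 */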
static inline void gen_tb_start(const TranslationBlock *tb)
{
    TCGv_i32 count;

    tcg_ctx->exitreq_label = gen_new_label();
    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        count = tcg_temp_local_new_i32();
    } else {
        count = tcg_temp_new_i32();
    }
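
    /*
     * Load the full 32-bit icount_decr word: the high half is set to -1
     * by cpu_exit() to request an exit, and the low half holds the
     * remaining instruction budget when icount is in use.
     */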
    tcg_gen_ld_i32(count, cpu_env,
                   offsetof(ArchCPU, neg.icount_decr.u32) -
                   offsetof(ArchCPU, env));

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        /*
         * We emit a sub with a dummy immediate argument.  Keep a pointer
         * to the sub op so that we can later (once we know the actual
         * insn count) update that argument with the actual insn count.
         */
        tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
        icount_start_insn = tcg_last_op();
    }
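
    /*
     * Exit the block if an exit has been requested (high half negative)
     * or, with icount, if the instruction budget is exhausted.
     */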
    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
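
    /*
     * Store the remaining budget back to the low half only, so that an
     * exit request written to the high half is left untouched, and clear
     * can_do_io at the start of the block.
     */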
    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        tcg_gen_st16_i32(count, cpu_env,
                         offsetof(ArchCPU, neg.icount_decr.u16.low) -
                         offsetof(ArchCPU, env));
        gen_io_end();
    }

    tcg_temp_free_i32(count);
}
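
/*
 * Emit the epilogue of a translation block: patch the dummy subtrahend
 * emitted by gen_tb_start() with the real instruction count, then emit
 * the exit-request path that leaves the block with TB_EXIT_REQUESTED.
 */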
static inline void gen_tb_end(const TranslationBlock *tb, int num_insns)
{
    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        /*
         * Update the num_insns immediate parameter now that we know
         * the actual insn count.
         */
        tcg_set_insn_param(icount_start_insn, 2,
                           tcgv_i32_arg(tcg_constant_i32(num_insns)));
    }
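
    /*
     * Exit-request path: the branch emitted by gen_tb_start() lands here
     * and leaves the block with TB_EXIT_REQUESTED.
     */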
    gen_set_label(tcg_ctx->exitreq_label);
    tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
}
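
/*
 * Illustrative pairing (not a definitive reference): the translator loop
 * brackets each block roughly as
 *
 *     gen_tb_start(tb);
 *     ... translate guest instructions, counting them in num_insns ...
 *     gen_tb_end(tb, num_insns);
 *
 * so the dummy subtrahend emitted in gen_tb_start() is patched once the
 * final value of num_insns is known.
 */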

#endif