mirror of https://gitee.com/openkylin/qemu.git
target-arm: Fix typos in comments
Fix a variety of typos in comments in target-arm files.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
This commit is contained in:
parent 656267413c
commit b90372ad2a
target-arm/arm-semi.c
@@ -281,7 +281,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
         return len - ret;
     }
     case TARGET_SYS_READC:
-        /* XXX: Read from debug cosole. Not implemented.  */
+        /* XXX: Read from debug console. Not implemented.  */
         return 0;
     case TARGET_SYS_ISTTY:
         if (use_gdb_syscalls()) {
target-arm/cpu.h
@@ -79,7 +79,7 @@ struct arm_boot_info;
 typedef struct CPUARMState {
     /* Regs for current mode.  */
     uint32_t regs[16];
-    /* Frequently accessed CPSR bits are stored separately for efficiently.
+    /* Frequently accessed CPSR bits are stored separately for efficiency.
        This contains all the other bits.  Use cpsr_{read,write} to access
        the whole CPSR.  */
     uint32_t uncached_cpsr;
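The comment fixed above records a real design point: the N, Z, C and V bits are kept out of uncached_cpsr so translated code can update them cheaply, and cpsr_read()/cpsr_write() reassemble or split the full register on demand. A minimal sketch of the read side, using a simplified stand-in struct rather than the real CPUARMState:

#include <stdint.h>

/* Sketch only: simplified stand-in for the CPUARMState flag fields. */
typedef struct {
    uint32_t uncached_cpsr; /* all CPSR bits except N, Z, C, V */
    uint32_t NF;            /* N is cached in bit 31 of NF */
    uint32_t ZF;            /* Z is set iff ZF == 0 */
    uint32_t CF;            /* C is cached in bit 0 */
    uint32_t VF;            /* V is cached in bit 31 of VF */
} MiniARMState;

/* Reassemble a full CPSR in the spirit of cpsr_read(): merge the
 * cached flags back into their architectural positions (N=31, Z=30,
 * C=29, V=28). */
static uint32_t mini_cpsr_read(const MiniARMState *env)
{
    return env->uncached_cpsr
        | (env->NF & 0x80000000u)            /* N */
        | ((env->ZF == 0) ? (1u << 30) : 0u) /* Z */
        | ((env->CF & 1u) << 29)             /* C */
        | ((env->VF >> 31) << 28);           /* V */
}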
target-arm/helper.c
@@ -988,7 +988,7 @@ static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
 }
 
 static const ARMCPRegInfo lpae_cp_reginfo[] = {
-    /* NOP AMAIR0/1: the override is because these clash with tha rather
+    /* NOP AMAIR0/1: the override is because these clash with the rather
      * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
      */
     { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
@@ -2899,8 +2899,8 @@ uint32_t HELPER(logicq_cc)(uint64_t val)
     return (val >> 32) | (val != 0);
 }
 
-/* VFP support.  We follow the convention used for VFP instrunctions:
-   Single precition routines have a "s" suffix, double precision a
+/* VFP support.  We follow the convention used for VFP instructions:
+   Single precision routines have a "s" suffix, double precision a
    "d" suffix.  */
 
 /* Convert host exception flags to vfp form.  */
target-arm/neon_helper.c
@@ -530,7 +530,7 @@ NEON_VOP(rshl_s16, neon_s16, 2)
 #undef NEON_FN
 
 /* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator.  */
+ * intermediate 64 bit accumulator.  */
 uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
 {
     int32_t dest;
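The comment corrected in this and several following neon_helper.c hunks describes a genuine pitfall: a rounded shift adds 1 << (shift - 1) to the value before shifting, and that addition can overflow the 32-bit type even when the shifted result fits. A hedged sketch of the trick (the real helpers also handle negative and out-of-range shift counts):

#include <stdint.h>

/* Rounded arithmetic right shift.  The rounding constant can push the
 * sum past 32 bits, so form it in a 64-bit accumulator and truncate
 * after shifting.  Sketch only: assumes 1 <= shift <= 31. */
static int32_t rshl_round_sketch(int32_t val, int shift)
{
    int64_t acc = (int64_t)val + ((int64_t)1 << (shift - 1));
    return (int32_t)(acc >> shift);
}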
@@ -547,8 +547,8 @@ uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
     return dest;
 }
 
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
 uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
 {
     int8_t shift = (int8_t)shiftop;
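Why the fixed comment calls the 64-bit case trickier: for 32-bit inputs the helper can escape into a 64-bit accumulator, but for 64-bit inputs there is no wider type, so overflow of the addition has to be detected from the operands themselves. A sketch of the standard sign-comparison check (not the exact QEMU code):

#include <stdbool.h>
#include <stdint.h>

/* Signed 64-bit addition overflows iff both operands have the same
 * sign and the (wrapped) sum's sign differs; the wrap is performed
 * through unsigned arithmetic, which is well defined in C. */
static bool sadd64_overflows(int64_t a, int64_t b)
{
    int64_t sum = (int64_t)((uint64_t)a + (uint64_t)b);
    return ((a ^ sum) & (b ^ sum)) < 0;
}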
@@ -590,7 +590,7 @@ NEON_VOP(rshl_u16, neon_u16, 2)
 #undef NEON_FN
 
 /* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator.  */
+ * intermediate 64 bit accumulator.  */
 uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
 {
     uint32_t dest;
@@ -608,8 +608,8 @@ uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
     return dest;
 }
 
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
 uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
 {
     int8_t shift = (uint8_t)shiftop;
@@ -817,7 +817,7 @@ NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
 #undef NEON_FN
 
 /* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator.  */
+ * intermediate 64 bit accumulator.  */
 uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
 {
     uint32_t dest;
@@ -846,8 +846,8 @@ uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop
     return dest;
 }
 
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
 uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
 {
     int8_t shift = (int8_t)shiftop;
@@ -914,7 +914,7 @@ NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
 #undef NEON_FN
 
 /* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator.  */
+ * intermediate 64 bit accumulator.  */
 uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
 {
     int32_t dest;
@@ -942,8 +942,8 @@ uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shift
     return dest;
 }
 
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
 uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
 {
     int8_t shift = (uint8_t)shiftop;
@@ -1671,7 +1671,7 @@ uint64_t HELPER(neon_negl_u64)(uint64_t x)
     return -x;
 }
 
-/* Saturnating sign manuipulation. */
+/* Saturating sign manipulation. */
 /* ??? Make these use NEON_VOP1 */
 #define DO_QABS8(x) do { \
     if (x == (int8_t)0x80) { \
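For context on the DO_QABS8 macro visible in this hunk: INT8_MIN has no positive counterpart, so a saturating absolute value clamps -128 to +127 and records the saturation. A standalone sketch of that logic, with the CPU saturation-flag plumbing the real macro uses replaced by a plain output parameter:

#include <stdint.h>

/* Saturating 8-bit absolute value.  abs(-128) is unrepresentable, so
 * clamp to +127 and flag saturation, standing in for the saturation
 * flag the real macro sets in CPU state. */
static int8_t qabs8_sketch(int8_t x, int *sat)
{
    if (x == (int8_t)0x80) {  /* INT8_MIN: negation would overflow */
        *sat = 1;
        return 0x7f;
    }
    return (x < 0) ? (int8_t)-x : x;
}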
target-arm/op_helper.c
@@ -99,7 +99,7 @@ void tlb_fill(CPUARMState *env1, target_ulong addr, int is_write, int mmu_idx,
 }
 #endif
 
-/* FIXME: Pass an axplicit pointer to QF to CPUARMState, and move saturating
+/* FIXME: Pass an explicit pointer to QF to CPUARMState, and move saturating
    instructions into helper.c */
 uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
 {
target-arm/translate.c
@@ -53,7 +53,7 @@ typedef struct DisasContext {
     int condjmp;
     /* The label that will be jumped to when the instruction is skipped.  */
     int condlabel;
-    /* Thumb-2 condtional execution bits.  */
+    /* Thumb-2 conditional execution bits.  */
     int condexec_mask;
     int condexec_cond;
     struct TranslationBlock *tb;
@@ -77,7 +77,7 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
 #endif
 
 /* These instructions trap after executing, so defer them until after the
-   conditional executions state has been updated.  */
+   conditional execution state has been updated.  */
 #define DISAS_WFI 4
 #define DISAS_SWI 5
 
@@ -155,7 +155,7 @@ static void load_reg_var(DisasContext *s, TCGv var, int reg)
 {
     if (reg == 15) {
         uint32_t addr;
-        /* normaly, since we updated PC, we need only to add one insn */
+        /* normally, since we updated PC, we need only to add one insn */
         if (s->thumb)
            addr = (long)s->pc + 2;
         else
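The "add one insn" comment fixed here reflects the ARM architectural rule that reading r15 yields the address of the current instruction plus 8 in ARM state or plus 4 in Thumb state; by this point the translator's pc already points one instruction further on, so only one more instruction width is needed. A small illustration of the arithmetic (hypothetical helper name, simplified from the surrounding code):

#include <stdint.h>

/* Architectural value seen by a read of r15: current instruction + 8
 * (ARM) or + 4 (Thumb).  Given a pc already advanced past the current
 * instruction, that leaves exactly one more instruction width. */
static uint32_t r15_read_value(uint32_t pc_after_insn, int thumb)
{
    return pc_after_insn + (thumb ? 2 : 4);
}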
@@ -4897,7 +4897,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
             size--;
         }
         shift = (insn >> 16) & ((1 << (3 + size)) - 1);
-        /* To avoid excessive dumplication of ops we implement shift
+        /* To avoid excessive duplication of ops we implement shift
            by immediate using the variable shift operations.  */
         if (op < 8) {
             /* Shift by immediate:
@@ -6402,7 +6402,7 @@ static void gen_logicq_cc(TCGv_i64 val)
 
 /* Load/Store exclusive instructions are implemented by remembering
    the value/address loaded, and seeing if these are the same
-   when the store is performed.  This should be is sufficient to implement
+   when the store is performed.  This should be sufficient to implement
    the architecturally mandated semantics, and avoids having to monitor
    regular stores.
 
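The comment in this last hunk summarizes the emulation strategy for LDREX/STREX: record the address and value at the exclusive load, and let the exclusive store succeed only if that address still holds the remembered value. A much-simplified sketch of that state machine (hypothetical names and word-indexed memory; the real code emits TCG ops and handles multiple access sizes):

#include <stdint.h>

static uint32_t excl_addr = ~0u;  /* monitored address, ~0 = none */
static uint32_t excl_val;         /* value observed by the load   */

static uint32_t ldrex_sketch(const uint32_t *mem, uint32_t addr)
{
    excl_addr = addr;
    excl_val = mem[addr];         /* remember what we loaded */
    return excl_val;
}

/* Returns 0 if the store was performed, 1 if exclusivity was lost,
 * matching the STREX result convention. */
static int strex_sketch(uint32_t *mem, uint32_t addr, uint32_t val)
{
    if (addr != excl_addr || mem[addr] != excl_val) {
        return 1;
    }
    mem[addr] = val;
    excl_addr = ~0u;              /* clear the monitor */
    return 0;
}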