mirror of https://gitee.com/openkylin/qemu.git
target-alpha: convert palcode ops to TCG
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5360 c046a42c-6fe2-441c-8c8c-71466251a162
commit 8bb6e981e0
parent f4ed86794c
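
The conversion follows a single pattern: dyngen micro-ops, which took their operands implicitly from the global T0/T1 registers and PARAM slots and had to end in RETURN(), become ordinary C helpers that take their operands as arguments and are invoked from the translator through the tcg_gen_helper_* wrappers. A minimal sketch of that pattern, using the hw_ret case from this diff (env, IPR_EXC_ADDR, OPPROTO and the tcg_gen_helper_0_1 wrapper are assumed from the QEMU tree at this revision):

    /* Before: dyngen micro-op, operand passed implicitly in the global T0. */
    void OPPROTO op_hw_ret (void)
    {
        env->pc = T0 & ~3;
        env->ipr[IPR_EXC_ADDR] = T0 & 1;
        RETURN();
    }

    /* After: a plain C helper, operand passed explicitly as an argument... */
    void helper_hw_ret (uint64_t a)
    {
        env->pc = a & ~3;
        env->ipr[IPR_EXC_ADDR] = a & 1;
    }

    /* ...and the translator computes the operand into a TCG value and
     * emits a call to the helper: */
    TCGv tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
    tcg_gen_helper_0_1(helper_hw_ret, tmp);
    tcg_temp_free(tmp);
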
@@ -1425,10 +1425,8 @@ case "$target_cpu" in
  ;;
  alpha)
    echo "TARGET_ARCH=alpha" >> $config_mak
    echo "CONFIG_DYNGEN_OP=yes" >> $config_mak
    echo "#define TARGET_ARCH \"alpha\"" >> $config_h
    echo "#define TARGET_ALPHA 1" >> $config_h
    echo "#define CONFIG_DYNGEN_OP 1" >> $config_h
  ;;
  arm|armeb)
    echo "TARGET_ARCH=arm" >> $config_mak

@@ -29,29 +29,10 @@

register struct CPUAlphaState *env asm(AREG0);

#if TARGET_LONG_BITS > HOST_LONG_BITS

/* no registers can be used */
#define T0 (env->t0)
#define T1 (env->t1)

#else

register uint64_t T0 asm(AREG1);
register uint64_t T1 asm(AREG2);

#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */

#define PARAM(n) ((uint64_t)PARAM##n)
#define SPARAM(n) ((int32_t)PARAM##n)
#define FP_STATUS (env->fp_status)

#if defined (DEBUG_OP)
#define RETURN() __asm__ __volatile__("nop" : : : "memory");
#else
#define RETURN() __asm__ __volatile__("" : : : "memory");
#endif

#include "cpu.h"
#include "exec-all.h"

@@ -436,13 +436,3 @@ void cpu_dump_state (CPUState *env, FILE *f,
    }
}

void cpu_dump_EA (target_ulong EA)
{
    FILE *f;

    if (logfile)
        f = logfile;
    else
        f = stdout;
    fprintf(f, "Memory access at address " TARGET_FMT_lx "\n", EA);
}

@@ -108,3 +108,26 @@ DEF_HELPER(uint64_t, helper_cvtql, (uint64_t))
DEF_HELPER(uint64_t, helper_cvtqlv, (uint64_t))
DEF_HELPER(uint64_t, helper_cvtqlsv, (uint64_t))

#if !defined (CONFIG_USER_ONLY)
DEF_HELPER(void, helper_hw_rei, (void))
DEF_HELPER(void, helper_hw_ret, (uint64_t))
DEF_HELPER(uint64_t, helper_mfpr, (int, uint64_t))
DEF_HELPER(void, helper_mtpr, (int, uint64_t))
DEF_HELPER(void, helper_set_alt_mode, (void))
DEF_HELPER(void, helper_restore_mode, (void))

DEF_HELPER(uint64_t, helper_ld_virt_to_phys, (uint64_t))
DEF_HELPER(uint64_t, helper_st_virt_to_phys, (uint64_t))
DEF_HELPER(void, helper_ldl_raw, (uint64_t, uint64_t))
DEF_HELPER(void, helper_ldq_raw, (uint64_t, uint64_t))
DEF_HELPER(void, helper_ldl_l_raw, (uint64_t, uint64_t))
DEF_HELPER(void, helper_ldq_l_raw, (uint64_t, uint64_t))
DEF_HELPER(void, helper_ldl_kernel, (uint64_t, uint64_t))
DEF_HELPER(void, helper_ldq_kernel, (uint64_t, uint64_t))
DEF_HELPER(void, helper_ldl_data, (uint64_t, uint64_t))
DEF_HELPER(void, helper_ldq_data, (uint64_t, uint64_t))
DEF_HELPER(void, helper_stl_raw, (uint64_t, uint64_t))
DEF_HELPER(void, helper_stq_raw, (uint64_t, uint64_t))
DEF_HELPER(uint64_t, helper_stl_c_raw, (uint64_t, uint64_t))
DEF_HELPER(uint64_t, helper_stq_c_raw, (uint64_t, uint64_t))
#endif

@@ -1,99 +0,0 @@
/*
 * Alpha emulation cpu micro-operations for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define DEBUG_OP

#include "config.h"
#include "exec.h"
#include "host-utils.h"
#include "op_helper.h"

/* Load and stores */
#define MEMSUFFIX _raw
#include "op_mem.h"
#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _kernel
#include "op_mem.h"
#define MEMSUFFIX _executive
#include "op_mem.h"
#define MEMSUFFIX _supervisor
#include "op_mem.h"
#define MEMSUFFIX _user
#include "op_mem.h"
/* This is used for pal modes */
#define MEMSUFFIX _data
#include "op_mem.h"
#endif

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void OPPROTO op_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
    RETURN();
}

void OPPROTO op_hw_ret (void)
{
    env->pc = T0 & ~3;
    env->ipr[IPR_EXC_ADDR] = T0 & 1;
    /* XXX: re-enable interrupts and memory mapping */
    RETURN();
}

void OPPROTO op_mfpr (void)
{
    helper_mfpr(PARAM(1));
    RETURN();
}

void OPPROTO op_mtpr (void)
{
    helper_mtpr(PARAM(1));
    RETURN();
}

void OPPROTO op_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
    RETURN();
}

void OPPROTO op_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
    RETURN();
}

void OPPROTO op_ld_phys_to_virt (void)
{
    helper_ld_phys_to_virt();
    RETURN();
}

void OPPROTO op_st_phys_to_virt (void)
{
    helper_st_phys_to_virt();
    RETURN();
}
#endif /* !defined (CONFIG_USER_ONLY) */

@@ -22,19 +22,11 @@
#include "host-utils.h"
#include "softfloat.h"

#include "op_helper.h"

void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}

void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)

@@ -990,19 +982,48 @@ uint64_t helper_cvtqlsv (uint64_t a)
    return __helper_cvtql(a, 1, 1);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
void helper_hw_rei (void)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_mtpr (int iprn)
void helper_hw_ret (uint64_t a)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/

@@ -1013,48 +1034,130 @@ void helper_mtpr (int iprn)
 * Hopefully, we emulate the PALcode, then we should never see
 * HW_LD / HW_ST instructions.
 */
void helper_ld_phys_to_virt (void)
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
    return physaddr;
}

void helper_st_phys_to_virt (void)
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
    return physaddr;
}

void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

#define MMUSUFFIX _mmu

@@ -1,46 +0,0 @@
/*
 * Alpha emulation cpu micro-operations helpers definitions for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

void helper_call_pal (uint32_t palcode);

double helper_ldff_raw (target_ulong ea);
void helper_stff_raw (target_ulong ea, double op);
double helper_ldfg_raw (target_ulong ea);
void helper_stfg_raw (target_ulong ea, double op);
#if !defined(CONFIG_USER_ONLY)
double helper_ldff_user (target_ulong ea);
void helper_stff_user (target_ulong ea, double op);
double helper_ldff_kernel (target_ulong ea);
void helper_stff_kernel (target_ulong ea, double op);
double helper_ldff_data (target_ulong ea);
void helper_stff_data (target_ulong ea, double op);
double helper_ldfg_user (target_ulong ea);
void helper_stfg_user (target_ulong ea, double op);
double helper_ldfg_kernel (target_ulong ea);
void helper_stfg_kernel (target_ulong ea, double op);
double helper_ldfg_data (target_ulong ea);
void helper_stfg_data (target_ulong ea, double op);
#endif

void helper_mfpr (int iprn);
void helper_mtpr (int iprn);
void helper_ld_phys_to_virt (void);
void helper_st_phys_to_virt (void);

@@ -1,93 +0,0 @@
/*
 * Alpha emulation cpu micro-operations for memory accesses for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

//#define DEBUG_MEM_ACCESSES
#if defined (DEBUG_MEM_ACCESSES)
void helper_print_mem_EA (target_ulong EA);
#define print_mem_EA(EA) do { helper_print_mem_EA(EA); } while (0)
#else
#define print_mem_EA(EA) do { } while (0)
#endif

static always_inline uint32_t glue(ldl_l, MEMSUFFIX) (target_ulong EA)
{
    env->lock = EA;

    return glue(ldl, MEMSUFFIX)(EA);
}

static always_inline uint32_t glue(ldq_l, MEMSUFFIX) (target_ulong EA)
{
    env->lock = EA;

    return glue(ldq, MEMSUFFIX)(EA);
}

static always_inline void glue(stl_c, MEMSUFFIX) (target_ulong EA,
                                                  uint32_t data)
{
    if (EA == env->lock) {
        glue(stl, MEMSUFFIX)(EA, data);
        T0 = 0;
    } else {
        T0 = 1;
    }
    env->lock = -1;
}

static always_inline void glue(stq_c, MEMSUFFIX) (target_ulong EA,
                                                  uint64_t data)
{
    if (EA == env->lock) {
        glue(stq, MEMSUFFIX)(EA, data);
        T0 = 0;
    } else {
        T0 = 1;
    }
    env->lock = -1;
}

#define ALPHA_LD_OP(name, op) \
void OPPROTO glue(glue(op_ld, name), MEMSUFFIX) (void) \
{ \
    print_mem_EA(T0); \
    T1 = glue(op, MEMSUFFIX)(T0); \
    RETURN(); \
}

#define ALPHA_ST_OP(name, op) \
void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
{ \
    print_mem_EA(T0); \
    glue(op, MEMSUFFIX)(T0, T1); \
    RETURN(); \
}

ALPHA_LD_OP(l, ldl);
ALPHA_ST_OP(l, stl);
ALPHA_LD_OP(q, ldq);
ALPHA_ST_OP(q, stq);

ALPHA_LD_OP(l_l, ldl_l);
ALPHA_LD_OP(q_l, ldq_l);
ALPHA_ST_OP(l_c, stl_c);
ALPHA_ST_OP(q_c, stq_c);

#undef MEMSUFFIX

@@ -51,9 +51,6 @@ static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;

/* dyngen register indexes */
static TCGv cpu_T[2];

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

@@ -70,16 +67,6 @@ static void alpha_translate_init(void)

    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");

#if TARGET_LONG_BITS > HOST_LONG_BITS
    cpu_T[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
                                  offsetof(CPUState, t0), "T0");
    cpu_T[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
                                  offsetof(CPUState, t1), "T1");
#else
    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG2, "T1");
#endif

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);

@@ -107,56 +94,6 @@ static void alpha_translate_init(void)
    done_init = 1;
}

/* Memory moves */
#if defined(CONFIG_USER_ONLY)
#define OP_LD_TABLE(width) \
static GenOpFunc *gen_op_ld##width[] = { \
    &gen_op_ld##width##_raw, \
}
#define OP_ST_TABLE(width) \
static GenOpFunc *gen_op_st##width[] = { \
    &gen_op_st##width##_raw, \
}
#else
#define OP_LD_TABLE(width) \
static GenOpFunc *gen_op_ld##width[] = { \
    &gen_op_ld##width##_kernel, \
    &gen_op_ld##width##_executive, \
    &gen_op_ld##width##_supervisor, \
    &gen_op_ld##width##_user, \
}
#define OP_ST_TABLE(width) \
static GenOpFunc *gen_op_st##width[] = { \
    &gen_op_st##width##_kernel, \
    &gen_op_st##width##_executive, \
    &gen_op_st##width##_supervisor, \
    &gen_op_st##width##_user, \
}
#endif

#define GEN_LD(width) \
OP_LD_TABLE(width); \
static always_inline void gen_ld##width (DisasContext *ctx) \
{ \
    (*gen_op_ld##width[ctx->mem_idx])(); \
}

#define GEN_ST(width) \
OP_ST_TABLE(width); \
static always_inline void gen_st##width (DisasContext *ctx) \
{ \
    (*gen_op_st##width[ctx->mem_idx])(); \
}

GEN_LD(l);
GEN_ST(l);
GEN_LD(q);
GEN_ST(q);
GEN_LD(l_l);
GEN_ST(l_c);
GEN_LD(q_l);
GEN_ST(q_c);

static always_inline void gen_excp (DisasContext *ctx,
                                    int exception, int error_code)
{

@@ -1027,7 +964,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
    if (ra != 31) {
        if (islit)
            tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
        else
        else
            tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
    } else {
        if (islit)

@@ -1701,9 +1638,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
#else
    if (!ctx->pal_mode)
        goto invalid_opc;
    gen_op_mfpr(insn & 0xFF);
    if (ra != 31)
        tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]);
    if (ra != 31) {
        TCGv tmp = tcg_const_i32(insn & 0xFF);
        tcg_gen_helper_1_2(helper_mfpr, cpu_ir[ra], tmp, cpu_ir[ra]);
        tcg_temp_free(tmp);
    }
    break;
#endif
case 0x1A:

@@ -1737,94 +1676,94 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
#else
    if (!ctx->pal_mode)
        goto invalid_opc;
    if (rb != 31)
        tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
    else
        tcg_gen_movi_i64(cpu_T[0], 0);
    tcg_gen_movi_i64(cpu_T[1], disp12);
    tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
    switch ((insn >> 12) & 0xF) {
    case 0x0:
        /* Longword physical access */
        gen_op_ldl_raw();
        break;
    case 0x1:
        /* Quadword physical access */
        gen_op_ldq_raw();
        break;
    case 0x2:
        /* Longword physical access with lock */
        gen_op_ldl_l_raw();
        break;
    case 0x3:
        /* Quadword physical access with lock */
        gen_op_ldq_l_raw();
        break;
    case 0x4:
        /* Longword virtual PTE fetch */
        gen_op_ldl_kernel();
        break;
    case 0x5:
        /* Quadword virtual PTE fetch */
        gen_op_ldq_kernel();
        break;
    case 0x6:
        /* Invalid */
        goto invalid_opc;
    case 0x7:
        /* Invalid */
        goto invalid_opc;
    case 0x8:
        /* Longword virtual access */
        gen_op_ld_phys_to_virt();
        gen_op_ldl_raw();
        break;
    case 0x9:
        /* Quadword virtual access */
        gen_op_ld_phys_to_virt();
        gen_op_ldq_raw();
        break;
    case 0xA:
        /* Longword virtual access with protection check */
        gen_ldl(ctx);
        break;
    case 0xB:
        /* Quadword virtual access with protection check */
        gen_ldq(ctx);
        break;
    case 0xC:
        /* Longword virtual access with alternate access mode */
        gen_op_set_alt_mode();
        gen_op_ld_phys_to_virt();
        gen_op_ldl_raw();
        gen_op_restore_mode();
        break;
    case 0xD:
        /* Quadword virtual access with alternate access mode */
        gen_op_set_alt_mode();
        gen_op_ld_phys_to_virt();
        gen_op_ldq_raw();
        gen_op_restore_mode();
        break;
    case 0xE:
        /* Longword virtual access with alternate access mode and
         * protection checks
         */
        gen_op_set_alt_mode();
        gen_op_ldl_data();
        gen_op_restore_mode();
        break;
    case 0xF:
        /* Quadword virtual access with alternate access mode and
         * protection checks
         */
        gen_op_set_alt_mode();
        gen_op_ldq_data();
        gen_op_restore_mode();
        break;
    if (ra != 31) {
        TCGv addr = tcg_temp_new(TCG_TYPE_I64);
        if (rb != 31)
            tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
        else
            tcg_gen_movi_i64(addr, disp12);
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            tcg_gen_helper_0_2(helper_ldl_raw, cpu_ir[ra], addr);
            break;
        case 0x1:
            /* Quadword physical access */
            tcg_gen_helper_0_2(helper_ldq_raw, cpu_ir[ra], addr);
            break;
        case 0x2:
            /* Longword physical access with lock */
            tcg_gen_helper_0_2(helper_ldl_l_raw, cpu_ir[ra], addr);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            tcg_gen_helper_0_2(helper_ldq_l_raw, cpu_ir[ra], addr);
            break;
        case 0x4:
            /* Longword virtual PTE fetch */
            tcg_gen_helper_0_2(helper_ldl_kernel, cpu_ir[ra], addr);
            break;
        case 0x5:
            /* Quadword virtual PTE fetch */
            tcg_gen_helper_0_2(helper_ldq_kernel, cpu_ir[ra], addr);
            break;
        case 0x6:
            /* Invalid */
            goto invalid_opc;
        case 0x7:
            /* Invalid */
            goto invalid_opc;
        case 0x8:
            /* Longword virtual access */
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_ldl_raw, cpu_ir[ra], addr);
            break;
        case 0x9:
            /* Quadword virtual access */
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_ldq_raw, cpu_ir[ra], addr);
            break;
        case 0xA:
            /* Longword virtual access with protection check */
            tcg_gen_qemu_ld32s(cpu_ir[ra], addr, ctx->flags);
            break;
        case 0xB:
            /* Quadword virtual access with protection check */
            tcg_gen_qemu_ld64(cpu_ir[ra], addr, ctx->flags);
            break;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            tcg_gen_helper_0_0(helper_set_alt_mode);
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_ldl_raw, cpu_ir[ra], addr);
            tcg_gen_helper_0_0(helper_restore_mode);
            break;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            tcg_gen_helper_0_0(helper_set_alt_mode);
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_ldq_raw, cpu_ir[ra], addr);
            tcg_gen_helper_0_0(helper_restore_mode);
            break;
        case 0xE:
            /* Longword virtual access with alternate access mode and
             * protection checks
             */
            tcg_gen_helper_0_0(helper_set_alt_mode);
            tcg_gen_helper_0_2(helper_ldl_data, cpu_ir[ra], addr);
            tcg_gen_helper_0_0(helper_restore_mode);
            break;
        case 0xF:
            /* Quadword virtual access with alternate access mode and
             * protection checks
             */
            tcg_gen_helper_0_0(helper_set_alt_mode);
            tcg_gen_helper_0_2(helper_ldq_data, cpu_ir[ra], addr);
            tcg_gen_helper_0_0(helper_restore_mode);
            break;
        }
        tcg_temp_free(addr);
    }
    if (ra != 31)
        tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]);
    break;
#endif
case 0x1C:

@@ -2014,12 +1953,18 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
#else
    if (!ctx->pal_mode)
        goto invalid_opc;
    if (ra != 31)
        tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
    else
        tcg_gen_movi_i64(cpu_T[0], 0);
    gen_op_mtpr(insn & 0xFF);
    ret = 2;
    else {
        TCGv tmp1 = tcg_const_i32(insn & 0xFF);
        if (ra != 31)
            tcg_gen_helper(helper_mtpr, tmp1, cpu_ir[ra]);
        else {
            TCGv tmp2 = tcg_const_i64(0);
            tcg_gen_helper(helper_mtpr, tmp1, tmp2);
            tcg_temp_free(tmp2);
        }
        tcg_temp_free(tmp1);
        ret = 2;
    }
    break;
#endif
case 0x1E:

@@ -2031,15 +1976,17 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
        goto invalid_opc;
    if (rb == 31) {
        /* "Old" alpha */
        gen_op_hw_rei();
        tcg_gen_helper_0_0(helper_hw_rei);
    } else {
        if (ra != 31)
            tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
        else
            tcg_gen_movi_i64(cpu_T[0], 0);
        tcg_gen_movi_i64(cpu_T[1], (((int64_t)insn << 51) >> 51));
        tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_hw_ret();
        TCGv tmp;

        if (ra != 31) {
            tmp = tcg_temp_new(TCG_TYPE_I64);
            tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
        } else
            tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
        tcg_gen_helper_0_1(helper_hw_ret, tmp);
        tcg_temp_free(tmp);
    }
    ret = 2;
    break;

@@ -2051,79 +1998,88 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
#else
    if (!ctx->pal_mode)
        goto invalid_opc;
    if (ra != 31)
        tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp12);
    else
        tcg_gen_movi_i64(cpu_T[0], disp12);
    if (ra != 31)
        tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]);
    else
        tcg_gen_movi_i64(cpu_T[1], 0);
    switch ((insn >> 12) & 0xF) {
    case 0x0:
        /* Longword physical access */
        gen_op_stl_raw();
        break;
    case 0x1:
        /* Quadword physical access */
        gen_op_stq_raw();
        break;
    case 0x2:
        /* Longword physical access with lock */
        gen_op_stl_c_raw();
        break;
    case 0x3:
        /* Quadword physical access with lock */
        gen_op_stq_c_raw();
        break;
    case 0x4:
        /* Longword virtual access */
        gen_op_st_phys_to_virt();
        gen_op_stl_raw();
        break;
    case 0x5:
        /* Quadword virtual access */
        gen_op_st_phys_to_virt();
        gen_op_stq_raw();
        break;
    case 0x6:
        /* Invalid */
        goto invalid_opc;
    case 0x7:
        /* Invalid */
        goto invalid_opc;
    case 0x8:
        /* Invalid */
        goto invalid_opc;
    case 0x9:
        /* Invalid */
        goto invalid_opc;
    case 0xA:
        /* Invalid */
        goto invalid_opc;
    case 0xB:
        /* Invalid */
        goto invalid_opc;
    case 0xC:
        /* Longword virtual access with alternate access mode */
        gen_op_set_alt_mode();
        gen_op_st_phys_to_virt();
        gen_op_ldl_raw();
        gen_op_restore_mode();
        break;
    case 0xD:
        /* Quadword virtual access with alternate access mode */
        gen_op_set_alt_mode();
        gen_op_st_phys_to_virt();
        gen_op_ldq_raw();
        gen_op_restore_mode();
        break;
    case 0xE:
        /* Invalid */
        goto invalid_opc;
    case 0xF:
        /* Invalid */
        goto invalid_opc;
    else {
        TCGv addr, val;
        addr = tcg_temp_new(TCG_TYPE_I64);
        if (rb != 31)
            tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
        else
            tcg_gen_movi_i64(addr, disp12);
        if (ra != 31)
            val = cpu_ir[ra];
        else {
            val = tcg_temp_new(TCG_TYPE_I64);
            tcg_gen_movi_i64(val, 0);
        }
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            tcg_gen_helper_0_2(helper_stl_raw, val, addr);
            break;
        case 0x1:
            /* Quadword physical access */
            tcg_gen_helper_0_2(helper_stq_raw, val, addr);
            break;
        case 0x2:
            /* Longword physical access with lock */
            tcg_gen_helper_1_2(helper_stl_c_raw, val, val, addr);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            tcg_gen_helper_1_2(helper_stq_c_raw, val, val, addr);
            break;
        case 0x4:
            /* Longword virtual access */
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_stl_raw, val, addr);
            break;
        case 0x5:
            /* Quadword virtual access */
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_stq_raw, val, addr);
            break;
        case 0x6:
            /* Invalid */
            goto invalid_opc;
        case 0x7:
            /* Invalid */
            goto invalid_opc;
        case 0x8:
            /* Invalid */
            goto invalid_opc;
        case 0x9:
            /* Invalid */
            goto invalid_opc;
        case 0xA:
            /* Invalid */
            goto invalid_opc;
        case 0xB:
            /* Invalid */
            goto invalid_opc;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            tcg_gen_helper_0_0(helper_set_alt_mode);
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_stl_raw, val, addr);
            tcg_gen_helper_0_0(helper_restore_mode);
            break;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            tcg_gen_helper_0_0(helper_set_alt_mode);
            tcg_gen_helper_1_1(helper_st_virt_to_phys, addr, addr);
            tcg_gen_helper_0_2(helper_stl_raw, val, addr);
            tcg_gen_helper_0_0(helper_restore_mode);
            break;
        case 0xE:
            /* Invalid */
            goto invalid_opc;
        case 0xF:
            /* Invalid */
            goto invalid_opc;
        }
        if (ra != 31)
            tcg_temp_free(val);
        tcg_temp_free(addr);
    }
    ret = 2;
    break;