target-arm: A64: Add SIMD ld/st single

Implement the SIMD ld/st single structure instructions.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
This commit is contained in:
Peter Maydell 2014-01-31 14:47:30 +00:00
parent 72430bf5eb
commit df54e47d2b
1 changed file with 142 additions and 2 deletions

View File

@ -2087,10 +2087,150 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
tcg_temp_free_i64(tcg_addr);
}
/* C3.3.3 AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12 11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * C3.3.4 AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12 11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    /* Number of structure elements: opc<0>:R encodes selem-1 */
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    /* Raw lane index from Q:S:size; narrowed below depending on scale */
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn;

    switch (scale) {
    case 3:
        /* opc<2:1> == 3 is LDxR (load and replicate): stores and S != 0
         * are unallocated; the element size comes from the size field.
         */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        /* Byte lane: index uses all of Q:S:size */
        break;
    case 1:
        /* Halfword lane: size<0> must be 0; index is Q:S:size<1> */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        /* size<1> must be 0; size<0> selects word vs doubleword lane */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            /* Word lane: index is Q:S */
            index >>= 2;
        } else {
            /* Doubleword lane: S must be 0; index is just Q */
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            uint64_t mulconst;
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), MO_TE + scale);
            /* Multiplying by the splat constant copies the loaded element
             * into every lane of a 64-bit value; 64-bit elements need no
             * multiply (mulconst == 0 skips it).
             */
            switch (scale) {
            case 0:
                mulconst = 0x0101010101010101ULL;
                break;
            case 1:
                mulconst = 0x0001000100010001ULL;
                break;
            case 2:
                mulconst = 0x0000000100000001ULL;
                break;
            case 3:
                mulconst = 0;
                break;
            default:
                g_assert_not_reached();
            }
            if (mulconst) {
                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
            }
            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
            if (is_q) {
                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
            } else {
                clear_vec_high(s, rt);
            }
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, MO_TE + scale);
            } else {
                do_vec_st(s, rt, index, tcg_addr, MO_TE + scale);
            }
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            /* Rm == 31 means post-index by the immediate (total bytes
             * transferred), which tcg_addr has already accumulated.
             */
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
/* C3.3 Loads and stores */