mirror of https://gitee.com/openkylin/qemu.git
Queued TCG patches
-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJaqqqrAAoJEGTfOOivfiFfR1gIAIt7D5WocZnyf0WZzrZHqCad
8vYHUk84HEYzEm3dmMA5iXVsZaN1chSWjuxKYInHuIf8xKlHAnuZh0AXtvtOxHR+
PL881JcvhvwVW++nstjZy7ls39UCT7bLETjmJQBGHYVe8Blm+fWDjN7dGVQpHSph
4S8vSs+QhFNPogXVEmJuOB+K+zOIwZC4j9EiEV+5qRXlO0gN+QFjL5o2hLd5dnoi
cllQImA3QtWSKesFR87lZG8h6qrFjAFiJKQYX3sc7C0IoS/27vhnBHlPGnQJUDZN
0/r788G7Fw2IIIgfXNBWurX2k57t6flxUIHnCjEXF0xdfwN9HzlvKEjv83pp+7E=
=0Mdj
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180316' into staging

Queued TCG patches

# gpg: Signature made Thu 15 Mar 2018 17:17:31 GMT
# gpg: using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20180316:
  tcg: Add choose_vector_size
  tcg/i386: Support INDEX_op_dup2_vec for -m32
  tcg: Improve tcg_gen_muli_i32/i64

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 2bb39a657a
tcg/i386/tcg-target.inc.c

@@ -2696,6 +2696,12 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
    case INDEX_op_x86_packus_vec:
        insn = packus_insn[vece];
        goto gen_simd;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_dup2_vec:
        /* Constraints have already placed both 32-bit inputs in xmm regs. */
        insn = OPC_PUNPCKLDQ;
        goto gen_simd;
#endif
    gen_simd:
        tcg_debug_assert(insn != OPC_UD2);
        if (type == TCG_TYPE_V256) {

@@ -3045,6 +3051,9 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
    case INDEX_op_x86_vperm2i128_vec:
    case INDEX_op_x86_punpckl_vec:
    case INDEX_op_x86_punpckh_vec:
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_dup2_vec:
#endif
        return &x_x_x;
    case INDEX_op_dup_vec:
    case INDEX_op_shli_vec:

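The new INDEX_op_dup2_vec case above relies on PUNPCKLDQ, which interleaves the low 32-bit lanes of its two operands. The following stand-alone sketch (illustration only, not QEMU code; the helper name and values are invented) models that interleave and shows how the two 32-bit inputs end up forming one 64-bit low:high pair in the destination's low lanes:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Model of PUNPCKLDQ on 128-bit operands: the result's doublewords are
 * { dst[0], src[0], dst[1], src[1] }.  With the low and high 32-bit
 * halves of a guest 64-bit value sitting in lane 0 of two xmm registers,
 * the interleave places the combined 64-bit pair in lanes 0-1. */
static void punpckldq_model(const uint32_t dst[4], const uint32_t src[4],
                            uint32_t out[4])
{
    out[0] = dst[0];   /* low dword of first operand  */
    out[1] = src[0];   /* low dword of second operand */
    out[2] = dst[1];
    out[3] = src[1];
}

int main(void)
{
    uint32_t lo[4] = { 0x89abcdef, 0, 0, 0 };  /* low 32 bits of the value  */
    uint32_t hi[4] = { 0x01234567, 0, 0, 0 };  /* high 32 bits of the value */
    uint32_t out[4];

    punpckldq_model(lo, hi, out);
    printf("combined 64-bit element = 0x%08" PRIx32 "%08" PRIx32 "\n",
           out[1], out[0]);
    return 0;
}
```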
tcg/tcg-op-gvec.c

@@ -351,6 +351,42 @@ static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
    }
}

/* Select a supported vector type for implementing an operation on SIZE
 * bytes. If OP is 0, assume that the real operation to be performed is
 * required by all backends. Otherwise, make sure than OP can be performed
 * on elements of size VECE in the selected type. Do not select V64 if
 * PREFER_I64 is true. Return 0 if no vector type is selected.
 */
static TCGType choose_vector_type(TCGOpcode op, unsigned vece, uint32_t size,
                                  bool prefer_i64)
{
    if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
        if (op == 0) {
            return TCG_TYPE_V256;
        }
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16. The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         * It is hard to imagine a case in which v256 is supported
         * but v128 is not, but check anyway.
         */
        if (tcg_can_emit_vec_op(op, TCG_TYPE_V256, vece)
            && (size % 32 == 0
                || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
            return TCG_TYPE_V256;
        }
    }
    if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
        && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
        return TCG_TYPE_V128;
    }
    if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
        && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V64, vece))) {
        return TCG_TYPE_V64;
    }
    return 0;
}

/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
 * Only one of IN_32 or IN_64 may be set;
 * IN_C is used if IN_32 and IN_64 are unset.

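The selection policy documented in the comment above can be summarized with a small stand-alone sketch. This is a heavy simplification (illustration only, not the QEMU function): it keeps only the size thresholds and the prefer_i64 exception, and ignores the per-opcode tcg_can_emit_vec_op() checks; all names are invented for the sketch.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model: try the widest vector kind whose size can cover the
 * operand, skip 64-bit vectors when the caller prefers plain 64-bit
 * integer code, and report 0 when nothing fits. */
enum vec_kind { VEC_NONE = 0, VEC_V64 = 8, VEC_V128 = 16, VEC_V256 = 32 };

static enum vec_kind choose_kind(uint32_t size, bool have_v256,
                                 bool have_v128, bool have_v64,
                                 bool prefer_i64)
{
    if (have_v256 && size >= 32) {
        return VEC_V256;
    }
    if (have_v128 && size >= 16) {
        return VEC_V128;
    }
    if (have_v64 && !prefer_i64 && size >= 8) {
        return VEC_V64;
    }
    return VEC_NONE;   /* caller falls back to integer or out-of-line code */
}

int main(void)
{
    printf("%d\n", choose_kind(80, true, true, true, false));   /* 32 */
    printf("%d\n", choose_kind(16, false, true, true, false));  /* 16 */
    printf("%d\n", choose_kind(8, false, false, true, true));   /* 0  */
    return 0;
}
```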
@@ -376,19 +412,12 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
        }
    }

    type = 0;
    if (TCG_TARGET_HAS_v256 && check_size_impl(oprsz, 32)) {
        type = TCG_TYPE_V256;
    } else if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)) {
        type = TCG_TYPE_V128;
    } else if (TCG_TARGET_HAS_v64 && check_size_impl(oprsz, 8)
               /* Prefer integer when 64-bit host and no variable dup. */
               && !(TCG_TARGET_REG_BITS == 64 && in_32 == NULL
                    && (in_64 == NULL || vece == MO_64))) {
        type = TCG_TYPE_V64;
    }

    /* Implement inline with a vector type, if possible. */
    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and no variable dup.
     */
    type = choose_vector_type(0, vece, oprsz,
                              (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
                               && (in_64 == NULL || vece == MO_64)));
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);

@@ -414,21 +443,30 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
        }

        i = 0;
        if (TCG_TARGET_HAS_v256) {
        switch (type) {
        case TCG_TYPE_V256:
            /* Recall that ARM SVE allows vector sizes that are not a
             * power of 2, but always a multiple of 16. The intent is
             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
             */
            for (; i + 32 <= oprsz; i += 32) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
            }
        }
        if (TCG_TARGET_HAS_v128) {
            /* fallthru */
        case TCG_TYPE_V128:
            for (; i + 16 <= oprsz; i += 16) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
            }
        }
        if (TCG_TARGET_HAS_v64) {
            break;
        case TCG_TYPE_V64:
            for (; i < oprsz; i += 8) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_vec(t_vec);
        goto done;
    }

@@ -484,7 +522,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
            }
            tcg_temp_free_i64(t_64);
            goto done;
        }
        }
    }

    /* Otherwise implement out of line. */

@@ -866,49 +904,55 @@ static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
       Expand with successively smaller host vector sizes. The intent is
       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16. */
    /* ??? For maxsz > oprsz, the host may be able to use an opr-sized
       operation, zeroing the balance of the register. We can then
       use a max-sized store to implement the clearing without an extra
       store operation. This is true for aarch64 and x86_64 hosts. */

    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16. The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
        if (some == oprsz) {
            goto done;
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
    }

    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
               && g->fniv && check_size_impl(oprsz, 8)
               && (!g->opc
                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
        break;
    case TCG_TYPE_V64:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
        expand_2_i64(dofs, aofs, oprsz, g->fni8);
    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
        expand_2_i32(dofs, aofs, oprsz, g->fni4);
    } else {
        assert(g->fno != NULL);
        tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
        return;
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2_i64(dofs, aofs, oprsz, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2_i32(dofs, aofs, oprsz, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }

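The shape shared by the rewritten expanders is easier to see in isolation: choose the chunk size once, then let the 32-byte case fall through into the 16-byte case so that a size which is only a multiple of 16 gets finished with one narrower operation (the "2x32 + 1x16" example from the comments). A toy, self-contained sketch of that dispatch (illustration only, not QEMU code; names are invented):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the switch-with-fallthrough expansion used above. */
enum chunk { CHUNK_NONE = 0, CHUNK_16 = 16, CHUNK_32 = 32 };

static void expand(uint32_t oprsz, enum chunk type)
{
    uint32_t i = 0;

    switch (type) {
    case CHUNK_32:
        for (; i + 32 <= oprsz; i += 32) {
            printf("emit 32-byte operation at offset %" PRIu32 "\n", i);
        }
        /* fallthru */
    case CHUNK_16:
        for (; i + 16 <= oprsz; i += 16) {
            printf("emit 16-byte operation at offset %" PRIu32 "\n", i);
        }
        break;
    default:
        printf("fall back to integer or out-of-line helpers\n");
        break;
    }
}

int main(void)
{
    expand(80, CHUNK_32);   /* two 32-byte chunks, then one 16-byte chunk */
    return 0;
}
```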
@@ -918,53 +962,64 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                     uint32_t maxsz, int64_t c, const GVecGen2i *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
       Expand with successively smaller host vector sizes. The intent is
       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16. */

    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16. The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2i_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            goto done;
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
    }

    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
               && g->fniv && check_size_impl(oprsz, 8)
               && (!g->opc
                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
        break;
    case TCG_TYPE_V64:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
        expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
        expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
    } else {
        if (g->fno) {
            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            TCGv_i64 tcg_c = tcg_const_i64(c);
            tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz, maxsz, c, g->fnoi);
            tcg_temp_free_i64(tcg_c);
            if (g->fno) {
                tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
            } else {
                TCGv_i64 tcg_c = tcg_const_i64(c);
                tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
                                    maxsz, c, g->fnoi);
                tcg_temp_free_i64(tcg_c);
            }
            return;
        }
        return;
        break;

    default:
        g_assert_not_reached();
    }

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }

@@ -981,37 +1036,30 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,

    type = 0;
    if (g->fniv) {
        if (TCG_TARGET_HAS_v256 && check_size_impl(oprsz, 32)) {
            type = TCG_TYPE_V256;
        } else if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)) {
            type = TCG_TYPE_V128;
        } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
                   && check_size_impl(oprsz, 8)) {
            type = TCG_TYPE_V64;
        }
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);
        uint32_t some;

        tcg_gen_dup_i64_vec(g->vece, t_vec, c);

        /* Recall that ARM SVE allows vector sizes that are not a power of 2.
           Expand with successively smaller host vector sizes. The intent is
           that e.g. oprsz == 80 would be expanded with 2x32 + 1x16. */
        switch (type) {
        case TCG_TYPE_V256:
            {
                uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
                expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                              t_vec, g->scalar_first, g->fniv);
                if (some == oprsz) {
                    break;
                }
                dofs += some;
                aofs += some;
                oprsz -= some;
                maxsz -= some;
            /* Recall that ARM SVE allows vector sizes that are not a
             * power of 2, but always a multiple of 16. The intent is
             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
             */
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                          t_vec, g->scalar_first, g->fniv);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */

        case TCG_TYPE_V128:

@@ -1055,48 +1103,60 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
       Expand with successively smaller host vector sizes. The intent is
       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16. */

    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16. The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                     g->load_dest, g->fniv);
        if (some == oprsz) {
            goto done;
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
    }

    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                     g->load_dest, g->fniv);
    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
               && g->fniv && check_size_impl(oprsz, 8)
               && (!g->opc
                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
        break;
    case TCG_TYPE_V64:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                     g->load_dest, g->fniv);
    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
        expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
        expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
    } else {
        assert(g->fno != NULL);
        tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, g->data, g->fno);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
                               maxsz, g->data, g->fno);
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }

@@ -1106,20 +1166,27 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
    check_overlap_4(dofs, aofs, bofs, cofs, maxsz);

    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
       Expand with successively smaller host vector sizes. The intent is
       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16. */

    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16. The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
                     32, TCG_TYPE_V256, g->fniv);
        if (some == oprsz) {
            goto done;
            break;
        }
        dofs += some;
        aofs += some;

@@ -1127,30 +1194,33 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
        cofs += some;
        oprsz -= some;
        maxsz -= some;
    }

    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
        /* fallthru */
    case TCG_TYPE_V128:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     16, TCG_TYPE_V128, g->fniv);
    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
               && g->fniv && check_size_impl(oprsz, 8)
               && (!g->opc
                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
        break;
    case TCG_TYPE_V64:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     8, TCG_TYPE_V64, g->fniv);
    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
        expand_4_i64(dofs, aofs, bofs, cofs, oprsz, g->fni8);
    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
        expand_4_i32(dofs, aofs, bofs, cofs, oprsz, g->fni4);
    } else {
        assert(g->fno != NULL);
        tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
                           oprsz, maxsz, g->data, g->fno);
        return;
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_4_i64(dofs, aofs, bofs, cofs, oprsz, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_4_i32(dofs, aofs, bofs, cofs, oprsz, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
                               oprsz, maxsz, g->data, g->fno);
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }

@@ -2155,6 +2225,8 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

@@ -2165,51 +2237,59 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
        return;
    }

    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
       Expand with successively smaller host vector sizes. The intent is
       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16. */

    if (TCG_TARGET_HAS_v256 && check_size_impl(oprsz, 32)
        && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V256, vece)) {
        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and 64-bit comparison.
     */
    type = choose_vector_type(INDEX_op_cmp_vec, vece, oprsz,
                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16. The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
        if (some == oprsz) {
            goto done;
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
    }

    if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)
        && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V128, vece)) {
        /* fallthru */
    case TCG_TYPE_V128:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
    } else if (TCG_TARGET_HAS_v64
               && check_size_impl(oprsz, 8)
               && (TCG_TARGET_REG_BITS == 32 || vece != MO_64)
               && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V64, vece)) {
        break;
    case TCG_TYPE_V64:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
        expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
    } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
        expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
    } else {
        gen_helper_gvec_3 * const *fn = fns[cond];
        break;

        if (fn == NULL) {
            uint32_t tmp;
            tmp = aofs, aofs = bofs, bofs = tmp;
            cond = tcg_swap_cond(cond);
            fn = fns[cond];
            assert(fn != NULL);
    case 0:
        if (vece == MO_64 && check_size_impl(oprsz, 8)) {
            expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
        } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
            expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
        } else {
            gen_helper_gvec_3 * const *fn = fns[cond];

            if (fn == NULL) {
                uint32_t tmp;
                tmp = aofs, aofs = bofs, bofs = tmp;
                cond = tcg_swap_cond(cond);
                fn = fns[cond];
                assert(fn != NULL);
            }
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
            return;
        }
        tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
        return;
        break;

    default:
        g_assert_not_reached();
    }

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }

tcg/tcg-op.c | 24

@@ -277,9 +277,15 @@ void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,

void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_mul_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
    if (arg2 == 0) {
        tcg_gen_movi_i32(ret, 0);
    } else if (is_power_of_2(arg2)) {
        tcg_gen_shli_i32(ret, arg1, ctz32(arg2));
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_mul_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

@@ -1430,9 +1436,15 @@ void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,

void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_mul_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
    if (arg2 == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else if (is_power_of_2(arg2)) {
        tcg_gen_shli_i64(ret, arg1, ctz64(arg2));
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_mul_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)

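The rewritten tcg_gen_muli_i32/i64 above special-case a constant of zero and constants that are powers of two. A stand-alone sketch of the arithmetic identity those fast paths rely on (not TCG code; the helper name is invented for the illustration):

```c
#include <assert.h>
#include <stdint.h>

/* Model of the strength reduction: x * 0 == 0, and for a positive
 * power of two c == 2^k, x * c == x << k.  Everything else falls
 * back to an ordinary multiply. */
static int64_t mul_by_const(int64_t x, int64_t c)
{
    if (c == 0) {
        return 0;
    } else if (c > 0 && (c & (c - 1)) == 0) {    /* power of two */
        return x << __builtin_ctzll((unsigned long long)c);
    } else {
        return x * c;                            /* general case */
    }
}

int main(void)
{
    assert(mul_by_const(7, 0) == 0);
    assert(mul_by_const(7, 8) == 7 * 8);     /* shift path */
    assert(mul_by_const(7, 12) == 7 * 12);   /* multiply path */
    return 0;
}
```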