host-utils: Use __int128_t for mul[us]64
Replace some x86_64 specific inline assembly with something that all 64-bit hosts ought to optimize well. At worst this becomes a call to the gcc __multi3 routine, which is no worse than our implementation in util/host-utils.c.

With gcc 4.7, we get identical code generation for x86_64. We now get native multiplication on ia64 and s390x hosts. With minor improvements to gcc we can get it for ppc64 as well.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
This commit is contained in:
parent be96bd3fbf
commit f540166b7d
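As a rough illustration of the approach the message describes (this is a hypothetical stand-alone sketch, not code from the patch, and it assumes a compiler that provides __uint128_t): the full 64x64->128 product is formed with the wide type and then split into low and high halves.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch mirroring the shape of the QEMU helper;
 * not the patch itself. */
static inline void mulu64_sketch(uint64_t *plow, uint64_t *phigh,
                                 uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;  /* full 128-bit product */
    *plow  = (uint64_t)r;                /* low 64 bits */
    *phigh = (uint64_t)(r >> 64);        /* high 64 bits */
}

int main(void)
{
    uint64_t lo, hi;
    mulu64_sketch(&lo, &hi, UINT64_MAX, UINT64_MAX);
    /* (2^64 - 1)^2 = 2^128 - 2^65 + 1, so high = 2^64 - 2, low = 1 */
    printf("high = %llu, low = %llu\n",
           (unsigned long long)hi, (unsigned long long)lo);
    return 0;
}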
configure
@@ -3150,6 +3150,22 @@ if compile_prog "" "" ; then
   cpuid_h=yes
 fi
 
+########################################
+# check if __[u]int128_t is usable.
+
+int128=no
+cat > $TMPC << EOF
+__int128_t a;
+__uint128_t b;
+int main (void) {
+  a = a + b;
+  b = a * b;
+  return 0;
+}
+EOF
+if compile_prog "" "" ; then
+  int128=yes
+fi
+
 ##########################################
 # End of CC checks
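The configure probe above only checks that the types compile. As a hedged aside (not part of the patch), one way to see what the compiler actually generates for the wide multiply is a one-function test file: on x86_64 an optimizing build should reduce it to a single mul instruction, while a host without a native 64x64->128 multiply gets a call to libgcc's __multi3, as the commit message notes. The file name and build command below are illustrative.

/* int128_codegen.c -- hypothetical probe, not from this commit.
 * Build with:  cc -O2 -S int128_codegen.c   then inspect int128_codegen.s */
typedef unsigned __int128 u128;

u128 widening_mul(unsigned long long a, unsigned long long b)
{
    /* Either a native widening multiply or a __multi3 call, host-dependent. */
    return (u128)a * b;
}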
@@ -3692,6 +3708,10 @@ if test "$cpuid_h" = "yes" ; then
   echo "CONFIG_CPUID_H=y" >> $config_host_mak
 fi
 
+if test "$int128" = "yes" ; then
+  echo "CONFIG_INT128=y" >> $config_host_mak
+fi
+
 if test "$glusterfs" = "yes" ; then
   echo "CONFIG_GLUSTERFS=y" >> $config_host_mak
 fi
include/qemu/host-utils.h
@@ -28,22 +28,21 @@
 #include "qemu/compiler.h"   /* QEMU_GNUC_PREREQ */
 #include <limits.h>
 
-#if defined(__x86_64__)
-#define __HAVE_FAST_MULU64__
+#ifdef CONFIG_INT128
 static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                           uint64_t a, uint64_t b)
 {
-    __asm__ ("mul %0\n\t"
-             : "=d" (*phigh), "=a" (*plow)
-             : "a" (a), "0" (b));
+    __uint128_t r = (__uint128_t)a * b;
+    *plow = r;
+    *phigh = r >> 64;
 }
-#define __HAVE_FAST_MULS64__
+
 static inline void muls64(uint64_t *plow, uint64_t *phigh,
                           int64_t a, int64_t b)
 {
-    __asm__ ("imul %0\n\t"
-             : "=d" (*phigh), "=a" (*plow)
-             : "a" (a), "0" (b));
+    __int128_t r = (__int128_t)a * b;
+    *plow = r;
+    *phigh = r >> 64;
 }
 #else
 void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b);
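For context, an illustrative (hypothetical) caller: target code uses mulu64()/muls64() to obtain the full 128-bit product of two 64-bit values, and the same call works whether the CONFIG_INT128 inline version above or the out-of-line fallback is in effect. The helper name below is made up for the example.

#include <stdint.h>
#include "qemu/host-utils.h"   /* declares mulu64() and muls64() */

/* Hypothetical helper: return only the high half of an unsigned
 * 64x64 multiply, as a mulhu-style instruction would. */
static uint64_t helper_mulhu_sketch(uint64_t a, uint64_t b)
{
    uint64_t lo, hi;
    mulu64(&lo, &hi, a, b);   /* lo = low 64 bits, hi = high 64 bits */
    return hi;
}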
util/host-utils.c
@@ -30,7 +30,7 @@
 //#define DEBUG_MULDIV
 
 /* Long integer helpers */
-#if !defined(__x86_64__)
+#ifndef CONFIG_INT128
 static void add128 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
 {
     *plow += a;
@@ -102,4 +102,4 @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
              a, b, *phigh, *plow);
 #endif
 }
-#endif /* !defined(__x86_64__) */
+#endif /* !CONFIG_INT128 */
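When CONFIG_INT128 is not available, util/host-utils.c keeps providing mulu64()/muls64() out of line. As a rough sketch of the usual technique such a fallback relies on (illustrative only, not the file's exact code), the 128-bit product can be assembled from four 32x32->64 partial products:

#include <stdint.h>

/* Sketch of a portable 64x64->128 multiply built from 32-bit limbs;
 * illustrative, not the exact code in util/host-utils.c. */
static void mulu64_portable(uint64_t *plow, uint64_t *phigh,
                            uint64_t a, uint64_t b)
{
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;

    uint64_t p0 = a_lo * b_lo;            /* bits   0..63  */
    uint64_t p1 = a_lo * b_hi;            /* bits  32..95  */
    uint64_t p2 = a_hi * b_lo;            /* bits  32..95  */
    uint64_t p3 = a_hi * b_hi;            /* bits  64..127 */

    /* Accumulate the middle terms, tracking the carry into the high word. */
    uint64_t mid = (p0 >> 32) + (uint32_t)p1 + (uint32_t)p2;

    *plow  = (mid << 32) | (uint32_t)p0;
    *phigh = p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}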