Import Upstream version 8.1.0+r23

commit 30fcec3ad3

@@ -0,0 +1,215 @@
// Note that some host libraries have the same module name as the target
// libraries. This is currently needed to build, for example, adb. But it's
// probably something that should be changed.

// Pull in the autogenerated sources modules
build = ["sources.bp"]

// Used by libcrypto, libssl, bssl tool, and native tests
cc_defaults {
    name: "boringssl_flags",
    vendor_available: true,

    cflags: [
        "-fvisibility=hidden",
        "-DBORINGSSL_SHARED_LIBRARY",
        "-DBORINGSSL_IMPLEMENTATION",
        "-DOPENSSL_SMALL",
        "-D_XOPEN_SOURCE=700",
        "-Wno-unused-parameter",
    ],

    cppflags: [
        "-Wall",
        "-Werror",
    ],

    conlyflags: ["-std=c99"],
}

// Used by libcrypto + libssl
cc_defaults {
    name: "boringssl_defaults",

    local_include_dirs: ["src/include"],
    export_include_dirs: ["src/include"],
    stl: "libc++_static",
    sdk_version: "9",

    cflags: ["-DBORINGSSL_ANDROID_SYSTEM"],
}

//// libcrypto

// This should be removed when clang can compile everything.
libcrypto_sources_no_clang = [
    "linux-arm/crypto/fipsmodule/aes-armv4.S",
    "linux-arm/crypto/fipsmodule/bsaes-armv7.S",
]

cc_defaults {
    name: "libcrypto_defaults",
    host_supported: true,

    // Windows and Macs both have problems with assembly files
    target: {
        windows: {
            enabled: true,
            cflags: ["-DOPENSSL_NO_ASM"],
            host_ldlibs: ["-lws2_32"],
        },
        darwin: {
            cflags: ["-DOPENSSL_NO_ASM"],
        },
        host: {
            host_ldlibs: ["-lpthread"],
        },
    },

    local_include_dirs: ["src/crypto"],

    arch: {
        arm64: {
            clang_asflags: ["-march=armv8-a+crypto"],
        },
    },

    // This should be removed when clang can compile everything.
    exclude_srcs: libcrypto_sources_no_clang,
    whole_static_libs: ["libcrypto_no_clang"],
}

// Target and host library
cc_library {
    name: "libcrypto",
    vendor_available: true,
    vndk: {
        enabled: true,
    },
    defaults: ["libcrypto_sources", "libcrypto_defaults", "boringssl_defaults", "boringssl_flags"],
    unique_host_soname: true,
}

// Target and host library: files that don't compile with clang. This should
// go away when clang can compile everything with integrated assembler.
cc_library_static {
    name: "libcrypto_no_clang",
    defaults: ["boringssl_defaults", "boringssl_flags"],
    host_supported: true,

    target: {
        windows: {
            enabled: true,
        },
    },

    local_include_dirs: ["src/crypto"],

    arch: {
        arm: {
            clang_asflags: ["-no-integrated-as"],
            srcs: libcrypto_sources_no_clang,
        },
    },
}

// Static library
// This should only be used for host modules that will be in a JVM, all other
// modules should use the static variant of libcrypto.
cc_library_static {
    name: "libcrypto_static",
    defaults: ["libcrypto_sources", "libcrypto_defaults", "boringssl_defaults", "boringssl_flags"],
    cflags: ["-DOPENSSL_NO_CXX"],

    target: {
        host: {
            // TODO: b/26160319. ASAN breaks use of this library in JVM.
            // Re-enable sanitization when the issue with making clients of this library
            // preload ASAN runtime is resolved. Without that, clients are getting runtime
            // errors due to unresolved ASAN symbols, such as
            // __asan_option_detect_stack_use_after_return.
            sanitize: {
                never: true,
            },
        },
    },
}

//// libssl

// Target static library
// Deprecated: all users should move to libssl
cc_library_static {
    name: "libssl_static",
    defaults: ["libssl_sources", "boringssl_defaults", "boringssl_flags"],
}

// Static and Shared library
cc_library {
    name: "libssl",
    vendor_available: true,
    vndk: {
        enabled: true,
    },
    host_supported: true,
    defaults: ["libssl_sources", "boringssl_defaults", "boringssl_flags"],
    unique_host_soname: true,

    shared_libs: ["libcrypto"],
}

// Tool
cc_binary {
    name: "bssl",
    host_supported: true,
    defaults: ["bssl_sources", "boringssl_flags"],

    shared_libs: [
        "libcrypto",
        "libssl",
    ],
    target: {
        host: {
            // Needed for clock_gettime.
            host_ldlibs: ["-lrt"],
        },
        darwin: {
            enabled: false,
        },
    },
}

// Test support library
cc_library_static {
    name: "boringssl_test_support",
    host_supported: true,
    defaults: ["boringssl_test_support_sources", "boringssl_flags"],

    shared_libs: [
        "libcrypto",
        "libssl",
    ],
}

// Tests
cc_test {
    name: "boringssl_crypto_test",
    test_suites: ["device-tests"],
    host_supported: true,
    defaults: ["boringssl_crypto_test_sources", "boringssl_flags"],
    whole_static_libs: ["boringssl_test_support"],

    cflags: ["-DBORINGSSL_ANDROID_SYSTEM"],
    shared_libs: ["libcrypto"],
}

cc_test {
    name: "boringssl_ssl_test",
    test_suites: ["device-tests"],
    host_supported: true,
    defaults: ["boringssl_ssl_test_sources", "boringssl_flags"],
    whole_static_libs: ["boringssl_test_support"],

    cflags: ["-DBORINGSSL_ANDROID_SYSTEM"],
    shared_libs: ["libcrypto", "libssl"],
}
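As a quick orientation for anyone picking up this import, the modules defined above are built with the normal Soong workflow; a minimal sketch, assuming a configured AOSP checkout (the lunch target name is only an example):

    # Build the BoringSSL libraries, the bssl tool and both GTest binaries.
    source build/envsetup.sh
    lunch aosp_arm64-eng
    m libcrypto libssl bssl boringssl_crypto_test boringssl_ssl_test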
@@ -0,0 +1,31 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2017 The Android Open Source Project

     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
     limitations under the License.
-->
<configuration description="Config for BoringSSL device tests">
    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
        <option name="cleanup" value="true" />
        <option name="push" value="boringssl_ssl_test->/data/local/tmp/boringssl_ssl_test" />
        <option name="push" value="boringssl_crypto_test->/data/local/tmp/boringssl_crypto_test" />
    </target_preparer>
    <option name="test-suite-tag" value="apct" />
    <test class="com.android.tradefed.testtype.GTest" >
        <option name="native-test-device-path" value="/data/local/tmp/boringssl_ssl_test" />
        <option name="module-name" value="boringssl_ssl_test" />
    </test>
    <test class="com.android.tradefed.testtype.GTest" >
        <option name="native-test-device-path" value="/data/local/tmp/boringssl_crypto_test" />
        <option name="module-name" value="boringssl_crypto_test" />
    </test>
</configuration>
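The TradeFed config above just pushes each test directory to /data/local/tmp and runs it as a GTest module; a rough manual equivalent, assuming the binaries were built as sketched earlier (the host-side path under $OUT and the binary layout vary by product and ABI, so treat these paths as illustrative):

    # Push and run one of the device test binaries by hand.
    adb push "$OUT/data/nativetest64/boringssl_crypto_test" /data/local/tmp/boringssl_crypto_test
    adb shell /data/local/tmp/boringssl_crypto_test/boringssl_crypto_test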
@@ -0,0 +1 @@
14308731e5446a73ac2258688a9688b524483cb6
@@ -0,0 +1,127 @@

  LICENSE ISSUES
  ==============

  The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  the OpenSSL License and the original SSLeay license apply to the toolkit.
  See below for the actual license texts. Actually both licenses are BSD-style
  Open Source licenses. In case of any license issues related to OpenSSL
  please contact openssl-core@openssl.org.

  OpenSSL License
  ---------------

/* ====================================================================
 * Copyright (c) 1998-2011 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

 Original SSLeay License
 -----------------------

/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */
@@ -0,0 +1,25 @@
#!/bin/sh

set -xe

old_revision=$(cat BORINGSSL_REVISION)
rm -Rf src
git clone https://boringssl.googlesource.com/boringssl src
cd src
new_revision=$(git show -s --pretty=%H)
cd ..
echo ${new_revision} > BORINGSSL_REVISION

rm -Rf src/.git
rm -Rf src/fuzz
rm -Rf src/third_party/googletest
rm -Rf linux-aarch64/ linux-arm/ linux-x86/ linux-x86_64/ mac-x86/ mac-x86_64/ win-x86_64/ win-x86/
python src/util/generate_build_files.py android

git add .
git commit -m "external/boringssl: Sync to ${new_revision}.

This includes the following changes:

https://boringssl.googlesource.com/boringssl/+log/${old_revision}..${new_revision}
"
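Before running the sync script above, it can help to see what an import would pull in; a small sketch using a throwaway clone (the /tmp path is arbitrary, and this preview step is not part of the script):

    # List the upstream commits that the next sync would bring in.
    old=$(cat BORINGSSL_REVISION)
    git clone --quiet https://boringssl.googlesource.com/boringssl /tmp/boringssl-preview
    git -C /tmp/boringssl-preview log --oneline "${old}..HEAD"
    rm -rf /tmp/boringssl-preview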
@@ -0,0 +1,12 @@
# Now used only by Trusty
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/sources.mk
include $(LOCAL_PATH)/sources.mk

LOCAL_CFLAGS += -I$(LOCAL_PATH)/src/include -I$(LOCAL_PATH)/src/crypto -Wno-unused-parameter -DBORINGSSL_ANDROID_SYSTEM
LOCAL_ASFLAGS += -I$(LOCAL_PATH)/src/include -I$(LOCAL_PATH)/src/crypto -Wno-unused-parameter
# Do not add in the architecture-specific files if we don't want to build assembly
LOCAL_SRC_FILES_x86 := $(linux_x86_sources)
LOCAL_SRC_FILES_x86_64 := $(linux_x86_64_sources)
LOCAL_SRC_FILES_arm := $(linux_arm_sources)
LOCAL_SRC_FILES_arm64 := $(linux_aarch64_sources)
LOCAL_SRC_FILES += $(crypto_sources)
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,755 @@
|
|||
#include <openssl/arm_arch.h>
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.text
|
||||
#if !defined(__clang__) || defined(BORINGSSL_CLANG_SUPPORTS_DOT_ARCH)
|
||||
|
||||
#endif
|
||||
.align 5
|
||||
Lrcon:
|
||||
.long 0x01,0x01,0x01,0x01
|
||||
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
|
||||
.long 0x1b,0x1b,0x1b,0x1b
|
||||
|
||||
.globl _aes_hw_set_encrypt_key
|
||||
.private_extern _aes_hw_set_encrypt_key
|
||||
|
||||
.align 5
|
||||
_aes_hw_set_encrypt_key:
|
||||
Lenc_key:
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
mov x3,#-1
|
||||
cmp x0,#0
|
||||
b.eq Lenc_key_abort
|
||||
cmp x2,#0
|
||||
b.eq Lenc_key_abort
|
||||
mov x3,#-2
|
||||
cmp w1,#128
|
||||
b.lt Lenc_key_abort
|
||||
cmp w1,#256
|
||||
b.gt Lenc_key_abort
|
||||
tst w1,#0x3f
|
||||
b.ne Lenc_key_abort
|
||||
|
||||
adr x3,Lrcon
|
||||
cmp w1,#192
|
||||
|
||||
eor v0.16b,v0.16b,v0.16b
|
||||
ld1 {v3.16b},[x0],#16
|
||||
mov w1,#8 // reuse w1
|
||||
ld1 {v1.4s,v2.4s},[x3],#32
|
||||
|
||||
b.lt Loop128
|
||||
b.eq L192
|
||||
b L256
|
||||
|
||||
.align 4
|
||||
Loop128:
|
||||
tbl v6.16b,{v3.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v3.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
subs w1,w1,#1
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
b.ne Loop128
|
||||
|
||||
ld1 {v1.4s},[x3]
|
||||
|
||||
tbl v6.16b,{v3.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v3.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
|
||||
tbl v6.16b,{v3.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v3.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
st1 {v3.4s},[x2]
|
||||
add x2,x2,#0x50
|
||||
|
||||
mov w12,#10
|
||||
b Ldone
|
||||
|
||||
.align 4
|
||||
L192:
|
||||
ld1 {v4.8b},[x0],#8
|
||||
movi v6.16b,#8 // borrow v6.16b
|
||||
st1 {v3.4s},[x2],#16
|
||||
sub v2.16b,v2.16b,v6.16b // adjust the mask
|
||||
|
||||
Loop192:
|
||||
tbl v6.16b,{v4.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v4.8b},[x2],#8
|
||||
aese v6.16b,v0.16b
|
||||
subs w1,w1,#1
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
|
||||
dup v5.4s,v3.s[3]
|
||||
eor v5.16b,v5.16b,v4.16b
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
ext v4.16b,v0.16b,v4.16b,#12
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
eor v4.16b,v4.16b,v6.16b
|
||||
st1 {v3.4s},[x2],#16
|
||||
b.ne Loop192
|
||||
|
||||
mov w12,#12
|
||||
add x2,x2,#0x20
|
||||
b Ldone
|
||||
|
||||
.align 4
|
||||
L256:
|
||||
ld1 {v4.16b},[x0]
|
||||
mov w1,#7
|
||||
mov w12,#14
|
||||
st1 {v3.4s},[x2],#16
|
||||
|
||||
Loop256:
|
||||
tbl v6.16b,{v4.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v4.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
subs w1,w1,#1
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
st1 {v3.4s},[x2],#16
|
||||
b.eq Ldone
|
||||
|
||||
dup v6.4s,v3.s[3] // just splat
|
||||
ext v5.16b,v0.16b,v4.16b,#12
|
||||
aese v6.16b,v0.16b
|
||||
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
|
||||
eor v4.16b,v4.16b,v6.16b
|
||||
b Loop256
|
||||
|
||||
Ldone:
|
||||
str w12,[x2]
|
||||
mov x3,#0
|
||||
|
||||
Lenc_key_abort:
|
||||
mov x0,x3 // return value
|
||||
ldr x29,[sp],#16
|
||||
ret
|
||||
|
||||
|
||||
.globl _aes_hw_set_decrypt_key
|
||||
.private_extern _aes_hw_set_decrypt_key
|
||||
|
||||
.align 5
|
||||
_aes_hw_set_decrypt_key:
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
bl Lenc_key
|
||||
|
||||
cmp x0,#0
|
||||
b.ne Ldec_key_abort
|
||||
|
||||
sub x2,x2,#240 // restore original x2
|
||||
mov x4,#-16
|
||||
add x0,x2,x12,lsl#4 // end of key schedule
|
||||
|
||||
ld1 {v0.4s},[x2]
|
||||
ld1 {v1.4s},[x0]
|
||||
st1 {v0.4s},[x0],x4
|
||||
st1 {v1.4s},[x2],#16
|
||||
|
||||
Loop_imc:
|
||||
ld1 {v0.4s},[x2]
|
||||
ld1 {v1.4s},[x0]
|
||||
aesimc v0.16b,v0.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
st1 {v0.4s},[x0],x4
|
||||
st1 {v1.4s},[x2],#16
|
||||
cmp x0,x2
|
||||
b.hi Loop_imc
|
||||
|
||||
ld1 {v0.4s},[x2]
|
||||
aesimc v0.16b,v0.16b
|
||||
st1 {v0.4s},[x0]
|
||||
|
||||
eor x0,x0,x0 // return value
|
||||
Ldec_key_abort:
|
||||
ldp x29,x30,[sp],#16
|
||||
ret
|
||||
|
||||
.globl _aes_hw_encrypt
|
||||
.private_extern _aes_hw_encrypt
|
||||
|
||||
.align 5
|
||||
_aes_hw_encrypt:
|
||||
ldr w3,[x2,#240]
|
||||
ld1 {v0.4s},[x2],#16
|
||||
ld1 {v2.16b},[x0]
|
||||
sub w3,w3,#2
|
||||
ld1 {v1.4s},[x2],#16
|
||||
|
||||
Loop_enc:
|
||||
aese v2.16b,v0.16b
|
||||
aesmc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2],#16
|
||||
subs w3,w3,#2
|
||||
aese v2.16b,v1.16b
|
||||
aesmc v2.16b,v2.16b
|
||||
ld1 {v1.4s},[x2],#16
|
||||
b.gt Loop_enc
|
||||
|
||||
aese v2.16b,v0.16b
|
||||
aesmc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2]
|
||||
aese v2.16b,v1.16b
|
||||
eor v2.16b,v2.16b,v0.16b
|
||||
|
||||
st1 {v2.16b},[x1]
|
||||
ret
|
||||
|
||||
.globl _aes_hw_decrypt
|
||||
.private_extern _aes_hw_decrypt
|
||||
|
||||
.align 5
|
||||
_aes_hw_decrypt:
|
||||
ldr w3,[x2,#240]
|
||||
ld1 {v0.4s},[x2],#16
|
||||
ld1 {v2.16b},[x0]
|
||||
sub w3,w3,#2
|
||||
ld1 {v1.4s},[x2],#16
|
||||
|
||||
Loop_dec:
|
||||
aesd v2.16b,v0.16b
|
||||
aesimc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2],#16
|
||||
subs w3,w3,#2
|
||||
aesd v2.16b,v1.16b
|
||||
aesimc v2.16b,v2.16b
|
||||
ld1 {v1.4s},[x2],#16
|
||||
b.gt Loop_dec
|
||||
|
||||
aesd v2.16b,v0.16b
|
||||
aesimc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2]
|
||||
aesd v2.16b,v1.16b
|
||||
eor v2.16b,v2.16b,v0.16b
|
||||
|
||||
st1 {v2.16b},[x1]
|
||||
ret
|
||||
|
||||
.globl _aes_hw_cbc_encrypt
|
||||
.private_extern _aes_hw_cbc_encrypt
|
||||
|
||||
.align 5
|
||||
_aes_hw_cbc_encrypt:
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
subs x2,x2,#16
|
||||
mov x8,#16
|
||||
b.lo Lcbc_abort
|
||||
csel x8,xzr,x8,eq
|
||||
|
||||
cmp w5,#0 // en- or decrypting?
|
||||
ldr w5,[x3,#240]
|
||||
and x2,x2,#-16
|
||||
ld1 {v6.16b},[x4]
|
||||
ld1 {v0.16b},[x0],x8
|
||||
|
||||
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
|
||||
sub w5,w5,#6
|
||||
add x7,x3,x5,lsl#4 // pointer to last 7 round keys
|
||||
sub w5,w5,#2
|
||||
ld1 {v18.4s,v19.4s},[x7],#32
|
||||
ld1 {v20.4s,v21.4s},[x7],#32
|
||||
ld1 {v22.4s,v23.4s},[x7],#32
|
||||
ld1 {v7.4s},[x7]
|
||||
|
||||
add x7,x3,#32
|
||||
mov w6,w5
|
||||
b.eq Lcbc_dec
|
||||
|
||||
cmp w5,#2
|
||||
eor v0.16b,v0.16b,v6.16b
|
||||
eor v5.16b,v16.16b,v7.16b
|
||||
b.eq Lcbc_enc128
|
||||
|
||||
ld1 {v2.4s,v3.4s},[x7]
|
||||
add x7,x3,#16
|
||||
add x6,x3,#16*4
|
||||
add x12,x3,#16*5
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
add x14,x3,#16*6
|
||||
add x3,x3,#16*7
|
||||
b Lenter_cbc_enc
|
||||
|
||||
.align 4
|
||||
Loop_cbc_enc:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
st1 {v6.16b},[x1],#16
|
||||
Lenter_cbc_enc:
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v2.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.4s},[x6]
|
||||
cmp w5,#4
|
||||
aese v0.16b,v3.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v17.4s},[x12]
|
||||
b.eq Lcbc_enc192
|
||||
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.4s},[x14]
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v17.4s},[x3]
|
||||
nop
|
||||
|
||||
Lcbc_enc192:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
subs x2,x2,#16
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
csel x8,xzr,x8,eq
|
||||
aese v0.16b,v18.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v19.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.16b},[x0],x8
|
||||
aese v0.16b,v20.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
eor v16.16b,v16.16b,v5.16b
|
||||
aese v0.16b,v21.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v17.4s},[x7] // re-pre-load rndkey[1]
|
||||
aese v0.16b,v22.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v23.16b
|
||||
eor v6.16b,v0.16b,v7.16b
|
||||
b.hs Loop_cbc_enc
|
||||
|
||||
st1 {v6.16b},[x1],#16
|
||||
b Lcbc_done
|
||||
|
||||
.align 5
|
||||
Lcbc_enc128:
|
||||
ld1 {v2.4s,v3.4s},[x7]
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
b Lenter_cbc_enc128
|
||||
Loop_cbc_enc128:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
st1 {v6.16b},[x1],#16
|
||||
Lenter_cbc_enc128:
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
subs x2,x2,#16
|
||||
aese v0.16b,v2.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
csel x8,xzr,x8,eq
|
||||
aese v0.16b,v3.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v18.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v19.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.16b},[x0],x8
|
||||
aese v0.16b,v20.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v21.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v22.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
eor v16.16b,v16.16b,v5.16b
|
||||
aese v0.16b,v23.16b
|
||||
eor v6.16b,v0.16b,v7.16b
|
||||
b.hs Loop_cbc_enc128
|
||||
|
||||
st1 {v6.16b},[x1],#16
|
||||
b Lcbc_done
|
||||
.align 5
|
||||
Lcbc_dec:
|
||||
ld1 {v18.16b},[x0],#16
|
||||
subs x2,x2,#32 // bias
|
||||
add w6,w5,#2
|
||||
orr v3.16b,v0.16b,v0.16b
|
||||
orr v1.16b,v0.16b,v0.16b
|
||||
orr v19.16b,v18.16b,v18.16b
|
||||
b.lo Lcbc_dec_tail
|
||||
|
||||
orr v1.16b,v18.16b,v18.16b
|
||||
ld1 {v18.16b},[x0],#16
|
||||
orr v2.16b,v0.16b,v0.16b
|
||||
orr v3.16b,v1.16b,v1.16b
|
||||
orr v19.16b,v18.16b,v18.16b
|
||||
|
||||
Loop3x_cbc_dec:
|
||||
aesd v0.16b,v16.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aesd v0.16b,v17.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt Loop3x_cbc_dec
|
||||
|
||||
aesd v0.16b,v16.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v4.16b,v6.16b,v7.16b
|
||||
subs x2,x2,#0x30
|
||||
eor v5.16b,v2.16b,v7.16b
|
||||
csel x6,x2,x6,lo // x6, w6, is zero at this point
|
||||
aesd v0.16b,v17.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v17.16b,v3.16b,v7.16b
|
||||
add x0,x0,x6 // x0 is adjusted in such way that
|
||||
// at exit from the loop v1.16b-v18.16b
|
||||
// are loaded with last "words"
|
||||
orr v6.16b,v19.16b,v19.16b
|
||||
mov x7,x3
|
||||
aesd v0.16b,v20.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v20.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v20.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v2.16b},[x0],#16
|
||||
aesd v0.16b,v21.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v21.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v21.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v3.16b},[x0],#16
|
||||
aesd v0.16b,v22.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v22.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v22.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v19.16b},[x0],#16
|
||||
aesd v0.16b,v23.16b
|
||||
aesd v1.16b,v23.16b
|
||||
aesd v18.16b,v23.16b
|
||||
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
|
||||
add w6,w5,#2
|
||||
eor v4.16b,v4.16b,v0.16b
|
||||
eor v5.16b,v5.16b,v1.16b
|
||||
eor v18.16b,v18.16b,v17.16b
|
||||
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
|
||||
st1 {v4.16b},[x1],#16
|
||||
orr v0.16b,v2.16b,v2.16b
|
||||
st1 {v5.16b},[x1],#16
|
||||
orr v1.16b,v3.16b,v3.16b
|
||||
st1 {v18.16b},[x1],#16
|
||||
orr v18.16b,v19.16b,v19.16b
|
||||
b.hs Loop3x_cbc_dec
|
||||
|
||||
cmn x2,#0x30
|
||||
b.eq Lcbc_done
|
||||
nop
|
||||
|
||||
Lcbc_dec_tail:
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt Lcbc_dec_tail
|
||||
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
aesd v1.16b,v20.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v20.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
cmn x2,#0x20
|
||||
aesd v1.16b,v21.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v21.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v5.16b,v6.16b,v7.16b
|
||||
aesd v1.16b,v22.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v22.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v17.16b,v3.16b,v7.16b
|
||||
aesd v1.16b,v23.16b
|
||||
aesd v18.16b,v23.16b
|
||||
b.eq Lcbc_dec_one
|
||||
eor v5.16b,v5.16b,v1.16b
|
||||
eor v17.16b,v17.16b,v18.16b
|
||||
orr v6.16b,v19.16b,v19.16b
|
||||
st1 {v5.16b},[x1],#16
|
||||
st1 {v17.16b},[x1],#16
|
||||
b Lcbc_done
|
||||
|
||||
Lcbc_dec_one:
|
||||
eor v5.16b,v5.16b,v18.16b
|
||||
orr v6.16b,v19.16b,v19.16b
|
||||
st1 {v5.16b},[x1],#16
|
||||
|
||||
Lcbc_done:
|
||||
st1 {v6.16b},[x4]
|
||||
Lcbc_abort:
|
||||
ldr x29,[sp],#16
|
||||
ret
|
||||
|
||||
.globl _aes_hw_ctr32_encrypt_blocks
|
||||
.private_extern _aes_hw_ctr32_encrypt_blocks
|
||||
|
||||
.align 5
|
||||
_aes_hw_ctr32_encrypt_blocks:
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
ldr w5,[x3,#240]
|
||||
|
||||
ldr w8, [x4, #12]
|
||||
ld1 {v0.4s},[x4]
|
||||
|
||||
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
|
||||
sub w5,w5,#4
|
||||
mov x12,#16
|
||||
cmp x2,#2
|
||||
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
|
||||
sub w5,w5,#2
|
||||
ld1 {v20.4s,v21.4s},[x7],#32
|
||||
ld1 {v22.4s,v23.4s},[x7],#32
|
||||
ld1 {v7.4s},[x7]
|
||||
add x7,x3,#32
|
||||
mov w6,w5
|
||||
csel x12,xzr,x12,lo
|
||||
#ifndef __ARMEB__
|
||||
rev w8, w8
|
||||
#endif
|
||||
orr v1.16b,v0.16b,v0.16b
|
||||
add w10, w8, #1
|
||||
orr v18.16b,v0.16b,v0.16b
|
||||
add w8, w8, #2
|
||||
orr v6.16b,v0.16b,v0.16b
|
||||
rev w10, w10
|
||||
mov v1.s[3],w10
|
||||
b.ls Lctr32_tail
|
||||
rev w12, w8
|
||||
sub x2,x2,#3 // bias
|
||||
mov v18.s[3],w12
|
||||
b Loop3x_ctr32
|
||||
|
||||
.align 4
|
||||
Loop3x_ctr32:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
aese v18.16b,v16.16b
|
||||
aesmc v18.16b,v18.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v17.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
aese v18.16b,v17.16b
|
||||
aesmc v18.16b,v18.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt Loop3x_ctr32
|
||||
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v4.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v5.16b,v1.16b
|
||||
ld1 {v2.16b},[x0],#16
|
||||
orr v0.16b,v6.16b,v6.16b
|
||||
aese v18.16b,v16.16b
|
||||
aesmc v18.16b,v18.16b
|
||||
ld1 {v3.16b},[x0],#16
|
||||
orr v1.16b,v6.16b,v6.16b
|
||||
aese v4.16b,v17.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v17.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
ld1 {v19.16b},[x0],#16
|
||||
mov x7,x3
|
||||
aese v18.16b,v17.16b
|
||||
aesmc v17.16b,v18.16b
|
||||
orr v18.16b,v6.16b,v6.16b
|
||||
add w9,w8,#1
|
||||
aese v4.16b,v20.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v20.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
eor v2.16b,v2.16b,v7.16b
|
||||
add w10,w8,#2
|
||||
aese v17.16b,v20.16b
|
||||
aesmc v17.16b,v17.16b
|
||||
eor v3.16b,v3.16b,v7.16b
|
||||
add w8,w8,#3
|
||||
aese v4.16b,v21.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v21.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
eor v19.16b,v19.16b,v7.16b
|
||||
rev w9,w9
|
||||
aese v17.16b,v21.16b
|
||||
aesmc v17.16b,v17.16b
|
||||
mov v0.s[3], w9
|
||||
rev w10,w10
|
||||
aese v4.16b,v22.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v22.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
mov v1.s[3], w10
|
||||
rev w12,w8
|
||||
aese v17.16b,v22.16b
|
||||
aesmc v17.16b,v17.16b
|
||||
mov v18.s[3], w12
|
||||
subs x2,x2,#3
|
||||
aese v4.16b,v23.16b
|
||||
aese v5.16b,v23.16b
|
||||
aese v17.16b,v23.16b
|
||||
|
||||
eor v2.16b,v2.16b,v4.16b
|
||||
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
|
||||
st1 {v2.16b},[x1],#16
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
mov w6,w5
|
||||
st1 {v3.16b},[x1],#16
|
||||
eor v19.16b,v19.16b,v17.16b
|
||||
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
|
||||
st1 {v19.16b},[x1],#16
|
||||
b.hs Loop3x_ctr32
|
||||
|
||||
adds x2,x2,#3
|
||||
b.eq Lctr32_done
|
||||
cmp x2,#1
|
||||
mov x12,#16
|
||||
csel x12,xzr,x12,eq
|
||||
|
||||
Lctr32_tail:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v17.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt Lctr32_tail
|
||||
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v17.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v2.16b},[x0],x12
|
||||
aese v0.16b,v20.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v20.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v3.16b},[x0]
|
||||
aese v0.16b,v21.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v21.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
eor v2.16b,v2.16b,v7.16b
|
||||
aese v0.16b,v22.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v22.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v7.16b
|
||||
aese v0.16b,v23.16b
|
||||
aese v1.16b,v23.16b
|
||||
|
||||
cmp x2,#1
|
||||
eor v2.16b,v2.16b,v0.16b
|
||||
eor v3.16b,v3.16b,v1.16b
|
||||
st1 {v2.16b},[x1],#16
|
||||
b.eq Lctr32_done
|
||||
st1 {v3.16b},[x1]
|
||||
|
||||
Lctr32_done:
|
||||
ldr x29,[sp],#16
|
||||
ret
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
@@ -0,0 +1,233 @@
|
|||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
#if !defined(__clang__) || defined(BORINGSSL_CLANG_SUPPORTS_DOT_ARCH)
|
||||
|
||||
#endif
|
||||
.globl _gcm_init_v8
|
||||
.private_extern _gcm_init_v8
|
||||
|
||||
.align 4
|
||||
_gcm_init_v8:
|
||||
ld1 {v17.2d},[x1] //load input H
|
||||
movi v19.16b,#0xe1
|
||||
shl v19.2d,v19.2d,#57 //0xc2.0
|
||||
ext v3.16b,v17.16b,v17.16b,#8
|
||||
ushr v18.2d,v19.2d,#63
|
||||
dup v17.4s,v17.s[1]
|
||||
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
|
||||
ushr v18.2d,v3.2d,#63
|
||||
sshr v17.4s,v17.4s,#31 //broadcast carry bit
|
||||
and v18.16b,v18.16b,v16.16b
|
||||
shl v3.2d,v3.2d,#1
|
||||
ext v18.16b,v18.16b,v18.16b,#8
|
||||
and v16.16b,v16.16b,v17.16b
|
||||
orr v3.16b,v3.16b,v18.16b //H<<<=1
|
||||
eor v20.16b,v3.16b,v16.16b //twisted H
|
||||
st1 {v20.2d},[x0],#16 //store Htable[0]
|
||||
|
||||
//calculate H^2
|
||||
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
|
||||
pmull v0.1q,v20.1d,v20.1d
|
||||
eor v16.16b,v16.16b,v20.16b
|
||||
pmull2 v2.1q,v20.2d,v20.2d
|
||||
pmull v1.1q,v16.1d,v16.1d
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase
|
||||
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v18.16b,v18.16b,v2.16b
|
||||
eor v22.16b,v0.16b,v18.16b
|
||||
|
||||
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
|
||||
eor v17.16b,v17.16b,v22.16b
|
||||
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
|
||||
st1 {v21.2d,v22.2d},[x0] //store Htable[1..2]
|
||||
|
||||
ret
|
||||
|
||||
.globl _gcm_gmult_v8
|
||||
.private_extern _gcm_gmult_v8
|
||||
|
||||
.align 4
|
||||
_gcm_gmult_v8:
|
||||
ld1 {v17.2d},[x0] //load Xi
|
||||
movi v19.16b,#0xe1
|
||||
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
|
||||
shl v19.2d,v19.2d,#57
|
||||
#ifndef __ARMEB__
|
||||
rev64 v17.16b,v17.16b
|
||||
#endif
|
||||
ext v3.16b,v17.16b,v17.16b,#8
|
||||
|
||||
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
|
||||
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
|
||||
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
|
||||
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
|
||||
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v18.16b,v18.16b,v2.16b
|
||||
eor v0.16b,v0.16b,v18.16b
|
||||
|
||||
#ifndef __ARMEB__
|
||||
rev64 v0.16b,v0.16b
|
||||
#endif
|
||||
ext v0.16b,v0.16b,v0.16b,#8
|
||||
st1 {v0.2d},[x0] //write out Xi
|
||||
|
||||
ret
|
||||
|
||||
.globl _gcm_ghash_v8
|
||||
.private_extern _gcm_ghash_v8
|
||||
|
||||
.align 4
|
||||
_gcm_ghash_v8:
|
||||
ld1 {v0.2d},[x0] //load [rotated] Xi
|
||||
//"[rotated]" means that
|
||||
//loaded value would have
|
||||
//to be rotated in order to
|
||||
//make it appear as in
|
||||
//algorithm specification
|
||||
subs x3,x3,#32 //see if x3 is 32 or larger
|
||||
mov x12,#16 //x12 is used as post-
|
||||
//increment for input pointer;
|
||||
//as loop is modulo-scheduled
|
||||
//x12 is zeroed just in time
|
||||
//to preclude overstepping
|
||||
//inp[len], which means that
|
||||
//last block[s] are actually
|
||||
//loaded twice, but last
|
||||
//copy is not processed
|
||||
ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2
|
||||
movi v19.16b,#0xe1
|
||||
ld1 {v22.2d},[x1]
|
||||
csel x12,xzr,x12,eq //is it time to zero x12?
|
||||
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi
|
||||
ld1 {v16.2d},[x2],#16 //load [rotated] I[0]
|
||||
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
|
||||
#ifndef __ARMEB__
|
||||
rev64 v16.16b,v16.16b
|
||||
rev64 v0.16b,v0.16b
|
||||
#endif
|
||||
ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0]
|
||||
b.lo Lodd_tail_v8 //x3 was less than 32
|
||||
ld1 {v17.2d},[x2],x12 //load [rotated] I[1]
|
||||
#ifndef __ARMEB__
|
||||
rev64 v17.16b,v17.16b
|
||||
#endif
|
||||
ext v7.16b,v17.16b,v17.16b,#8
|
||||
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
|
||||
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
|
||||
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
|
||||
pmull2 v6.1q,v20.2d,v7.2d
|
||||
b Loop_mod2x_v8
|
||||
|
||||
.align 4
|
||||
Loop_mod2x_v8:
|
||||
ext v18.16b,v3.16b,v3.16b,#8
|
||||
subs x3,x3,#32 //is there more data?
|
||||
pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
|
||||
csel x12,xzr,x12,lo //is it time to zero x12?
|
||||
|
||||
pmull v5.1q,v21.1d,v17.1d
|
||||
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
|
||||
pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
|
||||
eor v0.16b,v0.16b,v4.16b //accumulate
|
||||
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
|
||||
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
|
||||
|
||||
eor v2.16b,v2.16b,v6.16b
|
||||
csel x12,xzr,x12,eq //is it time to zero x12?
|
||||
eor v1.16b,v1.16b,v5.16b
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3]
|
||||
#ifndef __ARMEB__
|
||||
rev64 v16.16b,v16.16b
|
||||
#endif
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
|
||||
|
||||
#ifndef __ARMEB__
|
||||
rev64 v17.16b,v17.16b
|
||||
#endif
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
ext v7.16b,v17.16b,v17.16b,#8
|
||||
ext v3.16b,v16.16b,v16.16b,#8
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
|
||||
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v3.16b,v3.16b,v18.16b
|
||||
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
|
||||
eor v3.16b,v3.16b,v0.16b
|
||||
pmull2 v6.1q,v20.2d,v7.2d
|
||||
b.hs Loop_mod2x_v8 //there was at least 32 more bytes
|
||||
|
||||
eor v2.16b,v2.16b,v18.16b
|
||||
ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b
|
||||
adds x3,x3,#32 //re-construct x3
|
||||
eor v0.16b,v0.16b,v2.16b //re-construct v0.16b
|
||||
b.eq Ldone_v8 //is x3 zero?
|
||||
Lodd_tail_v8:
|
||||
ext v18.16b,v0.16b,v0.16b,#8
|
||||
eor v3.16b,v3.16b,v0.16b //inp^=Xi
|
||||
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
|
||||
|
||||
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
|
||||
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
|
||||
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
|
||||
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
|
||||
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v18.16b,v18.16b,v2.16b
|
||||
eor v0.16b,v0.16b,v18.16b
|
||||
|
||||
Ldone_v8:
|
||||
#ifndef __ARMEB__
|
||||
rev64 v0.16b,v0.16b
|
||||
#endif
|
||||
ext v0.16b,v0.16b,v0.16b,#8
|
||||
st1 {v0.2d},[x0] //write out Xi
|
||||
|
||||
ret
|
||||
|
||||
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 2
|
||||
.align 2
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,773 @@
|
|||
#include <openssl/arm_arch.h>
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.text
|
||||
|
||||
|
||||
.code 32
|
||||
#undef __thumb2__
|
||||
.align 5
|
||||
Lrcon:
|
||||
.long 0x01,0x01,0x01,0x01
|
||||
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
|
||||
.long 0x1b,0x1b,0x1b,0x1b
|
||||
|
||||
.globl _aes_hw_set_encrypt_key
|
||||
.private_extern _aes_hw_set_encrypt_key
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _aes_hw_set_encrypt_key
|
||||
#endif
|
||||
.align 5
|
||||
_aes_hw_set_encrypt_key:
|
||||
Lenc_key:
|
||||
mov r3,#-1
|
||||
cmp r0,#0
|
||||
beq Lenc_key_abort
|
||||
cmp r2,#0
|
||||
beq Lenc_key_abort
|
||||
mov r3,#-2
|
||||
cmp r1,#128
|
||||
blt Lenc_key_abort
|
||||
cmp r1,#256
|
||||
bgt Lenc_key_abort
|
||||
tst r1,#0x3f
|
||||
bne Lenc_key_abort
|
||||
|
||||
adr r3,Lrcon
|
||||
cmp r1,#192
|
||||
|
||||
veor q0,q0,q0
|
||||
vld1.8 {q3},[r0]!
|
||||
mov r1,#8 @ reuse r1
|
||||
vld1.32 {q1,q2},[r3]!
|
||||
|
||||
blt Loop128
|
||||
beq L192
|
||||
b L256
|
||||
|
||||
.align 4
|
||||
Loop128:
|
||||
vtbl.8 d20,{q3},d4
|
||||
vtbl.8 d21,{q3},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q3},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
subs r1,r1,#1
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q3,q3,q10
|
||||
bne Loop128
|
||||
|
||||
vld1.32 {q1},[r3]
|
||||
|
||||
vtbl.8 d20,{q3},d4
|
||||
vtbl.8 d21,{q3},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q3},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q3,q3,q10
|
||||
|
||||
vtbl.8 d20,{q3},d4
|
||||
vtbl.8 d21,{q3},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q3},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
veor q3,q3,q10
|
||||
vst1.32 {q3},[r2]
|
||||
add r2,r2,#0x50
|
||||
|
||||
mov r12,#10
|
||||
b Ldone
|
||||
|
||||
.align 4
|
||||
L192:
|
||||
vld1.8 {d16},[r0]!
|
||||
vmov.i8 q10,#8 @ borrow q10
|
||||
vst1.32 {q3},[r2]!
|
||||
vsub.i8 q2,q2,q10 @ adjust the mask
|
||||
|
||||
Loop192:
|
||||
vtbl.8 d20,{q8},d4
|
||||
vtbl.8 d21,{q8},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {d16},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
subs r1,r1,#1
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
|
||||
vdup.32 q9,d7[1]
|
||||
veor q9,q9,q8
|
||||
veor q10,q10,q1
|
||||
vext.8 q8,q0,q8,#12
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q8,q8,q9
|
||||
veor q3,q3,q10
|
||||
veor q8,q8,q10
|
||||
vst1.32 {q3},[r2]!
|
||||
bne Loop192
|
||||
|
||||
mov r12,#12
|
||||
add r2,r2,#0x20
|
||||
b Ldone
|
||||
|
||||
.align 4
|
||||
L256:
|
||||
vld1.8 {q8},[r0]
|
||||
mov r1,#7
|
||||
mov r12,#14
|
||||
vst1.32 {q3},[r2]!
|
||||
|
||||
Loop256:
|
||||
vtbl.8 d20,{q8},d4
|
||||
vtbl.8 d21,{q8},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q8},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
subs r1,r1,#1
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q3,q3,q10
|
||||
vst1.32 {q3},[r2]!
|
||||
beq Ldone
|
||||
|
||||
vdup.32 q10,d7[1]
|
||||
vext.8 q9,q0,q8,#12
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
|
||||
veor q8,q8,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q8,q8,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q8,q8,q9
|
||||
|
||||
veor q8,q8,q10
|
||||
b Loop256
|
||||
|
||||
Ldone:
|
||||
str r12,[r2]
|
||||
mov r3,#0
|
||||
|
||||
Lenc_key_abort:
|
||||
mov r0,r3 @ return value
|
||||
|
||||
bx lr
|
||||
|
||||
|
||||
.globl _aes_hw_set_decrypt_key
|
||||
.private_extern _aes_hw_set_decrypt_key
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _aes_hw_set_decrypt_key
|
||||
#endif
|
||||
.align 5
|
||||
_aes_hw_set_decrypt_key:
|
||||
stmdb sp!,{r4,lr}
|
||||
bl Lenc_key
|
||||
|
||||
cmp r0,#0
|
||||
bne Ldec_key_abort
|
||||
|
||||
sub r2,r2,#240 @ restore original r2
|
||||
mov r4,#-16
|
||||
add r0,r2,r12,lsl#4 @ end of key schedule
|
||||
|
||||
vld1.32 {q0},[r2]
|
||||
vld1.32 {q1},[r0]
|
||||
vst1.32 {q0},[r0],r4
|
||||
vst1.32 {q1},[r2]!
|
||||
|
||||
Loop_imc:
|
||||
vld1.32 {q0},[r2]
|
||||
vld1.32 {q1},[r0]
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
vst1.32 {q0},[r0],r4
|
||||
vst1.32 {q1},[r2]!
|
||||
cmp r0,r2
|
||||
bhi Loop_imc
|
||||
|
||||
vld1.32 {q0},[r2]
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
vst1.32 {q0},[r0]
|
||||
|
||||
eor r0,r0,r0 @ return value
|
||||
Ldec_key_abort:
|
||||
ldmia sp!,{r4,pc}
|
||||
|
||||
.globl _aes_hw_encrypt
|
||||
.private_extern _aes_hw_encrypt
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _aes_hw_encrypt
|
||||
#endif
|
||||
.align 5
|
||||
_aes_hw_encrypt:
|
||||
ldr r3,[r2,#240]
|
||||
vld1.32 {q0},[r2]!
|
||||
vld1.8 {q2},[r0]
|
||||
sub r3,r3,#2
|
||||
vld1.32 {q1},[r2]!
|
||||
|
||||
Loop_enc:
|
||||
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
|
||||
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
|
||||
vld1.32 {q0},[r2]!
|
||||
subs r3,r3,#2
|
||||
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
|
||||
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
|
||||
vld1.32 {q1},[r2]!
|
||||
bgt Loop_enc
|
||||
|
||||
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
|
||||
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
|
||||
vld1.32 {q0},[r2]
|
||||
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
|
||||
veor q2,q2,q0
|
||||
|
||||
vst1.8 {q2},[r1]
|
||||
bx lr
|
||||
|
||||
.globl _aes_hw_decrypt
|
||||
.private_extern _aes_hw_decrypt
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _aes_hw_decrypt
|
||||
#endif
|
||||
.align 5
|
||||
_aes_hw_decrypt:
|
||||
ldr r3,[r2,#240]
|
||||
vld1.32 {q0},[r2]!
|
||||
vld1.8 {q2},[r0]
|
||||
sub r3,r3,#2
|
||||
vld1.32 {q1},[r2]!
|
||||
|
||||
Loop_dec:
|
||||
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
|
||||
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
|
||||
vld1.32 {q0},[r2]!
|
||||
subs r3,r3,#2
|
||||
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
|
||||
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
|
||||
vld1.32 {q1},[r2]!
|
||||
bgt Loop_dec
|
||||
|
||||
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
|
||||
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
|
||||
vld1.32 {q0},[r2]
|
||||
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
|
||||
veor q2,q2,q0
|
||||
|
||||
vst1.8 {q2},[r1]
|
||||
bx lr
|
||||
|
||||
.globl _aes_hw_cbc_encrypt
|
||||
.private_extern _aes_hw_cbc_encrypt
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _aes_hw_cbc_encrypt
|
||||
#endif
|
||||
.align 5
|
||||
_aes_hw_cbc_encrypt:
|
||||
mov ip,sp
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,lr}
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
|
||||
ldmia ip,{r4,r5} @ load remaining args
|
||||
subs r2,r2,#16
|
||||
mov r8,#16
|
||||
blo Lcbc_abort
|
||||
moveq r8,#0
|
||||
|
||||
cmp r5,#0 @ en- or decrypting?
|
||||
ldr r5,[r3,#240]
|
||||
and r2,r2,#-16
|
||||
vld1.8 {q6},[r4]
|
||||
vld1.8 {q0},[r0],r8
|
||||
|
||||
vld1.32 {q8,q9},[r3] @ load key schedule...
|
||||
sub r5,r5,#6
|
||||
add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
|
||||
sub r5,r5,#2
|
||||
vld1.32 {q10,q11},[r7]!
|
||||
vld1.32 {q12,q13},[r7]!
|
||||
vld1.32 {q14,q15},[r7]!
|
||||
vld1.32 {q7},[r7]
|
||||
|
||||
add r7,r3,#32
|
||||
mov r6,r5
|
||||
beq Lcbc_dec
|
||||
|
||||
cmp r5,#2
|
||||
veor q0,q0,q6
|
||||
veor q5,q8,q7
|
||||
beq Lcbc_enc128
|
||||
|
||||
vld1.32 {q2,q3},[r7]
|
||||
add r7,r3,#16
|
||||
add r6,r3,#16*4
|
||||
add r12,r3,#16*5
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
add r14,r3,#16*6
|
||||
add r3,r3,#16*7
|
||||
b Lenter_cbc_enc
|
||||
|
||||
.align 4
|
||||
Loop_cbc_enc:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vst1.8 {q6},[r1]!
|
||||
Lenter_cbc_enc:
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q8},[r6]
|
||||
cmp r5,#4
|
||||
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q9},[r12]
|
||||
beq Lcbc_enc192
|
||||
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q8},[r14]
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q9},[r3]
|
||||
nop
|
||||
|
||||
Lcbc_enc192:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
subs r2,r2,#16
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
moveq r8,#0
|
||||
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.8 {q8},[r0],r8
|
||||
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
veor q8,q8,q5
|
||||
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q9},[r7] @ re-pre-load rndkey[1]
|
||||
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
|
||||
veor q6,q0,q7
|
||||
bhs Loop_cbc_enc
|
||||
|
||||
vst1.8 {q6},[r1]!
|
||||
b Lcbc_done
|
||||
|
||||
.align 5
|
||||
Lcbc_enc128:
|
||||
vld1.32 {q2,q3},[r7]
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
b Lenter_cbc_enc128
|
||||
Loop_cbc_enc128:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vst1.8 {q6},[r1]!
|
||||
Lenter_cbc_enc128:
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
subs r2,r2,#16
|
||||
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
moveq r8,#0
|
||||
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.8 {q8},[r0],r8
|
||||
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
veor q8,q8,q5
|
||||
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
|
||||
veor q6,q0,q7
|
||||
bhs Loop_cbc_enc128
|
||||
|
||||
vst1.8 {q6},[r1]!
|
||||
b Lcbc_done
|
||||
.align 5
|
||||
Lcbc_dec:
|
||||
vld1.8 {q10},[r0]!
|
||||
subs r2,r2,#32 @ bias
|
||||
add r6,r5,#2
|
||||
vorr q3,q0,q0
|
||||
vorr q1,q0,q0
|
||||
vorr q11,q10,q10
|
||||
blo Lcbc_dec_tail
|
||||
|
||||
vorr q1,q10,q10
|
||||
vld1.8 {q10},[r0]!
|
||||
vorr q2,q0,q0
|
||||
vorr q3,q1,q1
|
||||
vorr q11,q10,q10
|
||||
|
||||
Loop3x_cbc_dec:
|
||||
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt Loop3x_cbc_dec
|
||||
|
||||
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q4,q6,q7
|
||||
subs r2,r2,#0x30
|
||||
veor q5,q2,q7
|
||||
movlo r6,r2 @ r6, r6, is zero at this point
|
||||
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q9,q3,q7
|
||||
add r0,r0,r6 @ r0 is adjusted in such way that
|
||||
@ at exit from the loop q1-q10
|
||||
@ are loaded with last "words"
|
||||
vorr q6,q11,q11
|
||||
mov r7,r3
|
||||
.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.8 {q2},[r0]!
|
||||
.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.8 {q3},[r0]!
|
||||
.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.8 {q11},[r0]!
|
||||
.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
|
||||
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
|
||||
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
|
||||
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
|
||||
add r6,r5,#2
|
||||
veor q4,q4,q0
|
||||
veor q5,q5,q1
|
||||
veor q10,q10,q9
|
||||
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
|
||||
vst1.8 {q4},[r1]!
|
||||
vorr q0,q2,q2
|
||||
vst1.8 {q5},[r1]!
|
||||
vorr q1,q3,q3
|
||||
vst1.8 {q10},[r1]!
|
||||
vorr q10,q11,q11
|
||||
bhs Loop3x_cbc_dec
|
||||
|
||||
cmn r2,#0x30
|
||||
beq Lcbc_done
|
||||
nop
|
||||
|
||||
Lcbc_dec_tail:
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt Lcbc_dec_tail
|
||||
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
cmn r2,#0x20
|
||||
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q5,q6,q7
|
||||
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q9,q3,q7
|
||||
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
|
||||
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
|
||||
beq Lcbc_dec_one
|
||||
veor q5,q5,q1
|
||||
veor q9,q9,q10
|
||||
vorr q6,q11,q11
|
||||
vst1.8 {q5},[r1]!
|
||||
vst1.8 {q9},[r1]!
|
||||
b Lcbc_done
|
||||
|
||||
Lcbc_dec_one:
|
||||
veor q5,q5,q10
|
||||
vorr q6,q11,q11
|
||||
vst1.8 {q5},[r1]!
|
||||
|
||||
Lcbc_done:
|
||||
vst1.8 {q6},[r4]
|
||||
Lcbc_abort:
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,pc}
|
||||
|
||||
.globl _aes_hw_ctr32_encrypt_blocks
|
||||
.private_extern _aes_hw_ctr32_encrypt_blocks
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _aes_hw_ctr32_encrypt_blocks
|
||||
#endif
|
||||
.align 5
|
||||
_aes_hw_ctr32_encrypt_blocks:
|
||||
mov ip,sp
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
|
||||
ldr r4, [ip] @ load remaining arg
|
||||
ldr r5,[r3,#240]
|
||||
|
||||
ldr r8, [r4, #12]
|
||||
vld1.32 {q0},[r4]
|
||||
|
||||
vld1.32 {q8,q9},[r3] @ load key schedule...
|
||||
sub r5,r5,#4
|
||||
mov r12,#16
|
||||
cmp r2,#2
|
||||
add r7,r3,r5,lsl#4 @ pointer to last 5 round keys
|
||||
sub r5,r5,#2
|
||||
vld1.32 {q12,q13},[r7]!
|
||||
vld1.32 {q14,q15},[r7]!
|
||||
vld1.32 {q7},[r7]
|
||||
add r7,r3,#32
|
||||
mov r6,r5
|
||||
movlo r12,#0
|
||||
#ifndef __ARMEB__
|
||||
rev r8, r8
|
||||
#endif
|
||||
vorr q1,q0,q0
|
||||
add r10, r8, #1
|
||||
vorr q10,q0,q0
|
||||
add r8, r8, #2
|
||||
vorr q6,q0,q0
|
||||
rev r10, r10
|
||||
vmov.32 d3[1],r10
|
||||
bls Lctr32_tail
|
||||
rev r12, r8
|
||||
sub r2,r2,#3 @ bias
|
||||
vmov.32 d21[1],r12
|
||||
b Loop3x_ctr32
|
||||
|
||||
.align 4
|
||||
Loop3x_ctr32:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
|
||||
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
|
||||
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt Loop3x_ctr32
|
||||
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
|
||||
vld1.8 {q2},[r0]!
|
||||
vorr q0,q6,q6
|
||||
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
|
||||
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
|
||||
vld1.8 {q3},[r0]!
|
||||
vorr q1,q6,q6
|
||||
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
vld1.8 {q11},[r0]!
|
||||
mov r7,r3
|
||||
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
|
||||
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
|
||||
vorr q10,q6,q6
|
||||
add r9,r8,#1
|
||||
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
veor q2,q2,q7
|
||||
add r10,r8,#2
|
||||
.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
|
||||
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
|
||||
veor q3,q3,q7
|
||||
add r8,r8,#3
|
||||
.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
veor q11,q11,q7
|
||||
rev r9,r9
|
||||
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
|
||||
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
|
||||
vmov.32 d1[1], r9
|
||||
rev r10,r10
|
||||
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
vmov.32 d3[1], r10
|
||||
rev r12,r8
|
||||
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
|
||||
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
|
||||
vmov.32 d21[1], r12
|
||||
subs r2,r2,#3
|
||||
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
|
||||
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
|
||||
.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
|
||||
|
||||
veor q2,q2,q4
|
||||
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
|
||||
vst1.8 {q2},[r1]!
|
||||
veor q3,q3,q5
|
||||
mov r6,r5
|
||||
vst1.8 {q3},[r1]!
|
||||
veor q11,q11,q9
|
||||
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
|
||||
vst1.8 {q11},[r1]!
|
||||
bhs Loop3x_ctr32
|
||||
|
||||
adds r2,r2,#3
|
||||
beq Lctr32_done
|
||||
cmp r2,#1
|
||||
mov r12,#16
|
||||
moveq r12,#0
|
||||
|
||||
Lctr32_tail:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt Lctr32_tail
|
||||
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.8 {q2},[r0],r12
|
||||
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.8 {q3},[r0]
|
||||
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
veor q2,q2,q7
|
||||
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
veor q3,q3,q7
|
||||
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
|
||||
.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
|
||||
|
||||
cmp r2,#1
|
||||
veor q2,q2,q0
|
||||
veor q3,q3,q1
|
||||
vst1.8 {q2},[r1]!
|
||||
beq Lctr32_done
|
||||
vst1.8 {q3},[r1]
|
||||
|
||||
Lctr32_done:
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,962 @@
|
|||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
#if defined(__thumb2__)
|
||||
.syntax unified
|
||||
.thumb
|
||||
#else
|
||||
.code 32
|
||||
#endif
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.align 5
|
||||
LOPENSSL_armcap:
|
||||
.word OPENSSL_armcap_P-Lbn_mul_mont
|
||||
#endif
|
||||
|
||||
.globl _bn_mul_mont
|
||||
.private_extern _bn_mul_mont
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _bn_mul_mont
|
||||
#endif
|
||||
|
||||
.align 5
|
||||
_bn_mul_mont:
|
||||
Lbn_mul_mont:
|
||||
ldr ip,[sp,#4] @ load num
|
||||
stmdb sp!,{r0,r2} @ sp points at argument block
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
tst ip,#7
|
||||
bne Lialu
|
||||
adr r0,Lbn_mul_mont
|
||||
ldr r2,LOPENSSL_armcap
|
||||
ldr r0,[r0,r2]
|
||||
#ifdef __APPLE__
|
||||
ldr r0,[r0]
|
||||
#endif
|
||||
tst r0,#ARMV7_NEON @ NEON available?
|
||||
ldmia sp, {r0,r2}
|
||||
beq Lialu
|
||||
add sp,sp,#8
|
||||
b bn_mul8x_mont_neon
|
||||
.align 4
|
||||
Lialu:
|
||||
#endif
|
||||
cmp ip,#2
|
||||
mov r0,ip @ load num
|
||||
#ifdef __thumb2__
|
||||
ittt lt
|
||||
#endif
|
||||
movlt r0,#0
|
||||
addlt sp,sp,#2*4
|
||||
blt Labrt
|
||||
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
|
||||
|
||||
mov r0,r0,lsl#2 @ rescale r0 for byte count
|
||||
sub sp,sp,r0 @ alloca(4*num)
|
||||
sub sp,sp,#4 @ +extra dword
|
||||
sub r0,r0,#4 @ "num=num-1"
|
||||
add r4,r2,r0 @ &bp[num-1]
|
||||
|
||||
add r0,sp,r0 @ r0 to point at &tp[num-1]
|
||||
ldr r8,[r0,#14*4] @ &n0
|
||||
ldr r2,[r2] @ bp[0]
|
||||
ldr r5,[r1],#4 @ ap[0],ap++
|
||||
ldr r6,[r3],#4 @ np[0],np++
|
||||
ldr r8,[r8] @ *n0
|
||||
str r4,[r0,#15*4] @ save &bp[num]
|
||||
|
||||
umull r10,r11,r5,r2 @ ap[0]*bp[0]
|
||||
str r8,[r0,#14*4] @ save n0 value
|
||||
mul r8,r10,r8 @ "tp[0]"*n0
|
||||
mov r12,#0
|
||||
umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
|
||||
mov r4,sp
|
||||
|
||||
L1st:
|
||||
ldr r5,[r1],#4 @ ap[j],ap++
|
||||
mov r10,r11
|
||||
ldr r6,[r3],#4 @ np[j],np++
|
||||
mov r11,#0
|
||||
umlal r10,r11,r5,r2 @ ap[j]*bp[0]
|
||||
mov r14,#0
|
||||
umlal r12,r14,r6,r8 @ np[j]*n0
|
||||
adds r12,r12,r10
|
||||
str r12,[r4],#4 @ tp[j-1]=,tp++
|
||||
adc r12,r14,#0
|
||||
cmp r4,r0
|
||||
bne L1st
|
||||
|
||||
adds r12,r12,r11
|
||||
ldr r4,[r0,#13*4] @ restore bp
|
||||
mov r14,#0
|
||||
ldr r8,[r0,#14*4] @ restore n0
|
||||
adc r14,r14,#0
|
||||
str r12,[r0] @ tp[num-1]=
|
||||
mov r7,sp
|
||||
str r14,[r0,#4] @ tp[num]=
|
||||
|
||||
Louter:
|
||||
sub r7,r0,r7 @ "original" r0-1 value
|
||||
sub r1,r1,r7 @ "rewind" ap to &ap[1]
|
||||
ldr r2,[r4,#4]! @ *(++bp)
|
||||
sub r3,r3,r7 @ "rewind" np to &np[1]
|
||||
ldr r5,[r1,#-4] @ ap[0]
|
||||
ldr r10,[sp] @ tp[0]
|
||||
ldr r6,[r3,#-4] @ np[0]
|
||||
ldr r7,[sp,#4] @ tp[1]
|
||||
|
||||
mov r11,#0
|
||||
umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
|
||||
str r4,[r0,#13*4] @ save bp
|
||||
mul r8,r10,r8
|
||||
mov r12,#0
|
||||
umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
|
||||
mov r4,sp
|
||||
|
||||
Linner:
|
||||
ldr r5,[r1],#4 @ ap[j],ap++
|
||||
adds r10,r11,r7 @ +=tp[j]
|
||||
ldr r6,[r3],#4 @ np[j],np++
|
||||
mov r11,#0
|
||||
umlal r10,r11,r5,r2 @ ap[j]*bp[i]
|
||||
mov r14,#0
|
||||
umlal r12,r14,r6,r8 @ np[j]*n0
|
||||
adc r11,r11,#0
|
||||
ldr r7,[r4,#8] @ tp[j+1]
|
||||
adds r12,r12,r10
|
||||
str r12,[r4],#4 @ tp[j-1]=,tp++
|
||||
adc r12,r14,#0
|
||||
cmp r4,r0
|
||||
bne Linner
|
||||
|
||||
adds r12,r12,r11
|
||||
mov r14,#0
|
||||
ldr r4,[r0,#13*4] @ restore bp
|
||||
adc r14,r14,#0
|
||||
ldr r8,[r0,#14*4] @ restore n0
|
||||
adds r12,r12,r7
|
||||
ldr r7,[r0,#15*4] @ restore &bp[num]
|
||||
adc r14,r14,#0
|
||||
str r12,[r0] @ tp[num-1]=
|
||||
str r14,[r0,#4] @ tp[num]=
|
||||
|
||||
cmp r4,r7
|
||||
#ifdef __thumb2__
|
||||
itt ne
|
||||
#endif
|
||||
movne r7,sp
|
||||
bne Louter
|
||||
|
||||
ldr r2,[r0,#12*4] @ pull rp
|
||||
mov r5,sp
|
||||
add r0,r0,#4 @ r0 to point at &tp[num]
|
||||
sub r5,r0,r5 @ "original" num value
|
||||
mov r4,sp @ "rewind" r4
|
||||
mov r1,r4 @ "borrow" r1
|
||||
sub r3,r3,r5 @ "rewind" r3 to &np[0]
|
||||
|
||||
subs r7,r7,r7 @ "clear" carry flag
|
||||
Lsub: ldr r7,[r4],#4
|
||||
ldr r6,[r3],#4
|
||||
sbcs r7,r7,r6 @ tp[j]-np[j]
|
||||
str r7,[r2],#4 @ rp[j]=
|
||||
teq r4,r0 @ preserve carry
|
||||
bne Lsub
|
||||
sbcs r14,r14,#0 @ upmost carry
|
||||
mov r4,sp @ "rewind" r4
|
||||
sub r2,r2,r5 @ "rewind" r2
|
||||
|
||||
and r1,r4,r14
|
||||
bic r3,r2,r14
|
||||
orr r1,r1,r3 @ ap=borrow?tp:rp
|
||||
|
||||
Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh
|
||||
str sp,[r4],#4 @ zap tp
|
||||
str r7,[r2],#4
|
||||
cmp r4,r0
|
||||
bne Lcopy
|
||||
|
||||
mov sp,r0
|
||||
add sp,sp,#4 @ skip over tp[num+1]
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
|
||||
add sp,sp,#2*4 @ skip over {r0,r2}
|
||||
mov r0,#1
|
||||
Labrt:
|
||||
#if __ARM_ARCH__>=5
|
||||
bx lr @ bx lr
|
||||
#else
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
|
||||
|
||||
|
||||
#ifdef __thumb2__
|
||||
.thumb_func bn_mul8x_mont_neon
|
||||
#endif
|
||||
.align 5
|
||||
bn_mul8x_mont_neon:
|
||||
mov ip,sp
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
|
||||
ldmia ip,{r4,r5} @ load rest of parameter block
|
||||
mov ip,sp
|
||||
|
||||
cmp r5,#8
|
||||
bhi LNEON_8n
|
||||
|
||||
@ special case for r5==8, everything is in register bank...
|
||||
|
||||
vld1.32 {d28[0]}, [r2,:32]!
|
||||
veor d8,d8,d8
|
||||
sub r7,sp,r5,lsl#4
|
||||
vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-(
|
||||
and r7,r7,#-64
|
||||
vld1.32 {d30[0]}, [r4,:32]
|
||||
mov sp,r7 @ alloca
|
||||
vzip.16 d28,d8
|
||||
|
||||
vmull.u32 q6,d28,d0[0]
|
||||
vmull.u32 q7,d28,d0[1]
|
||||
vmull.u32 q8,d28,d1[0]
|
||||
vshl.i64 d29,d13,#16
|
||||
vmull.u32 q9,d28,d1[1]
|
||||
|
||||
vadd.u64 d29,d29,d12
|
||||
veor d8,d8,d8
|
||||
vmul.u32 d29,d29,d30
|
||||
|
||||
vmull.u32 q10,d28,d2[0]
|
||||
vld1.32 {d4,d5,d6,d7}, [r3]!
|
||||
vmull.u32 q11,d28,d2[1]
|
||||
vmull.u32 q12,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmull.u32 q13,d28,d3[1]
|
||||
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
sub r9,r5,#1
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vmov q5,q6
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vmov q6,q7
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmov q7,q8
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vmov q8,q9
|
||||
vmov q9,q10
|
||||
vshr.u64 d10,d10,#16
|
||||
vmov q10,q11
|
||||
vmov q11,q12
|
||||
vadd.u64 d10,d10,d11
|
||||
vmov q12,q13
|
||||
veor q13,q13
|
||||
vshr.u64 d10,d10,#16
|
||||
|
||||
b LNEON_outer8
|
||||
|
||||
.align 4
|
||||
LNEON_outer8:
|
||||
vld1.32 {d28[0]}, [r2,:32]!
|
||||
veor d8,d8,d8
|
||||
vzip.16 d28,d8
|
||||
vadd.u64 d12,d12,d10
|
||||
|
||||
vmlal.u32 q6,d28,d0[0]
|
||||
vmlal.u32 q7,d28,d0[1]
|
||||
vmlal.u32 q8,d28,d1[0]
|
||||
vshl.i64 d29,d13,#16
|
||||
vmlal.u32 q9,d28,d1[1]
|
||||
|
||||
vadd.u64 d29,d29,d12
|
||||
veor d8,d8,d8
|
||||
subs r9,r9,#1
|
||||
vmul.u32 d29,d29,d30
|
||||
|
||||
vmlal.u32 q10,d28,d2[0]
|
||||
vmlal.u32 q11,d28,d2[1]
|
||||
vmlal.u32 q12,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q13,d28,d3[1]
|
||||
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vmov q5,q6
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vmov q6,q7
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmov q7,q8
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vmov q8,q9
|
||||
vmov q9,q10
|
||||
vshr.u64 d10,d10,#16
|
||||
vmov q10,q11
|
||||
vmov q11,q12
|
||||
vadd.u64 d10,d10,d11
|
||||
vmov q12,q13
|
||||
veor q13,q13
|
||||
vshr.u64 d10,d10,#16
|
||||
|
||||
bne LNEON_outer8
|
||||
|
||||
vadd.u64 d12,d12,d10
|
||||
mov r7,sp
|
||||
vshr.u64 d10,d12,#16
|
||||
mov r8,r5
|
||||
vadd.u64 d13,d13,d10
|
||||
add r6,sp,#96
|
||||
vshr.u64 d10,d13,#16
|
||||
vzip.16 d12,d13
|
||||
|
||||
b LNEON_tail_entry
|
||||
|
||||
.align 4
|
||||
LNEON_8n:
|
||||
veor q6,q6,q6
|
||||
sub r7,sp,#128
|
||||
veor q7,q7,q7
|
||||
sub r7,r7,r5,lsl#4
|
||||
veor q8,q8,q8
|
||||
and r7,r7,#-64
|
||||
veor q9,q9,q9
|
||||
mov sp,r7 @ alloca
|
||||
veor q10,q10,q10
|
||||
add r7,r7,#256
|
||||
veor q11,q11,q11
|
||||
sub r8,r5,#8
|
||||
veor q12,q12,q12
|
||||
veor q13,q13,q13
|
||||
|
||||
LNEON_8n_init:
|
||||
vst1.64 {q6,q7},[r7,:256]!
|
||||
subs r8,r8,#8
|
||||
vst1.64 {q8,q9},[r7,:256]!
|
||||
vst1.64 {q10,q11},[r7,:256]!
|
||||
vst1.64 {q12,q13},[r7,:256]!
|
||||
bne LNEON_8n_init
|
||||
|
||||
add r6,sp,#256
|
||||
vld1.32 {d0,d1,d2,d3},[r1]!
|
||||
add r10,sp,#8
|
||||
vld1.32 {d30[0]},[r4,:32]
|
||||
mov r9,r5
|
||||
b LNEON_8n_outer
|
||||
|
||||
.align 4
|
||||
LNEON_8n_outer:
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
veor d8,d8,d8
|
||||
vzip.16 d28,d8
|
||||
add r7,sp,#128
|
||||
vld1.32 {d4,d5,d6,d7},[r3]!
|
||||
|
||||
vmlal.u32 q6,d28,d0[0]
|
||||
vmlal.u32 q7,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q8,d28,d1[0]
|
||||
vshl.i64 d29,d13,#16
|
||||
vmlal.u32 q9,d28,d1[1]
|
||||
vadd.u64 d29,d29,d12
|
||||
vmlal.u32 q10,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q11,d28,d2[1]
|
||||
vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0]
|
||||
vmlal.u32 q12,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q13,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vshr.u64 d12,d12,#16
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vadd.u64 d12,d12,d13
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vshr.u64 d12,d12,#16
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vadd.u64 d14,d14,d12
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0]
|
||||
vmlal.u32 q7,d28,d0[0]
|
||||
vld1.64 {q6},[r6,:128]!
|
||||
vmlal.u32 q8,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q9,d28,d1[0]
|
||||
vshl.i64 d29,d15,#16
|
||||
vmlal.u32 q10,d28,d1[1]
|
||||
vadd.u64 d29,d29,d14
|
||||
vmlal.u32 q11,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q12,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1]
|
||||
vmlal.u32 q13,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q6,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q7,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q8,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q9,d29,d5[0]
|
||||
vshr.u64 d14,d14,#16
|
||||
vmlal.u32 q10,d29,d5[1]
|
||||
vmlal.u32 q11,d29,d6[0]
|
||||
vadd.u64 d14,d14,d15
|
||||
vmlal.u32 q12,d29,d6[1]
|
||||
vshr.u64 d14,d14,#16
|
||||
vmlal.u32 q13,d29,d7[0]
|
||||
vmlal.u32 q6,d29,d7[1]
|
||||
vadd.u64 d16,d16,d14
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1]
|
||||
vmlal.u32 q8,d28,d0[0]
|
||||
vld1.64 {q7},[r6,:128]!
|
||||
vmlal.u32 q9,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q10,d28,d1[0]
|
||||
vshl.i64 d29,d17,#16
|
||||
vmlal.u32 q11,d28,d1[1]
|
||||
vadd.u64 d29,d29,d16
|
||||
vmlal.u32 q12,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q13,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2]
|
||||
vmlal.u32 q6,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q7,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q8,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q9,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q10,d29,d5[0]
|
||||
vshr.u64 d16,d16,#16
|
||||
vmlal.u32 q11,d29,d5[1]
|
||||
vmlal.u32 q12,d29,d6[0]
|
||||
vadd.u64 d16,d16,d17
|
||||
vmlal.u32 q13,d29,d6[1]
|
||||
vshr.u64 d16,d16,#16
|
||||
vmlal.u32 q6,d29,d7[0]
|
||||
vmlal.u32 q7,d29,d7[1]
|
||||
vadd.u64 d18,d18,d16
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2]
|
||||
vmlal.u32 q9,d28,d0[0]
|
||||
vld1.64 {q8},[r6,:128]!
|
||||
vmlal.u32 q10,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q11,d28,d1[0]
|
||||
vshl.i64 d29,d19,#16
|
||||
vmlal.u32 q12,d28,d1[1]
|
||||
vadd.u64 d29,d29,d18
|
||||
vmlal.u32 q13,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q6,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3]
|
||||
vmlal.u32 q7,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q8,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q9,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q10,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q11,d29,d5[0]
|
||||
vshr.u64 d18,d18,#16
|
||||
vmlal.u32 q12,d29,d5[1]
|
||||
vmlal.u32 q13,d29,d6[0]
|
||||
vadd.u64 d18,d18,d19
|
||||
vmlal.u32 q6,d29,d6[1]
|
||||
vshr.u64 d18,d18,#16
|
||||
vmlal.u32 q7,d29,d7[0]
|
||||
vmlal.u32 q8,d29,d7[1]
|
||||
vadd.u64 d20,d20,d18
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3]
|
||||
vmlal.u32 q10,d28,d0[0]
|
||||
vld1.64 {q9},[r6,:128]!
|
||||
vmlal.u32 q11,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q12,d28,d1[0]
|
||||
vshl.i64 d29,d21,#16
|
||||
vmlal.u32 q13,d28,d1[1]
|
||||
vadd.u64 d29,d29,d20
|
||||
vmlal.u32 q6,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q7,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4]
|
||||
vmlal.u32 q8,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q9,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q10,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q11,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q12,d29,d5[0]
|
||||
vshr.u64 d20,d20,#16
|
||||
vmlal.u32 q13,d29,d5[1]
|
||||
vmlal.u32 q6,d29,d6[0]
|
||||
vadd.u64 d20,d20,d21
|
||||
vmlal.u32 q7,d29,d6[1]
|
||||
vshr.u64 d20,d20,#16
|
||||
vmlal.u32 q8,d29,d7[0]
|
||||
vmlal.u32 q9,d29,d7[1]
|
||||
vadd.u64 d22,d22,d20
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4]
|
||||
vmlal.u32 q11,d28,d0[0]
|
||||
vld1.64 {q10},[r6,:128]!
|
||||
vmlal.u32 q12,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q13,d28,d1[0]
|
||||
vshl.i64 d29,d23,#16
|
||||
vmlal.u32 q6,d28,d1[1]
|
||||
vadd.u64 d29,d29,d22
|
||||
vmlal.u32 q7,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q8,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5]
|
||||
vmlal.u32 q9,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q10,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q11,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q12,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q13,d29,d5[0]
|
||||
vshr.u64 d22,d22,#16
|
||||
vmlal.u32 q6,d29,d5[1]
|
||||
vmlal.u32 q7,d29,d6[0]
|
||||
vadd.u64 d22,d22,d23
|
||||
vmlal.u32 q8,d29,d6[1]
|
||||
vshr.u64 d22,d22,#16
|
||||
vmlal.u32 q9,d29,d7[0]
|
||||
vmlal.u32 q10,d29,d7[1]
|
||||
vadd.u64 d24,d24,d22
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5]
|
||||
vmlal.u32 q12,d28,d0[0]
|
||||
vld1.64 {q11},[r6,:128]!
|
||||
vmlal.u32 q13,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q6,d28,d1[0]
|
||||
vshl.i64 d29,d25,#16
|
||||
vmlal.u32 q7,d28,d1[1]
|
||||
vadd.u64 d29,d29,d24
|
||||
vmlal.u32 q8,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q9,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6]
|
||||
vmlal.u32 q10,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q11,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q12,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q13,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q6,d29,d5[0]
|
||||
vshr.u64 d24,d24,#16
|
||||
vmlal.u32 q7,d29,d5[1]
|
||||
vmlal.u32 q8,d29,d6[0]
|
||||
vadd.u64 d24,d24,d25
|
||||
vmlal.u32 q9,d29,d6[1]
|
||||
vshr.u64 d24,d24,#16
|
||||
vmlal.u32 q10,d29,d7[0]
|
||||
vmlal.u32 q11,d29,d7[1]
|
||||
vadd.u64 d26,d26,d24
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6]
|
||||
vmlal.u32 q13,d28,d0[0]
|
||||
vld1.64 {q12},[r6,:128]!
|
||||
vmlal.u32 q6,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q7,d28,d1[0]
|
||||
vshl.i64 d29,d27,#16
|
||||
vmlal.u32 q8,d28,d1[1]
|
||||
vadd.u64 d29,d29,d26
|
||||
vmlal.u32 q9,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q10,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7]
|
||||
vmlal.u32 q11,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q12,d28,d3[1]
|
||||
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
|
||||
vmlal.u32 q13,d29,d4[0]
|
||||
vld1.32 {d0,d1,d2,d3},[r1]!
|
||||
vmlal.u32 q6,d29,d4[1]
|
||||
vmlal.u32 q7,d29,d5[0]
|
||||
vshr.u64 d26,d26,#16
|
||||
vmlal.u32 q8,d29,d5[1]
|
||||
vmlal.u32 q9,d29,d6[0]
|
||||
vadd.u64 d26,d26,d27
|
||||
vmlal.u32 q10,d29,d6[1]
|
||||
vshr.u64 d26,d26,#16
|
||||
vmlal.u32 q11,d29,d7[0]
|
||||
vmlal.u32 q12,d29,d7[1]
|
||||
vadd.u64 d12,d12,d26
|
||||
vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7]
|
||||
add r10,sp,#8 @ rewind
|
||||
sub r8,r5,#8
|
||||
b LNEON_8n_inner
|
||||
|
||||
.align 4
|
||||
LNEON_8n_inner:
|
||||
subs r8,r8,#8
|
||||
vmlal.u32 q6,d28,d0[0]
|
||||
vld1.64 {q13},[r6,:128]
|
||||
vmlal.u32 q7,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0]
|
||||
vmlal.u32 q8,d28,d1[0]
|
||||
vld1.32 {d4,d5,d6,d7},[r3]!
|
||||
vmlal.u32 q9,d28,d1[1]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q10,d28,d2[0]
|
||||
vmlal.u32 q11,d28,d2[1]
|
||||
vmlal.u32 q12,d28,d3[0]
|
||||
vmlal.u32 q13,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1]
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vst1.64 {q6},[r7,:128]!
|
||||
vmlal.u32 q7,d28,d0[0]
|
||||
vld1.64 {q6},[r6,:128]
|
||||
vmlal.u32 q8,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1]
|
||||
vmlal.u32 q9,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q10,d28,d1[1]
|
||||
vmlal.u32 q11,d28,d2[0]
|
||||
vmlal.u32 q12,d28,d2[1]
|
||||
vmlal.u32 q13,d28,d3[0]
|
||||
vmlal.u32 q6,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2]
|
||||
vmlal.u32 q7,d29,d4[0]
|
||||
vmlal.u32 q8,d29,d4[1]
|
||||
vmlal.u32 q9,d29,d5[0]
|
||||
vmlal.u32 q10,d29,d5[1]
|
||||
vmlal.u32 q11,d29,d6[0]
|
||||
vmlal.u32 q12,d29,d6[1]
|
||||
vmlal.u32 q13,d29,d7[0]
|
||||
vmlal.u32 q6,d29,d7[1]
|
||||
vst1.64 {q7},[r7,:128]!
|
||||
vmlal.u32 q8,d28,d0[0]
|
||||
vld1.64 {q7},[r6,:128]
|
||||
vmlal.u32 q9,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2]
|
||||
vmlal.u32 q10,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q11,d28,d1[1]
|
||||
vmlal.u32 q12,d28,d2[0]
|
||||
vmlal.u32 q13,d28,d2[1]
|
||||
vmlal.u32 q6,d28,d3[0]
|
||||
vmlal.u32 q7,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3]
|
||||
vmlal.u32 q8,d29,d4[0]
|
||||
vmlal.u32 q9,d29,d4[1]
|
||||
vmlal.u32 q10,d29,d5[0]
|
||||
vmlal.u32 q11,d29,d5[1]
|
||||
vmlal.u32 q12,d29,d6[0]
|
||||
vmlal.u32 q13,d29,d6[1]
|
||||
vmlal.u32 q6,d29,d7[0]
|
||||
vmlal.u32 q7,d29,d7[1]
|
||||
vst1.64 {q8},[r7,:128]!
|
||||
vmlal.u32 q9,d28,d0[0]
|
||||
vld1.64 {q8},[r6,:128]
|
||||
vmlal.u32 q10,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3]
|
||||
vmlal.u32 q11,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q12,d28,d1[1]
|
||||
vmlal.u32 q13,d28,d2[0]
|
||||
vmlal.u32 q6,d28,d2[1]
|
||||
vmlal.u32 q7,d28,d3[0]
|
||||
vmlal.u32 q8,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4]
|
||||
vmlal.u32 q9,d29,d4[0]
|
||||
vmlal.u32 q10,d29,d4[1]
|
||||
vmlal.u32 q11,d29,d5[0]
|
||||
vmlal.u32 q12,d29,d5[1]
|
||||
vmlal.u32 q13,d29,d6[0]
|
||||
vmlal.u32 q6,d29,d6[1]
|
||||
vmlal.u32 q7,d29,d7[0]
|
||||
vmlal.u32 q8,d29,d7[1]
|
||||
vst1.64 {q9},[r7,:128]!
|
||||
vmlal.u32 q10,d28,d0[0]
|
||||
vld1.64 {q9},[r6,:128]
|
||||
vmlal.u32 q11,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4]
|
||||
vmlal.u32 q12,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q13,d28,d1[1]
|
||||
vmlal.u32 q6,d28,d2[0]
|
||||
vmlal.u32 q7,d28,d2[1]
|
||||
vmlal.u32 q8,d28,d3[0]
|
||||
vmlal.u32 q9,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5]
|
||||
vmlal.u32 q10,d29,d4[0]
|
||||
vmlal.u32 q11,d29,d4[1]
|
||||
vmlal.u32 q12,d29,d5[0]
|
||||
vmlal.u32 q13,d29,d5[1]
|
||||
vmlal.u32 q6,d29,d6[0]
|
||||
vmlal.u32 q7,d29,d6[1]
|
||||
vmlal.u32 q8,d29,d7[0]
|
||||
vmlal.u32 q9,d29,d7[1]
|
||||
vst1.64 {q10},[r7,:128]!
|
||||
vmlal.u32 q11,d28,d0[0]
|
||||
vld1.64 {q10},[r6,:128]
|
||||
vmlal.u32 q12,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5]
|
||||
vmlal.u32 q13,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q6,d28,d1[1]
|
||||
vmlal.u32 q7,d28,d2[0]
|
||||
vmlal.u32 q8,d28,d2[1]
|
||||
vmlal.u32 q9,d28,d3[0]
|
||||
vmlal.u32 q10,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6]
|
||||
vmlal.u32 q11,d29,d4[0]
|
||||
vmlal.u32 q12,d29,d4[1]
|
||||
vmlal.u32 q13,d29,d5[0]
|
||||
vmlal.u32 q6,d29,d5[1]
|
||||
vmlal.u32 q7,d29,d6[0]
|
||||
vmlal.u32 q8,d29,d6[1]
|
||||
vmlal.u32 q9,d29,d7[0]
|
||||
vmlal.u32 q10,d29,d7[1]
|
||||
vst1.64 {q11},[r7,:128]!
|
||||
vmlal.u32 q12,d28,d0[0]
|
||||
vld1.64 {q11},[r6,:128]
|
||||
vmlal.u32 q13,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6]
|
||||
vmlal.u32 q6,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q7,d28,d1[1]
|
||||
vmlal.u32 q8,d28,d2[0]
|
||||
vmlal.u32 q9,d28,d2[1]
|
||||
vmlal.u32 q10,d28,d3[0]
|
||||
vmlal.u32 q11,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7]
|
||||
vmlal.u32 q12,d29,d4[0]
|
||||
vmlal.u32 q13,d29,d4[1]
|
||||
vmlal.u32 q6,d29,d5[0]
|
||||
vmlal.u32 q7,d29,d5[1]
|
||||
vmlal.u32 q8,d29,d6[0]
|
||||
vmlal.u32 q9,d29,d6[1]
|
||||
vmlal.u32 q10,d29,d7[0]
|
||||
vmlal.u32 q11,d29,d7[1]
|
||||
vst1.64 {q12},[r7,:128]!
|
||||
vmlal.u32 q13,d28,d0[0]
|
||||
vld1.64 {q12},[r6,:128]
|
||||
vmlal.u32 q6,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7]
|
||||
vmlal.u32 q7,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q8,d28,d1[1]
|
||||
vmlal.u32 q9,d28,d2[0]
|
||||
vmlal.u32 q10,d28,d2[1]
|
||||
vmlal.u32 q11,d28,d3[0]
|
||||
vmlal.u32 q12,d28,d3[1]
|
||||
it eq
|
||||
subeq r1,r1,r5,lsl#2 @ rewind
|
||||
vmlal.u32 q13,d29,d4[0]
|
||||
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
|
||||
vmlal.u32 q6,d29,d4[1]
|
||||
vld1.32 {d0,d1,d2,d3},[r1]!
|
||||
vmlal.u32 q7,d29,d5[0]
|
||||
add r10,sp,#8 @ rewind
|
||||
vmlal.u32 q8,d29,d5[1]
|
||||
vmlal.u32 q9,d29,d6[0]
|
||||
vmlal.u32 q10,d29,d6[1]
|
||||
vmlal.u32 q11,d29,d7[0]
|
||||
vst1.64 {q13},[r7,:128]!
|
||||
vmlal.u32 q12,d29,d7[1]
|
||||
|
||||
bne LNEON_8n_inner
|
||||
add r6,sp,#128
|
||||
vst1.64 {q6,q7},[r7,:256]!
|
||||
veor q2,q2,q2 @ d4-d5
|
||||
vst1.64 {q8,q9},[r7,:256]!
|
||||
veor q3,q3,q3 @ d6-d7
|
||||
vst1.64 {q10,q11},[r7,:256]!
|
||||
vst1.64 {q12},[r7,:128]
|
||||
|
||||
subs r9,r9,#8
|
||||
vld1.64 {q6,q7},[r6,:256]!
|
||||
vld1.64 {q8,q9},[r6,:256]!
|
||||
vld1.64 {q10,q11},[r6,:256]!
|
||||
vld1.64 {q12,q13},[r6,:256]!
|
||||
|
||||
itt ne
|
||||
subne r3,r3,r5,lsl#2 @ rewind
|
||||
bne LNEON_8n_outer
|
||||
|
||||
add r7,sp,#128
|
||||
vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame
|
||||
vshr.u64 d10,d12,#16
|
||||
vst1.64 {q2,q3},[sp,:256]!
|
||||
vadd.u64 d13,d13,d10
|
||||
vst1.64 {q2,q3}, [sp,:256]!
|
||||
vshr.u64 d10,d13,#16
|
||||
vst1.64 {q2,q3}, [sp,:256]!
|
||||
vzip.16 d12,d13
|
||||
|
||||
mov r8,r5
|
||||
b LNEON_tail_entry
|
||||
|
||||
.align 4
|
||||
LNEON_tail:
|
||||
vadd.u64 d12,d12,d10
|
||||
vshr.u64 d10,d12,#16
|
||||
vld1.64 {q8,q9}, [r6, :256]!
|
||||
vadd.u64 d13,d13,d10
|
||||
vld1.64 {q10,q11}, [r6, :256]!
|
||||
vshr.u64 d10,d13,#16
|
||||
vld1.64 {q12,q13}, [r6, :256]!
|
||||
vzip.16 d12,d13
|
||||
|
||||
LNEON_tail_entry:
|
||||
vadd.u64 d14,d14,d10
|
||||
vst1.32 {d12[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d14,#16
|
||||
vadd.u64 d15,d15,d10
|
||||
vshr.u64 d10,d15,#16
|
||||
vzip.16 d14,d15
|
||||
vadd.u64 d16,d16,d10
|
||||
vst1.32 {d14[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d16,#16
|
||||
vadd.u64 d17,d17,d10
|
||||
vshr.u64 d10,d17,#16
|
||||
vzip.16 d16,d17
|
||||
vadd.u64 d18,d18,d10
|
||||
vst1.32 {d16[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d18,#16
|
||||
vadd.u64 d19,d19,d10
|
||||
vshr.u64 d10,d19,#16
|
||||
vzip.16 d18,d19
|
||||
vadd.u64 d20,d20,d10
|
||||
vst1.32 {d18[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d20,#16
|
||||
vadd.u64 d21,d21,d10
|
||||
vshr.u64 d10,d21,#16
|
||||
vzip.16 d20,d21
|
||||
vadd.u64 d22,d22,d10
|
||||
vst1.32 {d20[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d22,#16
|
||||
vadd.u64 d23,d23,d10
|
||||
vshr.u64 d10,d23,#16
|
||||
vzip.16 d22,d23
|
||||
vadd.u64 d24,d24,d10
|
||||
vst1.32 {d22[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d24,#16
|
||||
vadd.u64 d25,d25,d10
|
||||
vshr.u64 d10,d25,#16
|
||||
vzip.16 d24,d25
|
||||
vadd.u64 d26,d26,d10
|
||||
vst1.32 {d24[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d26,#16
|
||||
vadd.u64 d27,d27,d10
|
||||
vshr.u64 d10,d27,#16
|
||||
vzip.16 d26,d27
|
||||
vld1.64 {q6,q7}, [r6, :256]!
|
||||
subs r8,r8,#8
|
||||
vst1.32 {d26[0]}, [r7, :32]!
|
||||
bne LNEON_tail
|
||||
|
||||
vst1.32 {d10[0]}, [r7, :32] @ top-most bit
|
||||
sub r3,r3,r5,lsl#2 @ rewind r3
|
||||
subs r1,sp,#0 @ clear carry flag
|
||||
add r2,sp,r5,lsl#2
|
||||
|
||||
LNEON_sub:
|
||||
ldmia r1!, {r4,r5,r6,r7}
|
||||
ldmia r3!, {r8,r9,r10,r11}
|
||||
sbcs r8, r4,r8
|
||||
sbcs r9, r5,r9
|
||||
sbcs r10,r6,r10
|
||||
sbcs r11,r7,r11
|
||||
teq r1,r2 @ preserves carry
|
||||
stmia r0!, {r8,r9,r10,r11}
|
||||
bne LNEON_sub
|
||||
|
||||
ldr r10, [r1] @ load top-most bit
|
||||
mov r11,sp
|
||||
veor q0,q0,q0
|
||||
sub r11,r2,r11 @ this is num*4
|
||||
veor q1,q1,q1
|
||||
mov r1,sp
|
||||
sub r0,r0,r11 @ rewind r0
|
||||
mov r3,r2 @ second 3/4th of frame
|
||||
sbcs r10,r10,#0 @ result is carry flag
|
||||
|
||||
LNEON_copy_n_zap:
|
||||
ldmia r1!, {r4,r5,r6,r7}
|
||||
ldmia r0, {r8,r9,r10,r11}
|
||||
it cc
|
||||
movcc r8, r4
|
||||
vst1.64 {q0,q1}, [r3,:256]! @ wipe
|
||||
itt cc
|
||||
movcc r9, r5
|
||||
movcc r10,r6
|
||||
vst1.64 {q0,q1}, [r3,:256]! @ wipe
|
||||
it cc
|
||||
movcc r11,r7
|
||||
ldmia r1, {r4,r5,r6,r7}
|
||||
stmia r0!, {r8,r9,r10,r11}
|
||||
sub r1,r1,#16
|
||||
ldmia r0, {r8,r9,r10,r11}
|
||||
it cc
|
||||
movcc r8, r4
|
||||
vst1.64 {q0,q1}, [r1,:256]! @ wipe
|
||||
itt cc
|
||||
movcc r9, r5
|
||||
movcc r10,r6
|
||||
vst1.64 {q0,q1}, [r3,:256]! @ wipe
|
||||
it cc
|
||||
movcc r11,r7
|
||||
teq r1,r2 @ preserves carry
|
||||
stmia r0!, {r8,r9,r10,r11}
|
||||
bne LNEON_copy_n_zap
|
||||
|
||||
mov sp,ip
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
|
||||
bx lr @ bx lr
|
||||
|
||||
#endif
|
||||
.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 2
|
||||
.align 2
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.comm _OPENSSL_armcap_P,4
|
||||
.non_lazy_symbol_pointer
|
||||
OPENSSL_armcap_P:
|
||||
.indirect_symbol _OPENSSL_armcap_P
|
||||
.long 0
|
||||
.private_extern _OPENSSL_armcap_P
|
||||
#endif
|
File diff suppressed because it is too large
|
@ -0,0 +1,583 @@
|
|||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
#if defined(__thumb2__) || defined(__clang__)
|
||||
.syntax unified
|
||||
#endif
|
||||
#if defined(__thumb2__)
|
||||
.thumb
|
||||
#else
|
||||
.code 32
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
#define ldrplb ldrbpl
|
||||
#define ldrneb ldrbne
|
||||
#endif
|
||||
|
||||
|
||||
.align 5
|
||||
rem_4bit:
|
||||
.short 0x0000,0x1C20,0x3840,0x2460
|
||||
.short 0x7080,0x6CA0,0x48C0,0x54E0
|
||||
.short 0xE100,0xFD20,0xD940,0xC560
|
||||
.short 0x9180,0x8DA0,0xA9C0,0xB5E0
|
||||
|
||||
|
||||
#ifdef __thumb2__
|
||||
.thumb_func rem_4bit_get
|
||||
#endif
|
||||
rem_4bit_get:
|
||||
#if defined(__thumb2__)
|
||||
adr r2,rem_4bit
|
||||
#else
|
||||
sub r2,pc,#8+32 @ &rem_4bit
|
||||
#endif
|
||||
b Lrem_4bit_got
|
||||
nop
|
||||
nop
|
||||
|
||||
|
||||
.globl _gcm_ghash_4bit
|
||||
.private_extern _gcm_ghash_4bit
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_ghash_4bit
|
||||
#endif
|
||||
.align 4
|
||||
_gcm_ghash_4bit:
|
||||
#if defined(__thumb2__)
|
||||
adr r12,rem_4bit
|
||||
#else
|
||||
sub r12,pc,#8+48 @ &rem_4bit
|
||||
#endif
|
||||
add r3,r2,r3 @ r3 to point at the end
|
||||
stmdb sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} @ save r3/end too
|
||||
|
||||
ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ...
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ ... to stack
|
||||
|
||||
ldrb r12,[r2,#15]
|
||||
ldrb r14,[r0,#15]
|
||||
Louter:
|
||||
eor r12,r12,r14
|
||||
and r14,r12,#0xf0
|
||||
and r12,r12,#0x0f
|
||||
mov r3,#14
|
||||
|
||||
add r7,r1,r12,lsl#4
|
||||
ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
|
||||
add r11,r1,r14
|
||||
ldrb r12,[r2,#14]
|
||||
|
||||
and r14,r4,#0xf @ rem
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
add r14,r14,r14
|
||||
eor r4,r8,r4,lsr#4
|
||||
ldrh r8,[sp,r14] @ rem_4bit[rem]
|
||||
eor r4,r4,r5,lsl#28
|
||||
ldrb r14,[r0,#14]
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
eor r12,r12,r14
|
||||
and r14,r12,#0xf0
|
||||
and r12,r12,#0x0f
|
||||
eor r7,r7,r8,lsl#16
|
||||
|
||||
Linner:
|
||||
add r11,r1,r12,lsl#4
|
||||
and r12,r4,#0xf @ rem
|
||||
subs r3,r3,#1
|
||||
add r12,r12,r12
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
|
||||
eor r4,r8,r4,lsr#4
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
ldrh r8,[sp,r12] @ rem_4bit[rem]
|
||||
eor r6,r10,r6,lsr#4
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
ldrplb r12,[r2,r3]
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
|
||||
add r11,r1,r14
|
||||
and r14,r4,#0xf @ rem
|
||||
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
|
||||
add r14,r14,r14
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
eor r4,r8,r4,lsr#4
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
ldrplb r8,[r0,r3]
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
ldrh r9,[sp,r14]
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
eorpl r12,r12,r8
|
||||
eor r7,r11,r7,lsr#4
|
||||
#ifdef __thumb2__
|
||||
itt pl
|
||||
#endif
|
||||
andpl r14,r12,#0xf0
|
||||
andpl r12,r12,#0x0f
|
||||
eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
|
||||
bpl Linner
|
||||
|
||||
ldr r3,[sp,#32] @ re-load r3/end
|
||||
add r2,r2,#16
|
||||
mov r14,r4
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r4,r4
|
||||
str r4,[r0,#12]
|
||||
#elif defined(__ARMEB__)
|
||||
str r4,[r0,#12]
|
||||
#else
|
||||
mov r9,r4,lsr#8
|
||||
strb r4,[r0,#12+3]
|
||||
mov r10,r4,lsr#16
|
||||
strb r9,[r0,#12+2]
|
||||
mov r11,r4,lsr#24
|
||||
strb r10,[r0,#12+1]
|
||||
strb r11,[r0,#12]
|
||||
#endif
|
||||
cmp r2,r3
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r5,r5
|
||||
str r5,[r0,#8]
|
||||
#elif defined(__ARMEB__)
|
||||
str r5,[r0,#8]
|
||||
#else
|
||||
mov r9,r5,lsr#8
|
||||
strb r5,[r0,#8+3]
|
||||
mov r10,r5,lsr#16
|
||||
strb r9,[r0,#8+2]
|
||||
mov r11,r5,lsr#24
|
||||
strb r10,[r0,#8+1]
|
||||
strb r11,[r0,#8]
|
||||
#endif
|
||||
|
||||
#ifdef __thumb2__
|
||||
it ne
|
||||
#endif
|
||||
ldrneb r12,[r2,#15]
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r6,r6
|
||||
str r6,[r0,#4]
|
||||
#elif defined(__ARMEB__)
|
||||
str r6,[r0,#4]
|
||||
#else
|
||||
mov r9,r6,lsr#8
|
||||
strb r6,[r0,#4+3]
|
||||
mov r10,r6,lsr#16
|
||||
strb r9,[r0,#4+2]
|
||||
mov r11,r6,lsr#24
|
||||
strb r10,[r0,#4+1]
|
||||
strb r11,[r0,#4]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r7,r7
|
||||
str r7,[r0,#0]
|
||||
#elif defined(__ARMEB__)
|
||||
str r7,[r0,#0]
|
||||
#else
|
||||
mov r9,r7,lsr#8
|
||||
strb r7,[r0,#0+3]
|
||||
mov r10,r7,lsr#16
|
||||
strb r9,[r0,#0+2]
|
||||
mov r11,r7,lsr#24
|
||||
strb r10,[r0,#0+1]
|
||||
strb r11,[r0,#0]
|
||||
#endif
|
||||
|
||||
bne Louter
|
||||
|
||||
add sp,sp,#36
|
||||
#if __ARM_ARCH__>=5
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
|
||||
#else
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
|
||||
|
||||
.globl _gcm_gmult_4bit
|
||||
.private_extern _gcm_gmult_4bit
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_gmult_4bit
|
||||
#endif
|
||||
_gcm_gmult_4bit:
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
|
||||
ldrb r12,[r0,#15]
|
||||
b rem_4bit_get
|
||||
Lrem_4bit_got:
|
||||
and r14,r12,#0xf0
|
||||
and r12,r12,#0x0f
|
||||
mov r3,#14
|
||||
|
||||
add r7,r1,r12,lsl#4
|
||||
ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
|
||||
ldrb r12,[r0,#14]
|
||||
|
||||
add r11,r1,r14
|
||||
and r14,r4,#0xf @ rem
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
add r14,r14,r14
|
||||
eor r4,r8,r4,lsr#4
|
||||
ldrh r8,[r2,r14] @ rem_4bit[rem]
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
and r14,r12,#0xf0
|
||||
eor r7,r7,r8,lsl#16
|
||||
and r12,r12,#0x0f
|
||||
|
||||
Loop:
|
||||
add r11,r1,r12,lsl#4
|
||||
and r12,r4,#0xf @ rem
|
||||
subs r3,r3,#1
|
||||
add r12,r12,r12
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
|
||||
eor r4,r8,r4,lsr#4
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
ldrh r8,[r2,r12] @ rem_4bit[rem]
|
||||
eor r6,r10,r6,lsr#4
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
ldrplb r12,[r0,r3]
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
|
||||
add r11,r1,r14
|
||||
and r14,r4,#0xf @ rem
|
||||
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
|
||||
add r14,r14,r14
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
eor r4,r8,r4,lsr#4
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
ldrh r8,[r2,r14] @ rem_4bit[rem]
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
#ifdef __thumb2__
|
||||
itt pl
|
||||
#endif
|
||||
andpl r14,r12,#0xf0
|
||||
andpl r12,r12,#0x0f
|
||||
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
|
||||
bpl Loop
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r4,r4
|
||||
str r4,[r0,#12]
|
||||
#elif defined(__ARMEB__)
|
||||
str r4,[r0,#12]
|
||||
#else
|
||||
mov r9,r4,lsr#8
|
||||
strb r4,[r0,#12+3]
|
||||
mov r10,r4,lsr#16
|
||||
strb r9,[r0,#12+2]
|
||||
mov r11,r4,lsr#24
|
||||
strb r10,[r0,#12+1]
|
||||
strb r11,[r0,#12]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r5,r5
|
||||
str r5,[r0,#8]
|
||||
#elif defined(__ARMEB__)
|
||||
str r5,[r0,#8]
|
||||
#else
|
||||
mov r9,r5,lsr#8
|
||||
strb r5,[r0,#8+3]
|
||||
mov r10,r5,lsr#16
|
||||
strb r9,[r0,#8+2]
|
||||
mov r11,r5,lsr#24
|
||||
strb r10,[r0,#8+1]
|
||||
strb r11,[r0,#8]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r6,r6
|
||||
str r6,[r0,#4]
|
||||
#elif defined(__ARMEB__)
|
||||
str r6,[r0,#4]
|
||||
#else
|
||||
mov r9,r6,lsr#8
|
||||
strb r6,[r0,#4+3]
|
||||
mov r10,r6,lsr#16
|
||||
strb r9,[r0,#4+2]
|
||||
mov r11,r6,lsr#24
|
||||
strb r10,[r0,#4+1]
|
||||
strb r11,[r0,#4]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r7,r7
|
||||
str r7,[r0,#0]
|
||||
#elif defined(__ARMEB__)
|
||||
str r7,[r0,#0]
|
||||
#else
|
||||
mov r9,r7,lsr#8
|
||||
strb r7,[r0,#0+3]
|
||||
mov r10,r7,lsr#16
|
||||
strb r9,[r0,#0+2]
|
||||
mov r11,r7,lsr#24
|
||||
strb r10,[r0,#0+1]
|
||||
strb r11,[r0,#0]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=5
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
|
||||
#else
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
|
||||
|
||||
|
||||
.globl _gcm_init_neon
|
||||
.private_extern _gcm_init_neon
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_init_neon
|
||||
#endif
|
||||
.align 4
|
||||
_gcm_init_neon:
|
||||
vld1.64 d7,[r1]! @ load H
|
||||
vmov.i8 q8,#0xe1
|
||||
vld1.64 d6,[r1]
|
||||
vshl.i64 d17,#57
|
||||
vshr.u64 d16,#63 @ t0=0xc2....01
|
||||
vdup.8 q9,d7[7]
|
||||
vshr.u64 d26,d6,#63
|
||||
vshr.s8 q9,#7 @ broadcast carry bit
|
||||
vshl.i64 q3,q3,#1
|
||||
vand q8,q8,q9
|
||||
vorr d7,d26 @ H<<<=1
|
||||
veor q3,q3,q8 @ twisted H
|
||||
vstmia r0,{q3}
|
||||
|
||||
bx lr @ bx lr
|
||||
|
||||
|
||||
.globl _gcm_gmult_neon
|
||||
.private_extern _gcm_gmult_neon
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_gmult_neon
|
||||
#endif
|
||||
.align 4
|
||||
_gcm_gmult_neon:
|
||||
vld1.64 d7,[r0]! @ load Xi
|
||||
vld1.64 d6,[r0]!
|
||||
vmov.i64 d29,#0x0000ffffffffffff
|
||||
vldmia r1,{d26,d27} @ load twisted H
|
||||
vmov.i64 d30,#0x00000000ffffffff
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q3,q3
|
||||
#endif
|
||||
vmov.i64 d31,#0x000000000000ffff
|
||||
veor d28,d26,d27 @ Karatsuba pre-processing
|
||||
mov r3,#16
|
||||
b Lgmult_neon
|
||||
|
||||
|
||||
.globl _gcm_ghash_neon
|
||||
.private_extern _gcm_ghash_neon
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_ghash_neon
|
||||
#endif
|
||||
.align 4
|
||||
_gcm_ghash_neon:
|
||||
vld1.64 d1,[r0]! @ load Xi
|
||||
vld1.64 d0,[r0]!
|
||||
vmov.i64 d29,#0x0000ffffffffffff
|
||||
vldmia r1,{d26,d27} @ load twisted H
|
||||
vmov.i64 d30,#0x00000000ffffffff
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vmov.i64 d31,#0x000000000000ffff
|
||||
veor d28,d26,d27 @ Karatsuba pre-processing
|
||||
|
||||
Loop_neon:
|
||||
vld1.64 d7,[r2]! @ load inp
|
||||
vld1.64 d6,[r2]!
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q3,q3
|
||||
#endif
|
||||
veor q3,q0 @ inp^=Xi
|
||||
Lgmult_neon:
|
||||
vext.8 d16, d26, d26, #1 @ A1
|
||||
vmull.p8 q8, d16, d6 @ F = A1*B
|
||||
vext.8 d0, d6, d6, #1 @ B1
|
||||
vmull.p8 q0, d26, d0 @ E = A*B1
|
||||
vext.8 d18, d26, d26, #2 @ A2
|
||||
vmull.p8 q9, d18, d6 @ H = A2*B
|
||||
vext.8 d22, d6, d6, #2 @ B2
|
||||
vmull.p8 q11, d26, d22 @ G = A*B2
|
||||
vext.8 d20, d26, d26, #3 @ A3
|
||||
veor q8, q8, q0 @ L = E + F
|
||||
vmull.p8 q10, d20, d6 @ J = A3*B
|
||||
vext.8 d0, d6, d6, #3 @ B3
|
||||
veor q9, q9, q11 @ M = G + H
|
||||
vmull.p8 q0, d26, d0 @ I = A*B3
|
||||
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
|
||||
vand d17, d17, d29
|
||||
vext.8 d22, d6, d6, #4 @ B4
|
||||
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
|
||||
vand d19, d19, d30
|
||||
vmull.p8 q11, d26, d22 @ K = A*B4
|
||||
veor q10, q10, q0 @ N = I + J
|
||||
veor d16, d16, d17
|
||||
veor d18, d18, d19
|
||||
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
|
||||
vand d21, d21, d31
|
||||
vext.8 q8, q8, q8, #15
|
||||
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
|
||||
vmov.i64 d23, #0
|
||||
vext.8 q9, q9, q9, #14
|
||||
veor d20, d20, d21
|
||||
vmull.p8 q0, d26, d6 @ D = A*B
|
||||
vext.8 q11, q11, q11, #12
|
||||
vext.8 q10, q10, q10, #13
|
||||
veor q8, q8, q9
|
||||
veor q10, q10, q11
|
||||
veor q0, q0, q8
|
||||
veor q0, q0, q10
|
||||
veor d6,d6,d7 @ Karatsuba pre-processing
|
||||
vext.8 d16, d28, d28, #1 @ A1
|
||||
vmull.p8 q8, d16, d6 @ F = A1*B
|
||||
vext.8 d2, d6, d6, #1 @ B1
|
||||
vmull.p8 q1, d28, d2 @ E = A*B1
|
||||
vext.8 d18, d28, d28, #2 @ A2
|
||||
vmull.p8 q9, d18, d6 @ H = A2*B
|
||||
vext.8 d22, d6, d6, #2 @ B2
|
||||
vmull.p8 q11, d28, d22 @ G = A*B2
|
||||
vext.8 d20, d28, d28, #3 @ A3
|
||||
veor q8, q8, q1 @ L = E + F
|
||||
vmull.p8 q10, d20, d6 @ J = A3*B
|
||||
vext.8 d2, d6, d6, #3 @ B3
|
||||
veor q9, q9, q11 @ M = G + H
|
||||
vmull.p8 q1, d28, d2 @ I = A*B3
|
||||
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
|
||||
vand d17, d17, d29
|
||||
vext.8 d22, d6, d6, #4 @ B4
|
||||
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
|
||||
vand d19, d19, d30
|
||||
vmull.p8 q11, d28, d22 @ K = A*B4
|
||||
veor q10, q10, q1 @ N = I + J
|
||||
veor d16, d16, d17
|
||||
veor d18, d18, d19
|
||||
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
|
||||
vand d21, d21, d31
|
||||
vext.8 q8, q8, q8, #15
|
||||
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
|
||||
vmov.i64 d23, #0
|
||||
vext.8 q9, q9, q9, #14
|
||||
veor d20, d20, d21
|
||||
vmull.p8 q1, d28, d6 @ D = A*B
|
||||
vext.8 q11, q11, q11, #12
|
||||
vext.8 q10, q10, q10, #13
|
||||
veor q8, q8, q9
|
||||
veor q10, q10, q11
|
||||
veor q1, q1, q8
|
||||
veor q1, q1, q10
|
||||
vext.8 d16, d27, d27, #1 @ A1
|
||||
vmull.p8 q8, d16, d7 @ F = A1*B
|
||||
vext.8 d4, d7, d7, #1 @ B1
|
||||
vmull.p8 q2, d27, d4 @ E = A*B1
|
||||
vext.8 d18, d27, d27, #2 @ A2
|
||||
vmull.p8 q9, d18, d7 @ H = A2*B
|
||||
vext.8 d22, d7, d7, #2 @ B2
|
||||
vmull.p8 q11, d27, d22 @ G = A*B2
|
||||
vext.8 d20, d27, d27, #3 @ A3
|
||||
veor q8, q8, q2 @ L = E + F
|
||||
vmull.p8 q10, d20, d7 @ J = A3*B
|
||||
vext.8 d4, d7, d7, #3 @ B3
|
||||
veor q9, q9, q11 @ M = G + H
|
||||
vmull.p8 q2, d27, d4 @ I = A*B3
|
||||
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
|
||||
vand d17, d17, d29
|
||||
vext.8 d22, d7, d7, #4 @ B4
|
||||
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
|
||||
vand d19, d19, d30
|
||||
vmull.p8 q11, d27, d22 @ K = A*B4
|
||||
veor q10, q10, q2 @ N = I + J
|
||||
veor d16, d16, d17
|
||||
veor d18, d18, d19
|
||||
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
|
||||
vand d21, d21, d31
|
||||
vext.8 q8, q8, q8, #15
|
||||
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
|
||||
vmov.i64 d23, #0
|
||||
vext.8 q9, q9, q9, #14
|
||||
veor d20, d20, d21
|
||||
vmull.p8 q2, d27, d7 @ D = A*B
|
||||
vext.8 q11, q11, q11, #12
|
||||
vext.8 q10, q10, q10, #13
|
||||
veor q8, q8, q9
|
||||
veor q10, q10, q11
|
||||
veor q2, q2, q8
|
||||
veor q2, q2, q10
|
||||
veor q1,q1,q0 @ Karatsuba post-processing
|
||||
veor q1,q1,q2
|
||||
veor d1,d1,d2
|
||||
veor d4,d4,d3 @ Xh|Xl - 256-bit result
|
||||
|
||||
@ equivalent of reduction_avx from ghash-x86_64.pl
|
||||
vshl.i64 q9,q0,#57 @ 1st phase
|
||||
vshl.i64 q10,q0,#62
|
||||
veor q10,q10,q9 @
|
||||
vshl.i64 q9,q0,#63
|
||||
veor q10, q10, q9 @
|
||||
veor d1,d1,d20 @
|
||||
veor d4,d4,d21
|
||||
|
||||
vshr.u64 q10,q0,#1 @ 2nd phase
|
||||
veor q2,q2,q0
|
||||
veor q0,q0,q10 @
|
||||
vshr.u64 q10,q10,#6
|
||||
vshr.u64 q0,q0,#1 @
|
||||
veor q0,q0,q2 @
|
||||
veor q0,q0,q10 @
|
||||
|
||||
subs r3,#16
|
||||
bne Loop_neon
|
||||
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
sub r0,#16
|
||||
vst1.64 d1,[r0]! @ write out Xi
|
||||
vst1.64 d0,[r0]
|
||||
|
||||
bx lr @ bx lr
|
||||
|
||||
#endif
|
||||
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 2
|
||||
.align 2
|
|
@ -0,0 +1,241 @@
|
|||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
|
||||
.code 32
|
||||
#undef __thumb2__
|
||||
.globl _gcm_init_v8
|
||||
.private_extern _gcm_init_v8
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_init_v8
|
||||
#endif
|
||||
.align 4
|
||||
_gcm_init_v8:
|
||||
vld1.64 {q9},[r1] @ load input H
|
||||
vmov.i8 q11,#0xe1
|
||||
vshl.i64 q11,q11,#57 @ 0xc2.0
|
||||
vext.8 q3,q9,q9,#8
|
||||
vshr.u64 q10,q11,#63
|
||||
vdup.32 q9,d18[1]
|
||||
vext.8 q8,q10,q11,#8 @ t0=0xc2....01
|
||||
vshr.u64 q10,q3,#63
|
||||
vshr.s32 q9,q9,#31 @ broadcast carry bit
|
||||
vand q10,q10,q8
|
||||
vshl.i64 q3,q3,#1
|
||||
vext.8 q10,q10,q10,#8
|
||||
vand q8,q8,q9
|
||||
vorr q3,q3,q10 @ H<<<=1
|
||||
veor q12,q3,q8 @ twisted H
|
||||
vst1.64 {q12},[r0]! @ store Htable[0]
|
||||
|
||||
@ calculate H^2
|
||||
vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing
|
||||
.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12
|
||||
veor q8,q8,q12
|
||||
.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12
|
||||
.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
|
||||
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
veor q0,q1,q10
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q10,q10,q2
|
||||
veor q14,q0,q10
|
||||
|
||||
vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
|
||||
veor q9,q9,q14
|
||||
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
|
||||
vst1.64 {q13,q14},[r0] @ store Htable[1..2]
|
||||
|
||||
bx lr
|
||||
|
||||
.globl _gcm_gmult_v8
|
||||
.private_extern _gcm_gmult_v8
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_gmult_v8
|
||||
#endif
|
||||
.align 4
|
||||
_gcm_gmult_v8:
|
||||
vld1.64 {q9},[r0] @ load Xi
|
||||
vmov.i8 q11,#0xe1
|
||||
vld1.64 {q12,q13},[r1] @ load twisted H, ...
|
||||
vshl.u64 q11,q11,#57
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q9,q9
|
||||
#endif
|
||||
vext.8 q3,q9,q9,#8
|
||||
|
||||
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
|
||||
veor q9,q9,q3 @ Karatsuba pre-processing
|
||||
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
|
||||
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
|
||||
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
veor q0,q1,q10
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q10,q10,q2
|
||||
veor q0,q0,q10
|
||||
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vext.8 q0,q0,q0,#8
|
||||
vst1.64 {q0},[r0] @ write out Xi
|
||||
|
||||
bx lr
|
||||
|
||||
.globl _gcm_ghash_v8
|
||||
.private_extern _gcm_ghash_v8
|
||||
#ifdef __thumb2__
|
||||
.thumb_func _gcm_ghash_v8
|
||||
#endif
|
||||
.align 4
|
||||
_gcm_ghash_v8:
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
|
||||
vld1.64 {q0},[r0] @ load [rotated] Xi
|
||||
@ "[rotated]" means that
|
||||
@ loaded value would have
|
||||
@ to be rotated in order to
|
||||
@ make it appear as in
|
||||
@ the algorithm specification
|
||||
subs r3,r3,#32 @ see if r3 is 32 or larger
|
||||
mov r12,#16 @ r12 is used as post-
|
||||
@ increment for input pointer;
|
||||
@ as loop is modulo-scheduled
|
||||
@ r12 is zeroed just in time
|
||||
@ to preclude overstepping
|
||||
@ inp[len], which means that
|
||||
@ last block[s] are actually
|
||||
@ loaded twice, but last
|
||||
@ copy is not processed
|
||||
vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2
|
||||
vmov.i8 q11,#0xe1
|
||||
vld1.64 {q14},[r1]
|
||||
moveq r12,#0 @ is it time to zero r12?
|
||||
vext.8 q0,q0,q0,#8 @ rotate Xi
|
||||
vld1.64 {q8},[r2]! @ load [rotated] I[0]
|
||||
vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q8,q8
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vext.8 q3,q8,q8,#8 @ rotate I[0]
|
||||
blo Lodd_tail_v8 @ r3 was less than 32
|
||||
vld1.64 {q9},[r2],r12 @ load [rotated] I[1]
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q9,q9
|
||||
#endif
|
||||
vext.8 q7,q9,q9,#8
|
||||
veor q3,q3,q0 @ I[i]^=Xi
|
||||
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
|
||||
veor q9,q9,q7 @ Karatsuba pre-processing
|
||||
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
|
||||
b Loop_mod2x_v8
|
||||
|
||||
.align 4
|
||||
Loop_mod2x_v8:
|
||||
vext.8 q10,q3,q3,#8
|
||||
subs r3,r3,#32 @ is there more data?
|
||||
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
|
||||
movlo r12,#0 @ is it time to zero r12?
|
||||
|
||||
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
|
||||
veor q10,q10,q3 @ Karatsuba pre-processing
|
||||
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
|
||||
veor q0,q0,q4 @ accumulate
|
||||
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
|
||||
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
|
||||
|
||||
veor q2,q2,q6
|
||||
moveq r12,#0 @ is it time to zero r12?
|
||||
veor q1,q1,q5
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3]
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q8,q8
|
||||
#endif
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
|
||||
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q9,q9
|
||||
#endif
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
vext.8 q7,q9,q9,#8
|
||||
vext.8 q3,q8,q8,#8
|
||||
veor q0,q1,q10
|
||||
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
|
||||
veor q3,q3,q2 @ accumulate q3 early
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q3,q3,q10
|
||||
veor q9,q9,q7 @ Karatsuba pre-processing
|
||||
veor q3,q3,q0
|
||||
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
|
||||
bhs Loop_mod2x_v8 @ there was at least 32 more bytes
|
||||
|
||||
veor q2,q2,q10
|
||||
vext.8 q3,q8,q8,#8 @ re-construct q3
|
||||
adds r3,r3,#32 @ re-construct r3
|
||||
veor q0,q0,q2 @ re-construct q0
|
||||
beq Ldone_v8 @ is r3 zero?
|
||||
Lodd_tail_v8:
|
||||
vext.8 q10,q0,q0,#8
|
||||
veor q3,q3,q0 @ inp^=Xi
|
||||
veor q9,q8,q10 @ q9 is rotated inp^Xi
|
||||
|
||||
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
|
||||
veor q9,q9,q3 @ Karatsuba pre-processing
|
||||
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
|
||||
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
|
||||
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
veor q0,q1,q10
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q10,q10,q2
|
||||
veor q0,q0,q10
|
||||
|
||||
Ldone_v8:
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vext.8 q0,q0,q0,#8
|
||||
vst1.64 {q0},[r0] @ write out Xi
|
||||
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
|
||||
bx lr
|
||||
|
||||
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 2
|
||||
.align 2
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,757 @@
|
|||
#if defined(__aarch64__)
|
||||
#include <openssl/arm_arch.h>
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.text
|
||||
#if !defined(__clang__) || defined(BORINGSSL_CLANG_SUPPORTS_DOT_ARCH)
|
||||
.arch armv8-a+crypto
|
||||
#endif
|
||||
.align 5
|
||||
.Lrcon:
|
||||
.long 0x01,0x01,0x01,0x01
|
||||
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
|
||||
.long 0x1b,0x1b,0x1b,0x1b
|
||||
|
||||
.globl aes_hw_set_encrypt_key
|
||||
.hidden aes_hw_set_encrypt_key
|
||||
.type aes_hw_set_encrypt_key,%function
|
||||
.align 5
|
||||
aes_hw_set_encrypt_key:
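// Editorial note (added): x0 = user key, w1 = key length in bits,
// x2 = output key schedule. 0 is returned on success, -1 for a NULL
// pointer and -2 for an unsupported key length (only 128/192/256 pass
// the checks below); the round count (10/12/14) is stored after the
// last round key at .Ldone.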
|
||||
.Lenc_key:
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
mov x3,#-1
|
||||
cmp x0,#0
|
||||
b.eq .Lenc_key_abort
|
||||
cmp x2,#0
|
||||
b.eq .Lenc_key_abort
|
||||
mov x3,#-2
|
||||
cmp w1,#128
|
||||
b.lt .Lenc_key_abort
|
||||
cmp w1,#256
|
||||
b.gt .Lenc_key_abort
|
||||
tst w1,#0x3f
|
||||
b.ne .Lenc_key_abort
|
||||
|
||||
adr x3,.Lrcon
|
||||
cmp w1,#192
|
||||
|
||||
eor v0.16b,v0.16b,v0.16b
|
||||
ld1 {v3.16b},[x0],#16
|
||||
mov w1,#8 // reuse w1
|
||||
ld1 {v1.4s,v2.4s},[x3],#32
|
||||
|
||||
b.lt .Loop128
|
||||
b.eq .L192
|
||||
b .L256
|
||||
|
||||
.align 4
|
||||
.Loop128:
|
||||
tbl v6.16b,{v3.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v3.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
subs w1,w1,#1
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
b.ne .Loop128
|
||||
|
||||
ld1 {v1.4s},[x3]
|
||||
|
||||
tbl v6.16b,{v3.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v3.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
|
||||
tbl v6.16b,{v3.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v3.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
st1 {v3.4s},[x2]
|
||||
add x2,x2,#0x50
|
||||
|
||||
mov w12,#10
|
||||
b .Ldone
|
||||
|
||||
.align 4
|
||||
.L192:
|
||||
ld1 {v4.8b},[x0],#8
|
||||
movi v6.16b,#8 // borrow v6.16b
|
||||
st1 {v3.4s},[x2],#16
|
||||
sub v2.16b,v2.16b,v6.16b // adjust the mask
|
||||
|
||||
.Loop192:
|
||||
tbl v6.16b,{v4.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v4.8b},[x2],#8
|
||||
aese v6.16b,v0.16b
|
||||
subs w1,w1,#1
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
|
||||
dup v5.4s,v3.s[3]
|
||||
eor v5.16b,v5.16b,v4.16b
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
ext v4.16b,v0.16b,v4.16b,#12
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
eor v4.16b,v4.16b,v6.16b
|
||||
st1 {v3.4s},[x2],#16
|
||||
b.ne .Loop192
|
||||
|
||||
mov w12,#12
|
||||
add x2,x2,#0x20
|
||||
b .Ldone
|
||||
|
||||
.align 4
|
||||
.L256:
|
||||
ld1 {v4.16b},[x0]
|
||||
mov w1,#7
|
||||
mov w12,#14
|
||||
st1 {v3.4s},[x2],#16
|
||||
|
||||
.Loop256:
|
||||
tbl v6.16b,{v4.16b},v2.16b
|
||||
ext v5.16b,v0.16b,v3.16b,#12
|
||||
st1 {v4.4s},[x2],#16
|
||||
aese v6.16b,v0.16b
|
||||
subs w1,w1,#1
|
||||
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v6.16b,v6.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
shl v1.16b,v1.16b,#1
|
||||
eor v3.16b,v3.16b,v6.16b
|
||||
st1 {v3.4s},[x2],#16
|
||||
b.eq .Ldone
|
||||
|
||||
dup v6.4s,v3.s[3] // just splat
|
||||
ext v5.16b,v0.16b,v4.16b,#12
|
||||
aese v6.16b,v0.16b
|
||||
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
ext v5.16b,v0.16b,v5.16b,#12
|
||||
eor v4.16b,v4.16b,v5.16b
|
||||
|
||||
eor v4.16b,v4.16b,v6.16b
|
||||
b .Loop256
|
||||
|
||||
.Ldone:
|
||||
str w12,[x2]
|
||||
mov x3,#0
|
||||
|
||||
.Lenc_key_abort:
|
||||
mov x0,x3 // return value
|
||||
ldr x29,[sp],#16
|
||||
ret
|
||||
.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key
|
||||
|
||||
.globl aes_hw_set_decrypt_key
|
||||
.hidden aes_hw_set_decrypt_key
|
||||
.type aes_hw_set_decrypt_key,%function
|
||||
.align 5
|
||||
aes_hw_set_decrypt_key:
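// Editorial note (added): the decrypt schedule is derived by first
// running the encrypt expansion (bl .Lenc_key) and then walking the
// schedule from both ends, swapping entries and applying aesimc
// (inverse MixColumns) to the inner round keys, which yields the
// "equivalent inverse cipher" key schedule in place.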
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
bl .Lenc_key
|
||||
|
||||
cmp x0,#0
|
||||
b.ne .Ldec_key_abort
|
||||
|
||||
sub x2,x2,#240 // restore original x2
|
||||
mov x4,#-16
|
||||
add x0,x2,x12,lsl#4 // end of key schedule
|
||||
|
||||
ld1 {v0.4s},[x2]
|
||||
ld1 {v1.4s},[x0]
|
||||
st1 {v0.4s},[x0],x4
|
||||
st1 {v1.4s},[x2],#16
|
||||
|
||||
.Loop_imc:
|
||||
ld1 {v0.4s},[x2]
|
||||
ld1 {v1.4s},[x0]
|
||||
aesimc v0.16b,v0.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
st1 {v0.4s},[x0],x4
|
||||
st1 {v1.4s},[x2],#16
|
||||
cmp x0,x2
|
||||
b.hi .Loop_imc
|
||||
|
||||
ld1 {v0.4s},[x2]
|
||||
aesimc v0.16b,v0.16b
|
||||
st1 {v0.4s},[x0]
|
||||
|
||||
eor x0,x0,x0 // return value
|
||||
.Ldec_key_abort:
|
||||
ldp x29,x30,[sp],#16
|
||||
ret
|
||||
.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key
|
||||
.globl aes_hw_encrypt
|
||||
.hidden aes_hw_encrypt
|
||||
.type aes_hw_encrypt,%function
|
||||
.align 5
|
||||
aes_hw_encrypt:
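// Editorial note (added): single-block encryption; x0 = input block,
// x1 = output block, x2 = key schedule. w3 is loaded with the round
// count from offset 240, the loop below runs two rounds per iteration,
// and the final round uses aese without aesmc followed by an XOR with
// the last round key.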
|
||||
ldr w3,[x2,#240]
|
||||
ld1 {v0.4s},[x2],#16
|
||||
ld1 {v2.16b},[x0]
|
||||
sub w3,w3,#2
|
||||
ld1 {v1.4s},[x2],#16
|
||||
|
||||
.Loop_enc:
|
||||
aese v2.16b,v0.16b
|
||||
aesmc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2],#16
|
||||
subs w3,w3,#2
|
||||
aese v2.16b,v1.16b
|
||||
aesmc v2.16b,v2.16b
|
||||
ld1 {v1.4s},[x2],#16
|
||||
b.gt .Loop_enc
|
||||
|
||||
aese v2.16b,v0.16b
|
||||
aesmc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2]
|
||||
aese v2.16b,v1.16b
|
||||
eor v2.16b,v2.16b,v0.16b
|
||||
|
||||
st1 {v2.16b},[x1]
|
||||
ret
|
||||
.size aes_hw_encrypt,.-aes_hw_encrypt
|
||||
.globl aes_hw_decrypt
|
||||
.hidden aes_hw_decrypt
|
||||
.type aes_hw_decrypt,%function
|
||||
.align 5
|
||||
aes_hw_decrypt:
|
||||
ldr w3,[x2,#240]
|
||||
ld1 {v0.4s},[x2],#16
|
||||
ld1 {v2.16b},[x0]
|
||||
sub w3,w3,#2
|
||||
ld1 {v1.4s},[x2],#16
|
||||
|
||||
.Loop_dec:
|
||||
aesd v2.16b,v0.16b
|
||||
aesimc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2],#16
|
||||
subs w3,w3,#2
|
||||
aesd v2.16b,v1.16b
|
||||
aesimc v2.16b,v2.16b
|
||||
ld1 {v1.4s},[x2],#16
|
||||
b.gt .Loop_dec
|
||||
|
||||
aesd v2.16b,v0.16b
|
||||
aesimc v2.16b,v2.16b
|
||||
ld1 {v0.4s},[x2]
|
||||
aesd v2.16b,v1.16b
|
||||
eor v2.16b,v2.16b,v0.16b
|
||||
|
||||
st1 {v2.16b},[x1]
|
||||
ret
|
||||
.size aes_hw_decrypt,.-aes_hw_decrypt
|
||||
.globl aes_hw_cbc_encrypt
|
||||
.hidden aes_hw_cbc_encrypt
|
||||
.type aes_hw_cbc_encrypt,%function
|
||||
.align 5
|
||||
aes_hw_cbc_encrypt:
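// Editorial note (added): x0 = in, x1 = out, x2 = length in bytes,
// x3 = key schedule, x4 = IV, w5 = enc flag. Encryption is necessarily
// serial (each block depends on the previous ciphertext), while the
// decrypt path below (.Lcbc_dec) works on three blocks per iteration.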
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
subs x2,x2,#16
|
||||
mov x8,#16
|
||||
b.lo .Lcbc_abort
|
||||
csel x8,xzr,x8,eq
|
||||
|
||||
cmp w5,#0 // en- or decrypting?
|
||||
ldr w5,[x3,#240]
|
||||
and x2,x2,#-16
|
||||
ld1 {v6.16b},[x4]
|
||||
ld1 {v0.16b},[x0],x8
|
||||
|
||||
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
|
||||
sub w5,w5,#6
|
||||
add x7,x3,x5,lsl#4 // pointer to last 7 round keys
|
||||
sub w5,w5,#2
|
||||
ld1 {v18.4s,v19.4s},[x7],#32
|
||||
ld1 {v20.4s,v21.4s},[x7],#32
|
||||
ld1 {v22.4s,v23.4s},[x7],#32
|
||||
ld1 {v7.4s},[x7]
|
||||
|
||||
add x7,x3,#32
|
||||
mov w6,w5
|
||||
b.eq .Lcbc_dec
|
||||
|
||||
cmp w5,#2
|
||||
eor v0.16b,v0.16b,v6.16b
|
||||
eor v5.16b,v16.16b,v7.16b
|
||||
b.eq .Lcbc_enc128
|
||||
|
||||
ld1 {v2.4s,v3.4s},[x7]
|
||||
add x7,x3,#16
|
||||
add x6,x3,#16*4
|
||||
add x12,x3,#16*5
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
add x14,x3,#16*6
|
||||
add x3,x3,#16*7
|
||||
b .Lenter_cbc_enc
|
||||
|
||||
.align 4
|
||||
.Loop_cbc_enc:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
st1 {v6.16b},[x1],#16
|
||||
.Lenter_cbc_enc:
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v2.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.4s},[x6]
|
||||
cmp w5,#4
|
||||
aese v0.16b,v3.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v17.4s},[x12]
|
||||
b.eq .Lcbc_enc192
|
||||
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.4s},[x14]
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v17.4s},[x3]
|
||||
nop
|
||||
|
||||
.Lcbc_enc192:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
subs x2,x2,#16
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
csel x8,xzr,x8,eq
|
||||
aese v0.16b,v18.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v19.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.16b},[x0],x8
|
||||
aese v0.16b,v20.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
eor v16.16b,v16.16b,v5.16b
|
||||
aese v0.16b,v21.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v17.4s},[x7] // re-pre-load rndkey[1]
|
||||
aese v0.16b,v22.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v23.16b
|
||||
eor v6.16b,v0.16b,v7.16b
|
||||
b.hs .Loop_cbc_enc
|
||||
|
||||
st1 {v6.16b},[x1],#16
|
||||
b .Lcbc_done
|
||||
|
||||
.align 5
|
||||
.Lcbc_enc128:
|
||||
ld1 {v2.4s,v3.4s},[x7]
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
b .Lenter_cbc_enc128
|
||||
.Loop_cbc_enc128:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
st1 {v6.16b},[x1],#16
|
||||
.Lenter_cbc_enc128:
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
subs x2,x2,#16
|
||||
aese v0.16b,v2.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
csel x8,xzr,x8,eq
|
||||
aese v0.16b,v3.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v18.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v19.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
ld1 {v16.16b},[x0],x8
|
||||
aese v0.16b,v20.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v21.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v0.16b,v22.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
eor v16.16b,v16.16b,v5.16b
|
||||
aese v0.16b,v23.16b
|
||||
eor v6.16b,v0.16b,v7.16b
|
||||
b.hs .Loop_cbc_enc128
|
||||
|
||||
st1 {v6.16b},[x1],#16
|
||||
b .Lcbc_done
|
||||
.align 5
|
||||
.Lcbc_dec:
|
||||
ld1 {v18.16b},[x0],#16
|
||||
subs x2,x2,#32 // bias
|
||||
add w6,w5,#2
|
||||
orr v3.16b,v0.16b,v0.16b
|
||||
orr v1.16b,v0.16b,v0.16b
|
||||
orr v19.16b,v18.16b,v18.16b
|
||||
b.lo .Lcbc_dec_tail
|
||||
|
||||
orr v1.16b,v18.16b,v18.16b
|
||||
ld1 {v18.16b},[x0],#16
|
||||
orr v2.16b,v0.16b,v0.16b
|
||||
orr v3.16b,v1.16b,v1.16b
|
||||
orr v19.16b,v18.16b,v18.16b
|
||||
|
||||
.Loop3x_cbc_dec:
|
||||
aesd v0.16b,v16.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aesd v0.16b,v17.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt .Loop3x_cbc_dec
|
||||
|
||||
aesd v0.16b,v16.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v4.16b,v6.16b,v7.16b
|
||||
subs x2,x2,#0x30
|
||||
eor v5.16b,v2.16b,v7.16b
|
||||
csel x6,x2,x6,lo // x6, w6, is zero at this point
|
||||
aesd v0.16b,v17.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v17.16b,v3.16b,v7.16b
|
||||
add	x0,x0,x6		// x0 is adjusted in such a way that
|
||||
// at exit from the loop v1.16b-v18.16b
|
||||
// are loaded with last "words"
|
||||
orr v6.16b,v19.16b,v19.16b
|
||||
mov x7,x3
|
||||
aesd v0.16b,v20.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v20.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v20.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v2.16b},[x0],#16
|
||||
aesd v0.16b,v21.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v21.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v21.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v3.16b},[x0],#16
|
||||
aesd v0.16b,v22.16b
|
||||
aesimc v0.16b,v0.16b
|
||||
aesd v1.16b,v22.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v22.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v19.16b},[x0],#16
|
||||
aesd v0.16b,v23.16b
|
||||
aesd v1.16b,v23.16b
|
||||
aesd v18.16b,v23.16b
|
||||
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
|
||||
add w6,w5,#2
|
||||
eor v4.16b,v4.16b,v0.16b
|
||||
eor v5.16b,v5.16b,v1.16b
|
||||
eor v18.16b,v18.16b,v17.16b
|
||||
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
|
||||
st1 {v4.16b},[x1],#16
|
||||
orr v0.16b,v2.16b,v2.16b
|
||||
st1 {v5.16b},[x1],#16
|
||||
orr v1.16b,v3.16b,v3.16b
|
||||
st1 {v18.16b},[x1],#16
|
||||
orr v18.16b,v19.16b,v19.16b
|
||||
b.hs .Loop3x_cbc_dec
|
||||
|
||||
cmn x2,#0x30
|
||||
b.eq .Lcbc_done
|
||||
nop
|
||||
|
||||
.Lcbc_dec_tail:
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt .Lcbc_dec_tail
|
||||
|
||||
aesd v1.16b,v16.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v16.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
aesd v1.16b,v17.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v17.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
aesd v1.16b,v20.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v20.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
cmn x2,#0x20
|
||||
aesd v1.16b,v21.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v21.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v5.16b,v6.16b,v7.16b
|
||||
aesd v1.16b,v22.16b
|
||||
aesimc v1.16b,v1.16b
|
||||
aesd v18.16b,v22.16b
|
||||
aesimc v18.16b,v18.16b
|
||||
eor v17.16b,v3.16b,v7.16b
|
||||
aesd v1.16b,v23.16b
|
||||
aesd v18.16b,v23.16b
|
||||
b.eq .Lcbc_dec_one
|
||||
eor v5.16b,v5.16b,v1.16b
|
||||
eor v17.16b,v17.16b,v18.16b
|
||||
orr v6.16b,v19.16b,v19.16b
|
||||
st1 {v5.16b},[x1],#16
|
||||
st1 {v17.16b},[x1],#16
|
||||
b .Lcbc_done
|
||||
|
||||
.Lcbc_dec_one:
|
||||
eor v5.16b,v5.16b,v18.16b
|
||||
orr v6.16b,v19.16b,v19.16b
|
||||
st1 {v5.16b},[x1],#16
|
||||
|
||||
.Lcbc_done:
|
||||
st1 {v6.16b},[x4]
|
||||
.Lcbc_abort:
|
||||
ldr x29,[sp],#16
|
||||
ret
|
||||
.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt
|
||||
.globl aes_hw_ctr32_encrypt_blocks
|
||||
.hidden aes_hw_ctr32_encrypt_blocks
|
||||
.type aes_hw_ctr32_encrypt_blocks,%function
|
||||
.align 5
|
||||
aes_hw_ctr32_encrypt_blocks:
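// Editorial note (added): x0 = in, x1 = out, x2 = number of 16-byte
// blocks, x3 = key schedule, x4 = counter block. The low 32 bits of
// the counter are kept big-endian in w8 and bumped per block; the main
// loop keystreams three blocks at a time, with .Lctr32_tail handling
// the remaining one or two.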
|
||||
stp x29,x30,[sp,#-16]!
|
||||
add x29,sp,#0
|
||||
ldr w5,[x3,#240]
|
||||
|
||||
ldr w8, [x4, #12]
|
||||
ld1 {v0.4s},[x4]
|
||||
|
||||
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
|
||||
sub w5,w5,#4
|
||||
mov x12,#16
|
||||
cmp x2,#2
|
||||
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
|
||||
sub w5,w5,#2
|
||||
ld1 {v20.4s,v21.4s},[x7],#32
|
||||
ld1 {v22.4s,v23.4s},[x7],#32
|
||||
ld1 {v7.4s},[x7]
|
||||
add x7,x3,#32
|
||||
mov w6,w5
|
||||
csel x12,xzr,x12,lo
|
||||
#ifndef __ARMEB__
|
||||
rev w8, w8
|
||||
#endif
|
||||
orr v1.16b,v0.16b,v0.16b
|
||||
add w10, w8, #1
|
||||
orr v18.16b,v0.16b,v0.16b
|
||||
add w8, w8, #2
|
||||
orr v6.16b,v0.16b,v0.16b
|
||||
rev w10, w10
|
||||
mov v1.s[3],w10
|
||||
b.ls .Lctr32_tail
|
||||
rev w12, w8
|
||||
sub x2,x2,#3 // bias
|
||||
mov v18.s[3],w12
|
||||
b .Loop3x_ctr32
|
||||
|
||||
.align 4
|
||||
.Loop3x_ctr32:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
aese v18.16b,v16.16b
|
||||
aesmc v18.16b,v18.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v17.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
aese v18.16b,v17.16b
|
||||
aesmc v18.16b,v18.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt .Loop3x_ctr32
|
||||
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v4.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v5.16b,v1.16b
|
||||
ld1 {v2.16b},[x0],#16
|
||||
orr v0.16b,v6.16b,v6.16b
|
||||
aese v18.16b,v16.16b
|
||||
aesmc v18.16b,v18.16b
|
||||
ld1 {v3.16b},[x0],#16
|
||||
orr v1.16b,v6.16b,v6.16b
|
||||
aese v4.16b,v17.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v17.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
ld1 {v19.16b},[x0],#16
|
||||
mov x7,x3
|
||||
aese v18.16b,v17.16b
|
||||
aesmc v17.16b,v18.16b
|
||||
orr v18.16b,v6.16b,v6.16b
|
||||
add w9,w8,#1
|
||||
aese v4.16b,v20.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v20.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
eor v2.16b,v2.16b,v7.16b
|
||||
add w10,w8,#2
|
||||
aese v17.16b,v20.16b
|
||||
aesmc v17.16b,v17.16b
|
||||
eor v3.16b,v3.16b,v7.16b
|
||||
add w8,w8,#3
|
||||
aese v4.16b,v21.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v21.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
eor v19.16b,v19.16b,v7.16b
|
||||
rev w9,w9
|
||||
aese v17.16b,v21.16b
|
||||
aesmc v17.16b,v17.16b
|
||||
mov v0.s[3], w9
|
||||
rev w10,w10
|
||||
aese v4.16b,v22.16b
|
||||
aesmc v4.16b,v4.16b
|
||||
aese v5.16b,v22.16b
|
||||
aesmc v5.16b,v5.16b
|
||||
mov v1.s[3], w10
|
||||
rev w12,w8
|
||||
aese v17.16b,v22.16b
|
||||
aesmc v17.16b,v17.16b
|
||||
mov v18.s[3], w12
|
||||
subs x2,x2,#3
|
||||
aese v4.16b,v23.16b
|
||||
aese v5.16b,v23.16b
|
||||
aese v17.16b,v23.16b
|
||||
|
||||
eor v2.16b,v2.16b,v4.16b
|
||||
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
|
||||
st1 {v2.16b},[x1],#16
|
||||
eor v3.16b,v3.16b,v5.16b
|
||||
mov w6,w5
|
||||
st1 {v3.16b},[x1],#16
|
||||
eor v19.16b,v19.16b,v17.16b
|
||||
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
|
||||
st1 {v19.16b},[x1],#16
|
||||
b.hs .Loop3x_ctr32
|
||||
|
||||
adds x2,x2,#3
|
||||
b.eq .Lctr32_done
|
||||
cmp x2,#1
|
||||
mov x12,#16
|
||||
csel x12,xzr,x12,eq
|
||||
|
||||
.Lctr32_tail:
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v16.4s},[x7],#16
|
||||
subs w6,w6,#2
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v17.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v17.4s},[x7],#16
|
||||
b.gt .Lctr32_tail
|
||||
|
||||
aese v0.16b,v16.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v16.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
aese v0.16b,v17.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v17.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v2.16b},[x0],x12
|
||||
aese v0.16b,v20.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v20.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
ld1 {v3.16b},[x0]
|
||||
aese v0.16b,v21.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v21.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
eor v2.16b,v2.16b,v7.16b
|
||||
aese v0.16b,v22.16b
|
||||
aesmc v0.16b,v0.16b
|
||||
aese v1.16b,v22.16b
|
||||
aesmc v1.16b,v1.16b
|
||||
eor v3.16b,v3.16b,v7.16b
|
||||
aese v0.16b,v23.16b
|
||||
aese v1.16b,v23.16b
|
||||
|
||||
cmp x2,#1
|
||||
eor v2.16b,v2.16b,v0.16b
|
||||
eor v3.16b,v3.16b,v1.16b
|
||||
st1 {v2.16b},[x1],#16
|
||||
b.eq .Lctr32_done
|
||||
st1 {v3.16b},[x1]
|
||||
|
||||
.Lctr32_done:
|
||||
ldr x29,[sp],#16
|
||||
ret
|
||||
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
|
||||
#endif
|
||||
#endif
|
File diff suppressed because it is too large
|
@ -0,0 +1,235 @@
|
|||
#if defined(__aarch64__)
|
||||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
#if !defined(__clang__) || defined(BORINGSSL_CLANG_SUPPORTS_DOT_ARCH)
|
||||
.arch armv8-a+crypto
|
||||
#endif
|
||||
.globl gcm_init_v8
|
||||
.hidden gcm_init_v8
|
||||
.type gcm_init_v8,%function
|
||||
.align 4
|
||||
gcm_init_v8:
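// Editorial note (added): x1 points at the raw hash subkey H and x0 at
// the output Htable. The code below derives the "twisted" H (H shifted
// left by one modulo the GHASH polynomial, kept in the rotated form the
// multiply routines expect), then squares it to obtain H^2, and stores
// Htable[0..2] = { twisted H, packed Karatsuba values, H^2 }.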
|
||||
ld1 {v17.2d},[x1] //load input H
|
||||
movi v19.16b,#0xe1
|
||||
shl v19.2d,v19.2d,#57 //0xc2.0
|
||||
ext v3.16b,v17.16b,v17.16b,#8
|
||||
ushr v18.2d,v19.2d,#63
|
||||
dup v17.4s,v17.s[1]
|
||||
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
|
||||
ushr v18.2d,v3.2d,#63
|
||||
sshr v17.4s,v17.4s,#31 //broadcast carry bit
|
||||
and v18.16b,v18.16b,v16.16b
|
||||
shl v3.2d,v3.2d,#1
|
||||
ext v18.16b,v18.16b,v18.16b,#8
|
||||
and v16.16b,v16.16b,v17.16b
|
||||
orr v3.16b,v3.16b,v18.16b //H<<<=1
|
||||
eor v20.16b,v3.16b,v16.16b //twisted H
|
||||
st1 {v20.2d},[x0],#16 //store Htable[0]
|
||||
|
||||
//calculate H^2
|
||||
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
|
||||
pmull v0.1q,v20.1d,v20.1d
|
||||
eor v16.16b,v16.16b,v20.16b
|
||||
pmull2 v2.1q,v20.2d,v20.2d
|
||||
pmull v1.1q,v16.1d,v16.1d
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase
|
||||
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v18.16b,v18.16b,v2.16b
|
||||
eor v22.16b,v0.16b,v18.16b
|
||||
|
||||
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
|
||||
eor v17.16b,v17.16b,v22.16b
|
||||
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
|
||||
st1 {v21.2d,v22.2d},[x0] //store Htable[1..2]
|
||||
|
||||
ret
|
||||
.size gcm_init_v8,.-gcm_init_v8
|
||||
.globl gcm_gmult_v8
|
||||
.hidden gcm_gmult_v8
|
||||
.type gcm_gmult_v8,%function
|
||||
.align 4
|
||||
gcm_gmult_v8:
|
||||
ld1 {v17.2d},[x0] //load Xi
|
||||
movi v19.16b,#0xe1
|
||||
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
|
||||
shl v19.2d,v19.2d,#57
|
||||
#ifndef __ARMEB__
|
||||
rev64 v17.16b,v17.16b
|
||||
#endif
|
||||
ext v3.16b,v17.16b,v17.16b,#8
|
||||
|
||||
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
|
||||
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
|
||||
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
|
||||
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
|
||||
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v18.16b,v18.16b,v2.16b
|
||||
eor v0.16b,v0.16b,v18.16b
|
||||
|
||||
#ifndef __ARMEB__
|
||||
rev64 v0.16b,v0.16b
|
||||
#endif
|
||||
ext v0.16b,v0.16b,v0.16b,#8
|
||||
st1 {v0.2d},[x0] //write out Xi
|
||||
|
||||
ret
|
||||
.size gcm_gmult_v8,.-gcm_gmult_v8
|
||||
.globl gcm_ghash_v8
|
||||
.hidden gcm_ghash_v8
|
||||
.type gcm_ghash_v8,%function
|
||||
.align 4
|
||||
gcm_ghash_v8:
|
||||
ld1 {v0.2d},[x0] //load [rotated] Xi
|
||||
//"[rotated]" means that
|
||||
//loaded value would have
|
||||
//to be rotated in order to
|
||||
//make it appear as in
|
||||
//algorithm specification
|
||||
subs x3,x3,#32 //see if x3 is 32 or larger
|
||||
mov x12,#16 //x12 is used as post-
|
||||
//increment for input pointer;
|
||||
//as loop is modulo-scheduled
|
||||
//x12 is zeroed just in time
|
||||
//to preclude overstepping
|
||||
//inp[len], which means that
|
||||
//last block[s] are actually
|
||||
//loaded twice, but last
|
||||
//copy is not processed
|
||||
ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2
|
||||
movi v19.16b,#0xe1
|
||||
ld1 {v22.2d},[x1]
|
||||
csel x12,xzr,x12,eq //is it time to zero x12?
|
||||
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi
|
||||
ld1 {v16.2d},[x2],#16 //load [rotated] I[0]
|
||||
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
|
||||
#ifndef __ARMEB__
|
||||
rev64 v16.16b,v16.16b
|
||||
rev64 v0.16b,v0.16b
|
||||
#endif
|
||||
ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0]
|
||||
b.lo .Lodd_tail_v8 //x3 was less than 32
|
||||
ld1 {v17.2d},[x2],x12 //load [rotated] I[1]
|
||||
#ifndef __ARMEB__
|
||||
rev64 v17.16b,v17.16b
|
||||
#endif
|
||||
ext v7.16b,v17.16b,v17.16b,#8
|
||||
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
|
||||
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
|
||||
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
|
||||
pmull2 v6.1q,v20.2d,v7.2d
|
||||
b .Loop_mod2x_v8
|
||||
|
||||
.align 4
|
||||
.Loop_mod2x_v8:
|
||||
ext v18.16b,v3.16b,v3.16b,#8
|
||||
subs x3,x3,#32 //is there more data?
|
||||
pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
|
||||
csel x12,xzr,x12,lo //is it time to zero x12?
|
||||
|
||||
pmull v5.1q,v21.1d,v17.1d
|
||||
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
|
||||
pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
|
||||
eor v0.16b,v0.16b,v4.16b //accumulate
|
||||
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
|
||||
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
|
||||
|
||||
eor v2.16b,v2.16b,v6.16b
|
||||
csel x12,xzr,x12,eq //is it time to zero x12?
|
||||
eor v1.16b,v1.16b,v5.16b
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3]
|
||||
#ifndef __ARMEB__
|
||||
rev64 v16.16b,v16.16b
|
||||
#endif
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
|
||||
|
||||
#ifndef __ARMEB__
|
||||
rev64 v17.16b,v17.16b
|
||||
#endif
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
ext v7.16b,v17.16b,v17.16b,#8
|
||||
ext v3.16b,v16.16b,v16.16b,#8
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
|
||||
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v3.16b,v3.16b,v18.16b
|
||||
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
|
||||
eor v3.16b,v3.16b,v0.16b
|
||||
pmull2 v6.1q,v20.2d,v7.2d
|
||||
b.hs .Loop_mod2x_v8 //there was at least 32 more bytes
|
||||
|
||||
eor v2.16b,v2.16b,v18.16b
|
||||
ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b
|
||||
adds x3,x3,#32 //re-construct x3
|
||||
eor v0.16b,v0.16b,v2.16b //re-construct v0.16b
|
||||
b.eq .Ldone_v8 //is x3 zero?
|
||||
.Lodd_tail_v8:
|
||||
ext v18.16b,v0.16b,v0.16b,#8
|
||||
eor v3.16b,v3.16b,v0.16b //inp^=Xi
|
||||
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
|
||||
|
||||
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
|
||||
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
|
||||
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
|
||||
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
|
||||
eor v18.16b,v0.16b,v2.16b
|
||||
eor v1.16b,v1.16b,v17.16b
|
||||
eor v1.16b,v1.16b,v18.16b
|
||||
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
|
||||
|
||||
ins v2.d[0],v1.d[1]
|
||||
ins v1.d[1],v0.d[0]
|
||||
eor v0.16b,v1.16b,v18.16b
|
||||
|
||||
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
|
||||
pmull v0.1q,v0.1d,v19.1d
|
||||
eor v18.16b,v18.16b,v2.16b
|
||||
eor v0.16b,v0.16b,v18.16b
|
||||
|
||||
.Ldone_v8:
|
||||
#ifndef __ARMEB__
|
||||
rev64 v0.16b,v0.16b
|
||||
#endif
|
||||
ext v0.16b,v0.16b,v0.16b,#8
|
||||
st1 {v0.2d},[x0] //write out Xi
|
||||
|
||||
ret
|
||||
.size gcm_ghash_v8,.-gcm_ghash_v8
|
||||
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 2
|
||||
.align 2
|
||||
#endif
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,763 @@
|
|||
#if defined(__arm__)
|
||||
#include <openssl/arm_arch.h>
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.text
|
||||
.arch	armv7-a	@ don't confuse not-so-latest binutils with armv8 :-)
|
||||
.fpu neon
|
||||
.code 32
|
||||
#undef __thumb2__
|
||||
.align 5
|
||||
.Lrcon:
|
||||
.long 0x01,0x01,0x01,0x01
|
||||
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
|
||||
.long 0x1b,0x1b,0x1b,0x1b
|
||||
|
||||
.globl aes_hw_set_encrypt_key
|
||||
.hidden aes_hw_set_encrypt_key
|
||||
.type aes_hw_set_encrypt_key,%function
|
||||
.align 5
|
||||
aes_hw_set_encrypt_key:
|
||||
.Lenc_key:
|
||||
mov r3,#-1
|
||||
cmp r0,#0
|
||||
beq .Lenc_key_abort
|
||||
cmp r2,#0
|
||||
beq .Lenc_key_abort
|
||||
mov r3,#-2
|
||||
cmp r1,#128
|
||||
blt .Lenc_key_abort
|
||||
cmp r1,#256
|
||||
bgt .Lenc_key_abort
|
||||
tst r1,#0x3f
|
||||
bne .Lenc_key_abort
|
||||
|
||||
adr r3,.Lrcon
|
||||
cmp r1,#192
|
||||
|
||||
veor q0,q0,q0
|
||||
vld1.8 {q3},[r0]!
|
||||
mov r1,#8 @ reuse r1
|
||||
vld1.32 {q1,q2},[r3]!
|
||||
|
||||
blt .Loop128
|
||||
beq .L192
|
||||
b .L256
|
||||
|
||||
.align 4
|
||||
.Loop128:
|
||||
vtbl.8 d20,{q3},d4
|
||||
vtbl.8 d21,{q3},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q3},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
subs r1,r1,#1
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q3,q3,q10
|
||||
bne .Loop128
|
||||
|
||||
vld1.32 {q1},[r3]
|
||||
|
||||
vtbl.8 d20,{q3},d4
|
||||
vtbl.8 d21,{q3},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q3},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q3,q3,q10
|
||||
|
||||
vtbl.8 d20,{q3},d4
|
||||
vtbl.8 d21,{q3},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q3},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
veor q3,q3,q10
|
||||
vst1.32 {q3},[r2]
|
||||
add r2,r2,#0x50
|
||||
|
||||
mov r12,#10
|
||||
b .Ldone
|
||||
|
||||
.align 4
|
||||
.L192:
|
||||
vld1.8 {d16},[r0]!
|
||||
vmov.i8 q10,#8 @ borrow q10
|
||||
vst1.32 {q3},[r2]!
|
||||
vsub.i8 q2,q2,q10 @ adjust the mask
|
||||
|
||||
.Loop192:
|
||||
vtbl.8 d20,{q8},d4
|
||||
vtbl.8 d21,{q8},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {d16},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
subs r1,r1,#1
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
|
||||
vdup.32 q9,d7[1]
|
||||
veor q9,q9,q8
|
||||
veor q10,q10,q1
|
||||
vext.8 q8,q0,q8,#12
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q8,q8,q9
|
||||
veor q3,q3,q10
|
||||
veor q8,q8,q10
|
||||
vst1.32 {q3},[r2]!
|
||||
bne .Loop192
|
||||
|
||||
mov r12,#12
|
||||
add r2,r2,#0x20
|
||||
b .Ldone
|
||||
|
||||
.align 4
|
||||
.L256:
|
||||
vld1.8 {q8},[r0]
|
||||
mov r1,#7
|
||||
mov r12,#14
|
||||
vst1.32 {q3},[r2]!
|
||||
|
||||
.Loop256:
|
||||
vtbl.8 d20,{q8},d4
|
||||
vtbl.8 d21,{q8},d5
|
||||
vext.8 q9,q0,q3,#12
|
||||
vst1.32 {q8},[r2]!
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
subs r1,r1,#1
|
||||
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q3,q3,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q10,q10,q1
|
||||
veor q3,q3,q9
|
||||
vshl.u8 q1,q1,#1
|
||||
veor q3,q3,q10
|
||||
vst1.32 {q3},[r2]!
|
||||
beq .Ldone
|
||||
|
||||
vdup.32 q10,d7[1]
|
||||
vext.8 q9,q0,q8,#12
|
||||
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
|
||||
|
||||
veor q8,q8,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q8,q8,q9
|
||||
vext.8 q9,q0,q9,#12
|
||||
veor q8,q8,q9
|
||||
|
||||
veor q8,q8,q10
|
||||
b .Loop256
|
||||
|
||||
.Ldone:
|
||||
str r12,[r2]
|
||||
mov r3,#0
|
||||
|
||||
.Lenc_key_abort:
|
||||
mov r0,r3 @ return value
|
||||
|
||||
bx lr
|
||||
.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key
|
||||
|
||||
.globl aes_hw_set_decrypt_key
|
||||
.hidden aes_hw_set_decrypt_key
|
||||
.type aes_hw_set_decrypt_key,%function
|
||||
.align 5
|
||||
aes_hw_set_decrypt_key:
|
||||
stmdb sp!,{r4,lr}
|
||||
bl .Lenc_key
|
||||
|
||||
cmp r0,#0
|
||||
bne .Ldec_key_abort
|
||||
|
||||
sub r2,r2,#240 @ restore original r2
|
||||
mov r4,#-16
|
||||
add r0,r2,r12,lsl#4 @ end of key schedule
|
||||
|
||||
vld1.32 {q0},[r2]
|
||||
vld1.32 {q1},[r0]
|
||||
vst1.32 {q0},[r0],r4
|
||||
vst1.32 {q1},[r2]!
|
||||
|
||||
.Loop_imc:
|
||||
vld1.32 {q0},[r2]
|
||||
vld1.32 {q1},[r0]
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
vst1.32 {q0},[r0],r4
|
||||
vst1.32 {q1},[r2]!
|
||||
cmp r0,r2
|
||||
bhi .Loop_imc
|
||||
|
||||
vld1.32 {q0},[r2]
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
vst1.32 {q0},[r0]
|
||||
|
||||
eor r0,r0,r0 @ return value
|
||||
.Ldec_key_abort:
|
||||
ldmia sp!,{r4,pc}
|
||||
.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key
|
||||
.globl aes_hw_encrypt
|
||||
.hidden aes_hw_encrypt
|
||||
.type aes_hw_encrypt,%function
|
||||
.align 5
|
||||
aes_hw_encrypt:
|
||||
ldr r3,[r2,#240]
|
||||
vld1.32 {q0},[r2]!
|
||||
vld1.8 {q2},[r0]
|
||||
sub r3,r3,#2
|
||||
vld1.32 {q1},[r2]!
|
||||
|
||||
.Loop_enc:
|
||||
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
|
||||
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
|
||||
vld1.32 {q0},[r2]!
|
||||
subs r3,r3,#2
|
||||
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
|
||||
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
|
||||
vld1.32 {q1},[r2]!
|
||||
bgt .Loop_enc
|
||||
|
||||
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
|
||||
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
|
||||
vld1.32 {q0},[r2]
|
||||
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
|
||||
veor q2,q2,q0
|
||||
|
||||
vst1.8 {q2},[r1]
|
||||
bx lr
|
||||
.size aes_hw_encrypt,.-aes_hw_encrypt
|
||||
.globl aes_hw_decrypt
|
||||
.hidden aes_hw_decrypt
|
||||
.type aes_hw_decrypt,%function
|
||||
.align 5
|
||||
aes_hw_decrypt:
|
||||
ldr r3,[r2,#240]
|
||||
vld1.32 {q0},[r2]!
|
||||
vld1.8 {q2},[r0]
|
||||
sub r3,r3,#2
|
||||
vld1.32 {q1},[r2]!
|
||||
|
||||
.Loop_dec:
|
||||
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
|
||||
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
|
||||
vld1.32 {q0},[r2]!
|
||||
subs r3,r3,#2
|
||||
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
|
||||
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
|
||||
vld1.32 {q1},[r2]!
|
||||
bgt .Loop_dec
|
||||
|
||||
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
|
||||
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
|
||||
vld1.32 {q0},[r2]
|
||||
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
|
||||
veor q2,q2,q0
|
||||
|
||||
vst1.8 {q2},[r1]
|
||||
bx lr
|
||||
.size aes_hw_decrypt,.-aes_hw_decrypt
|
||||
.globl aes_hw_cbc_encrypt
|
||||
.hidden aes_hw_cbc_encrypt
|
||||
.type aes_hw_cbc_encrypt,%function
|
||||
.align 5
|
||||
aes_hw_cbc_encrypt:
|
||||
mov ip,sp
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,lr}
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
|
||||
ldmia ip,{r4,r5} @ load remaining args
|
||||
subs r2,r2,#16
|
||||
mov r8,#16
|
||||
blo .Lcbc_abort
|
||||
moveq r8,#0
|
||||
|
||||
cmp r5,#0 @ en- or decrypting?
|
||||
ldr r5,[r3,#240]
|
||||
and r2,r2,#-16
|
||||
vld1.8 {q6},[r4]
|
||||
vld1.8 {q0},[r0],r8
|
||||
|
||||
vld1.32 {q8,q9},[r3] @ load key schedule...
|
||||
sub r5,r5,#6
|
||||
add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
|
||||
sub r5,r5,#2
|
||||
vld1.32 {q10,q11},[r7]!
|
||||
vld1.32 {q12,q13},[r7]!
|
||||
vld1.32 {q14,q15},[r7]!
|
||||
vld1.32 {q7},[r7]
|
||||
|
||||
add r7,r3,#32
|
||||
mov r6,r5
|
||||
beq .Lcbc_dec
|
||||
|
||||
cmp r5,#2
|
||||
veor q0,q0,q6
|
||||
veor q5,q8,q7
|
||||
beq .Lcbc_enc128
|
||||
|
||||
vld1.32 {q2,q3},[r7]
|
||||
add r7,r3,#16
|
||||
add r6,r3,#16*4
|
||||
add r12,r3,#16*5
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
add r14,r3,#16*6
|
||||
add r3,r3,#16*7
|
||||
b .Lenter_cbc_enc
|
||||
|
||||
.align 4
|
||||
.Loop_cbc_enc:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vst1.8 {q6},[r1]!
|
||||
.Lenter_cbc_enc:
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q8},[r6]
|
||||
cmp r5,#4
|
||||
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q9},[r12]
|
||||
beq .Lcbc_enc192
|
||||
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q8},[r14]
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q9},[r3]
|
||||
nop
|
||||
|
||||
.Lcbc_enc192:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
subs r2,r2,#16
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
moveq r8,#0
|
||||
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.8 {q8},[r0],r8
|
||||
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
veor q8,q8,q5
|
||||
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.32 {q9},[r7] @ re-pre-load rndkey[1]
|
||||
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
|
||||
veor q6,q0,q7
|
||||
bhs .Loop_cbc_enc
|
||||
|
||||
vst1.8 {q6},[r1]!
|
||||
b .Lcbc_done
|
||||
|
||||
.align 5
|
||||
.Lcbc_enc128:
|
||||
vld1.32 {q2,q3},[r7]
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
b .Lenter_cbc_enc128
|
||||
.Loop_cbc_enc128:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vst1.8 {q6},[r1]!
|
||||
.Lenter_cbc_enc128:
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
subs r2,r2,#16
|
||||
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
moveq r8,#0
|
||||
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
vld1.8 {q8},[r0],r8
|
||||
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
veor q8,q8,q5
|
||||
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
|
||||
veor q6,q0,q7
|
||||
bhs .Loop_cbc_enc128
|
||||
|
||||
vst1.8 {q6},[r1]!
|
||||
b .Lcbc_done
|
||||
.align 5
|
||||
.Lcbc_dec:
|
||||
vld1.8 {q10},[r0]!
|
||||
subs r2,r2,#32 @ bias
|
||||
add r6,r5,#2
|
||||
vorr q3,q0,q0
|
||||
vorr q1,q0,q0
|
||||
vorr q11,q10,q10
|
||||
blo .Lcbc_dec_tail
|
||||
|
||||
vorr q1,q10,q10
|
||||
vld1.8 {q10},[r0]!
|
||||
vorr q2,q0,q0
|
||||
vorr q3,q1,q1
|
||||
vorr q11,q10,q10
|
||||
|
||||
.Loop3x_cbc_dec:
|
||||
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt .Loop3x_cbc_dec
|
||||
|
||||
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q4,q6,q7
|
||||
subs r2,r2,#0x30
|
||||
veor q5,q2,q7
|
||||
movlo	r6,r2			@ r6 is zero at this point
|
||||
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q9,q3,q7
|
||||
add	r0,r0,r6		@ r0 is adjusted in such a way that
|
||||
@ at exit from the loop q1-q10
|
||||
@ are loaded with last "words"
|
||||
vorr q6,q11,q11
|
||||
mov r7,r3
|
||||
.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.8 {q2},[r0]!
|
||||
.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.8 {q3},[r0]!
|
||||
.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
|
||||
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
|
||||
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.8 {q11},[r0]!
|
||||
.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
|
||||
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
|
||||
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
|
||||
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
|
||||
add r6,r5,#2
|
||||
veor q4,q4,q0
|
||||
veor q5,q5,q1
|
||||
veor q10,q10,q9
|
||||
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
|
||||
vst1.8 {q4},[r1]!
|
||||
vorr q0,q2,q2
|
||||
vst1.8 {q5},[r1]!
|
||||
vorr q1,q3,q3
|
||||
vst1.8 {q10},[r1]!
|
||||
vorr q10,q11,q11
|
||||
bhs .Loop3x_cbc_dec
|
||||
|
||||
cmn r2,#0x30
|
||||
beq .Lcbc_done
|
||||
nop
|
||||
|
||||
.Lcbc_dec_tail:
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt .Lcbc_dec_tail
|
||||
|
||||
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
cmn r2,#0x20
|
||||
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q5,q6,q7
|
||||
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
|
||||
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
|
||||
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
|
||||
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
|
||||
veor q9,q3,q7
|
||||
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
|
||||
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
|
||||
beq .Lcbc_dec_one
|
||||
veor q5,q5,q1
|
||||
veor q9,q9,q10
|
||||
vorr q6,q11,q11
|
||||
vst1.8 {q5},[r1]!
|
||||
vst1.8 {q9},[r1]!
|
||||
b .Lcbc_done
|
||||
|
||||
.Lcbc_dec_one:
|
||||
veor q5,q5,q10
|
||||
vorr q6,q11,q11
|
||||
vst1.8 {q5},[r1]!
|
||||
|
||||
.Lcbc_done:
|
||||
vst1.8 {q6},[r4]
|
||||
.Lcbc_abort:
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,pc}
|
||||
.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt
|
||||
.globl aes_hw_ctr32_encrypt_blocks
|
||||
.hidden aes_hw_ctr32_encrypt_blocks
|
||||
.type aes_hw_ctr32_encrypt_blocks,%function
|
||||
.align 5
|
||||
aes_hw_ctr32_encrypt_blocks:
|
||||
mov ip,sp
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
|
||||
ldr r4, [ip] @ load remaining arg
|
||||
ldr r5,[r3,#240]
|
||||
|
||||
ldr r8, [r4, #12]
|
||||
vld1.32 {q0},[r4]
|
||||
|
||||
vld1.32 {q8,q9},[r3] @ load key schedule...
|
||||
sub r5,r5,#4
|
||||
mov r12,#16
|
||||
cmp r2,#2
|
||||
add r7,r3,r5,lsl#4 @ pointer to last 5 round keys
|
||||
sub r5,r5,#2
|
||||
vld1.32 {q12,q13},[r7]!
|
||||
vld1.32 {q14,q15},[r7]!
|
||||
vld1.32 {q7},[r7]
|
||||
add r7,r3,#32
|
||||
mov r6,r5
|
||||
movlo r12,#0
|
||||
#ifndef __ARMEB__
|
||||
rev r8, r8
|
||||
#endif
|
||||
vorr q1,q0,q0
|
||||
add r10, r8, #1
|
||||
vorr q10,q0,q0
|
||||
add r8, r8, #2
|
||||
vorr q6,q0,q0
|
||||
rev r10, r10
|
||||
vmov.32 d3[1],r10
|
||||
bls .Lctr32_tail
|
||||
rev r12, r8
|
||||
sub r2,r2,#3 @ bias
|
||||
vmov.32 d21[1],r12
|
||||
b .Loop3x_ctr32
|
||||
|
||||
.align 4
|
||||
.Loop3x_ctr32:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
|
||||
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
|
||||
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt .Loop3x_ctr32
|
||||
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
|
||||
vld1.8 {q2},[r0]!
|
||||
vorr q0,q6,q6
|
||||
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
|
||||
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
|
||||
vld1.8 {q3},[r0]!
|
||||
vorr q1,q6,q6
|
||||
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
vld1.8 {q11},[r0]!
|
||||
mov r7,r3
|
||||
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
|
||||
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
|
||||
vorr q10,q6,q6
|
||||
add r9,r8,#1
|
||||
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
veor q2,q2,q7
|
||||
add r10,r8,#2
|
||||
.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
|
||||
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
|
||||
veor q3,q3,q7
|
||||
add r8,r8,#3
|
||||
.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
veor q11,q11,q7
|
||||
rev r9,r9
|
||||
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
|
||||
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
|
||||
vmov.32 d1[1], r9
|
||||
rev r10,r10
|
||||
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
|
||||
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
|
||||
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
|
||||
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
|
||||
vmov.32 d3[1], r10
|
||||
rev r12,r8
|
||||
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
|
||||
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
|
||||
vmov.32 d21[1], r12
|
||||
subs r2,r2,#3
|
||||
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
|
||||
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
|
||||
.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
|
||||
|
||||
veor q2,q2,q4
|
||||
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
|
||||
vst1.8 {q2},[r1]!
|
||||
veor q3,q3,q5
|
||||
mov r6,r5
|
||||
vst1.8 {q3},[r1]!
|
||||
veor q11,q11,q9
|
||||
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
|
||||
vst1.8 {q11},[r1]!
|
||||
bhs .Loop3x_ctr32
|
||||
|
||||
adds r2,r2,#3
|
||||
beq .Lctr32_done
|
||||
cmp r2,#1
|
||||
mov r12,#16
|
||||
moveq r12,#0
|
||||
|
||||
.Lctr32_tail:
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.32 {q8},[r7]!
|
||||
subs r6,r6,#2
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.32 {q9},[r7]!
|
||||
bgt .Lctr32_tail
|
||||
|
||||
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.8 {q2},[r0],r12
|
||||
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
vld1.8 {q3},[r0]
|
||||
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
veor q2,q2,q7
|
||||
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
|
||||
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
|
||||
.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
|
||||
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
|
||||
veor q3,q3,q7
|
||||
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
|
||||
.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
|
||||
|
||||
cmp r2,#1
|
||||
veor q2,q2,q0
|
||||
veor q3,q3,q1
|
||||
vst1.8 {q2},[r1]!
|
||||
beq .Lctr32_done
|
||||
vst1.8 {q3},[r1]
|
||||
|
||||
.Lctr32_done:
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
|
||||
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
|
||||
#endif
|
||||
#endif
|
|
@ -0,0 +1,956 @@
|
|||
#if defined(__arm__)
|
||||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
#if defined(__thumb2__)
|
||||
.syntax unified
|
||||
.thumb
|
||||
#else
|
||||
.code 32
|
||||
#endif
|
||||
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.align 5
|
||||
.LOPENSSL_armcap:
|
||||
.word OPENSSL_armcap_P-.Lbn_mul_mont
|
||||
#endif
|
||||
|
||||
.globl bn_mul_mont
|
||||
.hidden bn_mul_mont
|
||||
.type bn_mul_mont,%function
|
||||
|
||||
.align 5
|
||||
bn_mul_mont:
|
||||
.Lbn_mul_mont:
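@ Editorial note (added): word-serial Montgomery multiplication,
@ rp[] = ap[]*bp[]*2^(-32*num) mod np[], with r0..r3 = rp, ap, bp, np
@ and &n0 / num taken from the stack. num < 2 returns 0; otherwise the
@ NEON path bn_mul8x_mont_neon is taken when num is a multiple of 8 and
@ the armcap word reports NEON, else the scalar .Lialu path runs and
@ returns 1 on completion.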
|
||||
ldr ip,[sp,#4] @ load num
|
||||
stmdb sp!,{r0,r2} @ sp points at argument block
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
tst ip,#7
|
||||
bne .Lialu
|
||||
adr r0,.Lbn_mul_mont
|
||||
ldr r2,.LOPENSSL_armcap
|
||||
ldr r0,[r0,r2]
|
||||
#ifdef __APPLE__
|
||||
ldr r0,[r0]
|
||||
#endif
|
||||
tst r0,#ARMV7_NEON @ NEON available?
|
||||
ldmia sp, {r0,r2}
|
||||
beq .Lialu
|
||||
add sp,sp,#8
|
||||
b bn_mul8x_mont_neon
|
||||
.align 4
|
||||
.Lialu:
|
||||
#endif
|
||||
cmp ip,#2
|
||||
mov r0,ip @ load num
|
||||
#ifdef __thumb2__
|
||||
ittt lt
|
||||
#endif
|
||||
movlt r0,#0
|
||||
addlt sp,sp,#2*4
|
||||
blt .Labrt
|
||||
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
|
||||
|
||||
mov r0,r0,lsl#2 @ rescale r0 for byte count
|
||||
sub sp,sp,r0 @ alloca(4*num)
|
||||
sub sp,sp,#4 @ +extra dword
|
||||
sub r0,r0,#4 @ "num=num-1"
|
||||
add r4,r2,r0 @ &bp[num-1]
|
||||
|
||||
add r0,sp,r0 @ r0 to point at &tp[num-1]
|
||||
ldr r8,[r0,#14*4] @ &n0
|
||||
ldr r2,[r2] @ bp[0]
|
||||
ldr r5,[r1],#4 @ ap[0],ap++
|
||||
ldr r6,[r3],#4 @ np[0],np++
|
||||
ldr r8,[r8] @ *n0
|
||||
str r4,[r0,#15*4] @ save &bp[num]
|
||||
|
||||
umull r10,r11,r5,r2 @ ap[0]*bp[0]
|
||||
str r8,[r0,#14*4] @ save n0 value
|
||||
mul r8,r10,r8 @ "tp[0]"*n0
|
||||
mov r12,#0
|
||||
umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
|
||||
mov r4,sp
|
||||
|
||||
.L1st:
|
||||
ldr r5,[r1],#4 @ ap[j],ap++
|
||||
mov r10,r11
|
||||
ldr r6,[r3],#4 @ np[j],np++
|
||||
mov r11,#0
|
||||
umlal r10,r11,r5,r2 @ ap[j]*bp[0]
|
||||
mov r14,#0
|
||||
umlal r12,r14,r6,r8 @ np[j]*n0
|
||||
adds r12,r12,r10
|
||||
str r12,[r4],#4 @ tp[j-1]=,tp++
|
||||
adc r12,r14,#0
|
||||
cmp r4,r0
|
||||
bne .L1st
|
||||
|
||||
adds r12,r12,r11
|
||||
ldr r4,[r0,#13*4] @ restore bp
|
||||
mov r14,#0
|
||||
ldr r8,[r0,#14*4] @ restore n0
|
||||
adc r14,r14,#0
|
||||
str r12,[r0] @ tp[num-1]=
|
||||
mov r7,sp
|
||||
str r14,[r0,#4] @ tp[num]=
|
||||
|
||||
.Louter:
|
||||
sub r7,r0,r7 @ "original" r0-1 value
|
||||
sub r1,r1,r7 @ "rewind" ap to &ap[1]
|
||||
ldr r2,[r4,#4]! @ *(++bp)
|
||||
sub r3,r3,r7 @ "rewind" np to &np[1]
|
||||
ldr r5,[r1,#-4] @ ap[0]
|
||||
ldr r10,[sp] @ tp[0]
|
||||
ldr r6,[r3,#-4] @ np[0]
|
||||
ldr r7,[sp,#4] @ tp[1]
|
||||
|
||||
mov r11,#0
|
||||
umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
|
||||
str r4,[r0,#13*4] @ save bp
|
||||
mul r8,r10,r8
|
||||
mov r12,#0
|
||||
umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
|
||||
mov r4,sp
|
||||
|
||||
.Linner:
|
||||
ldr r5,[r1],#4 @ ap[j],ap++
|
||||
adds r10,r11,r7 @ +=tp[j]
|
||||
ldr r6,[r3],#4 @ np[j],np++
|
||||
mov r11,#0
|
||||
umlal r10,r11,r5,r2 @ ap[j]*bp[i]
|
||||
mov r14,#0
|
||||
umlal r12,r14,r6,r8 @ np[j]*n0
|
||||
adc r11,r11,#0
|
||||
ldr r7,[r4,#8] @ tp[j+1]
|
||||
adds r12,r12,r10
|
||||
str r12,[r4],#4 @ tp[j-1]=,tp++
|
||||
adc r12,r14,#0
|
||||
cmp r4,r0
|
||||
bne .Linner
|
||||
|
||||
adds r12,r12,r11
|
||||
mov r14,#0
|
||||
ldr r4,[r0,#13*4] @ restore bp
|
||||
adc r14,r14,#0
|
||||
ldr r8,[r0,#14*4] @ restore n0
|
||||
adds r12,r12,r7
|
||||
ldr r7,[r0,#15*4] @ restore &bp[num]
|
||||
adc r14,r14,#0
|
||||
str r12,[r0] @ tp[num-1]=
|
||||
str r14,[r0,#4] @ tp[num]=
|
||||
|
||||
cmp r4,r7
|
||||
#ifdef __thumb2__
|
||||
itt ne
|
||||
#endif
|
||||
movne r7,sp
|
||||
bne .Louter
|
||||
|
||||
ldr r2,[r0,#12*4] @ pull rp
|
||||
mov r5,sp
|
||||
add r0,r0,#4 @ r0 to point at &tp[num]
|
||||
sub r5,r0,r5 @ "original" num value
|
||||
mov r4,sp @ "rewind" r4
|
||||
mov r1,r4 @ "borrow" r1
|
||||
sub r3,r3,r5 @ "rewind" r3 to &np[0]
|
||||
|
||||
subs r7,r7,r7 @ "clear" carry flag
|
||||
.Lsub: ldr r7,[r4],#4
|
||||
ldr r6,[r3],#4
|
||||
sbcs r7,r7,r6 @ tp[j]-np[j]
|
||||
str r7,[r2],#4 @ rp[j]=
|
||||
teq r4,r0 @ preserve carry
|
||||
bne .Lsub
|
||||
sbcs r14,r14,#0 @ upmost carry
|
||||
mov r4,sp @ "rewind" r4
|
||||
sub r2,r2,r5 @ "rewind" r2
|
||||
|
||||
and r1,r4,r14
|
||||
bic r3,r2,r14
|
||||
orr r1,r1,r3 @ ap=borrow?tp:rp
|
||||
|
||||
.Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh
|
||||
str sp,[r4],#4 @ zap tp
|
||||
str r7,[r2],#4
|
||||
cmp r4,r0
|
||||
bne .Lcopy
|
||||
|
||||
mov sp,r0
|
||||
add sp,sp,#4 @ skip over tp[num+1]
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
|
||||
add sp,sp,#2*4 @ skip over {r0,r2}
|
||||
mov r0,#1
|
||||
.Labrt:
|
||||
#if __ARM_ARCH__>=5
|
||||
bx lr @ bx lr
|
||||
#else
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
.size bn_mul_mont,.-bn_mul_mont
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.arch armv7-a
|
||||
.fpu neon
|
||||
|
||||
.type bn_mul8x_mont_neon,%function
|
||||
.align 5
|
||||
bn_mul8x_mont_neon:
|
||||
mov ip,sp
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
|
||||
ldmia ip,{r4,r5} @ load rest of parameter block
|
||||
mov ip,sp
|
||||
|
||||
cmp r5,#8
|
||||
bhi .LNEON_8n
|
||||
|
||||
@ special case for r5==8, everything is in register bank...
|
||||
|
||||
vld1.32 {d28[0]}, [r2,:32]!
|
||||
veor d8,d8,d8
|
||||
sub r7,sp,r5,lsl#4
|
||||
vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-(
|
||||
and r7,r7,#-64
|
||||
vld1.32 {d30[0]}, [r4,:32]
|
||||
mov sp,r7 @ alloca
|
||||
vzip.16 d28,d8
|
||||
|
||||
vmull.u32 q6,d28,d0[0]
|
||||
vmull.u32 q7,d28,d0[1]
|
||||
vmull.u32 q8,d28,d1[0]
|
||||
vshl.i64 d29,d13,#16
|
||||
vmull.u32 q9,d28,d1[1]
|
||||
|
||||
vadd.u64 d29,d29,d12
|
||||
veor d8,d8,d8
|
||||
vmul.u32 d29,d29,d30
|
||||
|
||||
vmull.u32 q10,d28,d2[0]
|
||||
vld1.32 {d4,d5,d6,d7}, [r3]!
|
||||
vmull.u32 q11,d28,d2[1]
|
||||
vmull.u32 q12,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmull.u32 q13,d28,d3[1]
|
||||
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
sub r9,r5,#1
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vmov q5,q6
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vmov q6,q7
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmov q7,q8
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vmov q8,q9
|
||||
vmov q9,q10
|
||||
vshr.u64 d10,d10,#16
|
||||
vmov q10,q11
|
||||
vmov q11,q12
|
||||
vadd.u64 d10,d10,d11
|
||||
vmov q12,q13
|
||||
veor q13,q13
|
||||
vshr.u64 d10,d10,#16
|
||||
|
||||
b .LNEON_outer8
|
||||
|
||||
.align 4
|
||||
.LNEON_outer8:
|
||||
vld1.32 {d28[0]}, [r2,:32]!
|
||||
veor d8,d8,d8
|
||||
vzip.16 d28,d8
|
||||
vadd.u64 d12,d12,d10
|
||||
|
||||
vmlal.u32 q6,d28,d0[0]
|
||||
vmlal.u32 q7,d28,d0[1]
|
||||
vmlal.u32 q8,d28,d1[0]
|
||||
vshl.i64 d29,d13,#16
|
||||
vmlal.u32 q9,d28,d1[1]
|
||||
|
||||
vadd.u64 d29,d29,d12
|
||||
veor d8,d8,d8
|
||||
subs r9,r9,#1
|
||||
vmul.u32 d29,d29,d30
|
||||
|
||||
vmlal.u32 q10,d28,d2[0]
|
||||
vmlal.u32 q11,d28,d2[1]
|
||||
vmlal.u32 q12,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q13,d28,d3[1]
|
||||
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vmov q5,q6
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vmov q6,q7
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmov q7,q8
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vmov q8,q9
|
||||
vmov q9,q10
|
||||
vshr.u64 d10,d10,#16
|
||||
vmov q10,q11
|
||||
vmov q11,q12
|
||||
vadd.u64 d10,d10,d11
|
||||
vmov q12,q13
|
||||
veor q13,q13
|
||||
vshr.u64 d10,d10,#16
|
||||
|
||||
bne .LNEON_outer8
|
||||
|
||||
vadd.u64 d12,d12,d10
|
||||
mov r7,sp
|
||||
vshr.u64 d10,d12,#16
|
||||
mov r8,r5
|
||||
vadd.u64 d13,d13,d10
|
||||
add r6,sp,#96
|
||||
vshr.u64 d10,d13,#16
|
||||
vzip.16 d12,d13
|
||||
|
||||
b .LNEON_tail_entry
|
||||
|
||||
.align 4
|
||||
.LNEON_8n:
|
||||
veor q6,q6,q6
|
||||
sub r7,sp,#128
|
||||
veor q7,q7,q7
|
||||
sub r7,r7,r5,lsl#4
|
||||
veor q8,q8,q8
|
||||
and r7,r7,#-64
|
||||
veor q9,q9,q9
|
||||
mov sp,r7 @ alloca
|
||||
veor q10,q10,q10
|
||||
add r7,r7,#256
|
||||
veor q11,q11,q11
|
||||
sub r8,r5,#8
|
||||
veor q12,q12,q12
|
||||
veor q13,q13,q13
|
||||
|
||||
.LNEON_8n_init:
|
||||
vst1.64 {q6,q7},[r7,:256]!
|
||||
subs r8,r8,#8
|
||||
vst1.64 {q8,q9},[r7,:256]!
|
||||
vst1.64 {q10,q11},[r7,:256]!
|
||||
vst1.64 {q12,q13},[r7,:256]!
|
||||
bne .LNEON_8n_init
|
||||
|
||||
add r6,sp,#256
|
||||
vld1.32 {d0,d1,d2,d3},[r1]!
|
||||
add r10,sp,#8
|
||||
vld1.32 {d30[0]},[r4,:32]
|
||||
mov r9,r5
|
||||
b .LNEON_8n_outer
|
||||
|
||||
.align 4
|
||||
.LNEON_8n_outer:
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
veor d8,d8,d8
|
||||
vzip.16 d28,d8
|
||||
add r7,sp,#128
|
||||
vld1.32 {d4,d5,d6,d7},[r3]!
|
||||
|
||||
vmlal.u32 q6,d28,d0[0]
|
||||
vmlal.u32 q7,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q8,d28,d1[0]
|
||||
vshl.i64 d29,d13,#16
|
||||
vmlal.u32 q9,d28,d1[1]
|
||||
vadd.u64 d29,d29,d12
|
||||
vmlal.u32 q10,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q11,d28,d2[1]
|
||||
vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0]
|
||||
vmlal.u32 q12,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q13,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vshr.u64 d12,d12,#16
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vadd.u64 d12,d12,d13
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vshr.u64 d12,d12,#16
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vadd.u64 d14,d14,d12
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0]
|
||||
vmlal.u32 q7,d28,d0[0]
|
||||
vld1.64 {q6},[r6,:128]!
|
||||
vmlal.u32 q8,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q9,d28,d1[0]
|
||||
vshl.i64 d29,d15,#16
|
||||
vmlal.u32 q10,d28,d1[1]
|
||||
vadd.u64 d29,d29,d14
|
||||
vmlal.u32 q11,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q12,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1]
|
||||
vmlal.u32 q13,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q6,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q7,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q8,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q9,d29,d5[0]
|
||||
vshr.u64 d14,d14,#16
|
||||
vmlal.u32 q10,d29,d5[1]
|
||||
vmlal.u32 q11,d29,d6[0]
|
||||
vadd.u64 d14,d14,d15
|
||||
vmlal.u32 q12,d29,d6[1]
|
||||
vshr.u64 d14,d14,#16
|
||||
vmlal.u32 q13,d29,d7[0]
|
||||
vmlal.u32 q6,d29,d7[1]
|
||||
vadd.u64 d16,d16,d14
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1]
|
||||
vmlal.u32 q8,d28,d0[0]
|
||||
vld1.64 {q7},[r6,:128]!
|
||||
vmlal.u32 q9,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q10,d28,d1[0]
|
||||
vshl.i64 d29,d17,#16
|
||||
vmlal.u32 q11,d28,d1[1]
|
||||
vadd.u64 d29,d29,d16
|
||||
vmlal.u32 q12,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q13,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2]
|
||||
vmlal.u32 q6,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q7,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q8,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q9,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q10,d29,d5[0]
|
||||
vshr.u64 d16,d16,#16
|
||||
vmlal.u32 q11,d29,d5[1]
|
||||
vmlal.u32 q12,d29,d6[0]
|
||||
vadd.u64 d16,d16,d17
|
||||
vmlal.u32 q13,d29,d6[1]
|
||||
vshr.u64 d16,d16,#16
|
||||
vmlal.u32 q6,d29,d7[0]
|
||||
vmlal.u32 q7,d29,d7[1]
|
||||
vadd.u64 d18,d18,d16
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2]
|
||||
vmlal.u32 q9,d28,d0[0]
|
||||
vld1.64 {q8},[r6,:128]!
|
||||
vmlal.u32 q10,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q11,d28,d1[0]
|
||||
vshl.i64 d29,d19,#16
|
||||
vmlal.u32 q12,d28,d1[1]
|
||||
vadd.u64 d29,d29,d18
|
||||
vmlal.u32 q13,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q6,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3]
|
||||
vmlal.u32 q7,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q8,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q9,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q10,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q11,d29,d5[0]
|
||||
vshr.u64 d18,d18,#16
|
||||
vmlal.u32 q12,d29,d5[1]
|
||||
vmlal.u32 q13,d29,d6[0]
|
||||
vadd.u64 d18,d18,d19
|
||||
vmlal.u32 q6,d29,d6[1]
|
||||
vshr.u64 d18,d18,#16
|
||||
vmlal.u32 q7,d29,d7[0]
|
||||
vmlal.u32 q8,d29,d7[1]
|
||||
vadd.u64 d20,d20,d18
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3]
|
||||
vmlal.u32 q10,d28,d0[0]
|
||||
vld1.64 {q9},[r6,:128]!
|
||||
vmlal.u32 q11,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q12,d28,d1[0]
|
||||
vshl.i64 d29,d21,#16
|
||||
vmlal.u32 q13,d28,d1[1]
|
||||
vadd.u64 d29,d29,d20
|
||||
vmlal.u32 q6,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q7,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4]
|
||||
vmlal.u32 q8,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q9,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q10,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q11,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q12,d29,d5[0]
|
||||
vshr.u64 d20,d20,#16
|
||||
vmlal.u32 q13,d29,d5[1]
|
||||
vmlal.u32 q6,d29,d6[0]
|
||||
vadd.u64 d20,d20,d21
|
||||
vmlal.u32 q7,d29,d6[1]
|
||||
vshr.u64 d20,d20,#16
|
||||
vmlal.u32 q8,d29,d7[0]
|
||||
vmlal.u32 q9,d29,d7[1]
|
||||
vadd.u64 d22,d22,d20
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4]
|
||||
vmlal.u32 q11,d28,d0[0]
|
||||
vld1.64 {q10},[r6,:128]!
|
||||
vmlal.u32 q12,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q13,d28,d1[0]
|
||||
vshl.i64 d29,d23,#16
|
||||
vmlal.u32 q6,d28,d1[1]
|
||||
vadd.u64 d29,d29,d22
|
||||
vmlal.u32 q7,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q8,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5]
|
||||
vmlal.u32 q9,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q10,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q11,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q12,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q13,d29,d5[0]
|
||||
vshr.u64 d22,d22,#16
|
||||
vmlal.u32 q6,d29,d5[1]
|
||||
vmlal.u32 q7,d29,d6[0]
|
||||
vadd.u64 d22,d22,d23
|
||||
vmlal.u32 q8,d29,d6[1]
|
||||
vshr.u64 d22,d22,#16
|
||||
vmlal.u32 q9,d29,d7[0]
|
||||
vmlal.u32 q10,d29,d7[1]
|
||||
vadd.u64 d24,d24,d22
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5]
|
||||
vmlal.u32 q12,d28,d0[0]
|
||||
vld1.64 {q11},[r6,:128]!
|
||||
vmlal.u32 q13,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q6,d28,d1[0]
|
||||
vshl.i64 d29,d25,#16
|
||||
vmlal.u32 q7,d28,d1[1]
|
||||
vadd.u64 d29,d29,d24
|
||||
vmlal.u32 q8,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q9,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6]
|
||||
vmlal.u32 q10,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q11,d28,d3[1]
|
||||
vld1.32 {d28[0]},[r2,:32]! @ *b++
|
||||
vmlal.u32 q12,d29,d4[0]
|
||||
veor d10,d10,d10
|
||||
vmlal.u32 q13,d29,d4[1]
|
||||
vzip.16 d28,d10
|
||||
vmlal.u32 q6,d29,d5[0]
|
||||
vshr.u64 d24,d24,#16
|
||||
vmlal.u32 q7,d29,d5[1]
|
||||
vmlal.u32 q8,d29,d6[0]
|
||||
vadd.u64 d24,d24,d25
|
||||
vmlal.u32 q9,d29,d6[1]
|
||||
vshr.u64 d24,d24,#16
|
||||
vmlal.u32 q10,d29,d7[0]
|
||||
vmlal.u32 q11,d29,d7[1]
|
||||
vadd.u64 d26,d26,d24
|
||||
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6]
|
||||
vmlal.u32 q13,d28,d0[0]
|
||||
vld1.64 {q12},[r6,:128]!
|
||||
vmlal.u32 q6,d28,d0[1]
|
||||
veor d8,d8,d8
|
||||
vmlal.u32 q7,d28,d1[0]
|
||||
vshl.i64 d29,d27,#16
|
||||
vmlal.u32 q8,d28,d1[1]
|
||||
vadd.u64 d29,d29,d26
|
||||
vmlal.u32 q9,d28,d2[0]
|
||||
vmul.u32 d29,d29,d30
|
||||
vmlal.u32 q10,d28,d2[1]
|
||||
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7]
|
||||
vmlal.u32 q11,d28,d3[0]
|
||||
vzip.16 d29,d8
|
||||
vmlal.u32 q12,d28,d3[1]
|
||||
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
|
||||
vmlal.u32 q13,d29,d4[0]
|
||||
vld1.32 {d0,d1,d2,d3},[r1]!
|
||||
vmlal.u32 q6,d29,d4[1]
|
||||
vmlal.u32 q7,d29,d5[0]
|
||||
vshr.u64 d26,d26,#16
|
||||
vmlal.u32 q8,d29,d5[1]
|
||||
vmlal.u32 q9,d29,d6[0]
|
||||
vadd.u64 d26,d26,d27
|
||||
vmlal.u32 q10,d29,d6[1]
|
||||
vshr.u64 d26,d26,#16
|
||||
vmlal.u32 q11,d29,d7[0]
|
||||
vmlal.u32 q12,d29,d7[1]
|
||||
vadd.u64 d12,d12,d26
|
||||
vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7]
|
||||
add r10,sp,#8 @ rewind
|
||||
sub r8,r5,#8
|
||||
b .LNEON_8n_inner
|
||||
|
||||
.align 4
|
||||
.LNEON_8n_inner:
|
||||
subs r8,r8,#8
|
||||
vmlal.u32 q6,d28,d0[0]
|
||||
vld1.64 {q13},[r6,:128]
|
||||
vmlal.u32 q7,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0]
|
||||
vmlal.u32 q8,d28,d1[0]
|
||||
vld1.32 {d4,d5,d6,d7},[r3]!
|
||||
vmlal.u32 q9,d28,d1[1]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q10,d28,d2[0]
|
||||
vmlal.u32 q11,d28,d2[1]
|
||||
vmlal.u32 q12,d28,d3[0]
|
||||
vmlal.u32 q13,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1]
|
||||
vmlal.u32 q6,d29,d4[0]
|
||||
vmlal.u32 q7,d29,d4[1]
|
||||
vmlal.u32 q8,d29,d5[0]
|
||||
vmlal.u32 q9,d29,d5[1]
|
||||
vmlal.u32 q10,d29,d6[0]
|
||||
vmlal.u32 q11,d29,d6[1]
|
||||
vmlal.u32 q12,d29,d7[0]
|
||||
vmlal.u32 q13,d29,d7[1]
|
||||
vst1.64 {q6},[r7,:128]!
|
||||
vmlal.u32 q7,d28,d0[0]
|
||||
vld1.64 {q6},[r6,:128]
|
||||
vmlal.u32 q8,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1]
|
||||
vmlal.u32 q9,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q10,d28,d1[1]
|
||||
vmlal.u32 q11,d28,d2[0]
|
||||
vmlal.u32 q12,d28,d2[1]
|
||||
vmlal.u32 q13,d28,d3[0]
|
||||
vmlal.u32 q6,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2]
|
||||
vmlal.u32 q7,d29,d4[0]
|
||||
vmlal.u32 q8,d29,d4[1]
|
||||
vmlal.u32 q9,d29,d5[0]
|
||||
vmlal.u32 q10,d29,d5[1]
|
||||
vmlal.u32 q11,d29,d6[0]
|
||||
vmlal.u32 q12,d29,d6[1]
|
||||
vmlal.u32 q13,d29,d7[0]
|
||||
vmlal.u32 q6,d29,d7[1]
|
||||
vst1.64 {q7},[r7,:128]!
|
||||
vmlal.u32 q8,d28,d0[0]
|
||||
vld1.64 {q7},[r6,:128]
|
||||
vmlal.u32 q9,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2]
|
||||
vmlal.u32 q10,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q11,d28,d1[1]
|
||||
vmlal.u32 q12,d28,d2[0]
|
||||
vmlal.u32 q13,d28,d2[1]
|
||||
vmlal.u32 q6,d28,d3[0]
|
||||
vmlal.u32 q7,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3]
|
||||
vmlal.u32 q8,d29,d4[0]
|
||||
vmlal.u32 q9,d29,d4[1]
|
||||
vmlal.u32 q10,d29,d5[0]
|
||||
vmlal.u32 q11,d29,d5[1]
|
||||
vmlal.u32 q12,d29,d6[0]
|
||||
vmlal.u32 q13,d29,d6[1]
|
||||
vmlal.u32 q6,d29,d7[0]
|
||||
vmlal.u32 q7,d29,d7[1]
|
||||
vst1.64 {q8},[r7,:128]!
|
||||
vmlal.u32 q9,d28,d0[0]
|
||||
vld1.64 {q8},[r6,:128]
|
||||
vmlal.u32 q10,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3]
|
||||
vmlal.u32 q11,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q12,d28,d1[1]
|
||||
vmlal.u32 q13,d28,d2[0]
|
||||
vmlal.u32 q6,d28,d2[1]
|
||||
vmlal.u32 q7,d28,d3[0]
|
||||
vmlal.u32 q8,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4]
|
||||
vmlal.u32 q9,d29,d4[0]
|
||||
vmlal.u32 q10,d29,d4[1]
|
||||
vmlal.u32 q11,d29,d5[0]
|
||||
vmlal.u32 q12,d29,d5[1]
|
||||
vmlal.u32 q13,d29,d6[0]
|
||||
vmlal.u32 q6,d29,d6[1]
|
||||
vmlal.u32 q7,d29,d7[0]
|
||||
vmlal.u32 q8,d29,d7[1]
|
||||
vst1.64 {q9},[r7,:128]!
|
||||
vmlal.u32 q10,d28,d0[0]
|
||||
vld1.64 {q9},[r6,:128]
|
||||
vmlal.u32 q11,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4]
|
||||
vmlal.u32 q12,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q13,d28,d1[1]
|
||||
vmlal.u32 q6,d28,d2[0]
|
||||
vmlal.u32 q7,d28,d2[1]
|
||||
vmlal.u32 q8,d28,d3[0]
|
||||
vmlal.u32 q9,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5]
|
||||
vmlal.u32 q10,d29,d4[0]
|
||||
vmlal.u32 q11,d29,d4[1]
|
||||
vmlal.u32 q12,d29,d5[0]
|
||||
vmlal.u32 q13,d29,d5[1]
|
||||
vmlal.u32 q6,d29,d6[0]
|
||||
vmlal.u32 q7,d29,d6[1]
|
||||
vmlal.u32 q8,d29,d7[0]
|
||||
vmlal.u32 q9,d29,d7[1]
|
||||
vst1.64 {q10},[r7,:128]!
|
||||
vmlal.u32 q11,d28,d0[0]
|
||||
vld1.64 {q10},[r6,:128]
|
||||
vmlal.u32 q12,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5]
|
||||
vmlal.u32 q13,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q6,d28,d1[1]
|
||||
vmlal.u32 q7,d28,d2[0]
|
||||
vmlal.u32 q8,d28,d2[1]
|
||||
vmlal.u32 q9,d28,d3[0]
|
||||
vmlal.u32 q10,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6]
|
||||
vmlal.u32 q11,d29,d4[0]
|
||||
vmlal.u32 q12,d29,d4[1]
|
||||
vmlal.u32 q13,d29,d5[0]
|
||||
vmlal.u32 q6,d29,d5[1]
|
||||
vmlal.u32 q7,d29,d6[0]
|
||||
vmlal.u32 q8,d29,d6[1]
|
||||
vmlal.u32 q9,d29,d7[0]
|
||||
vmlal.u32 q10,d29,d7[1]
|
||||
vst1.64 {q11},[r7,:128]!
|
||||
vmlal.u32 q12,d28,d0[0]
|
||||
vld1.64 {q11},[r6,:128]
|
||||
vmlal.u32 q13,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6]
|
||||
vmlal.u32 q6,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q7,d28,d1[1]
|
||||
vmlal.u32 q8,d28,d2[0]
|
||||
vmlal.u32 q9,d28,d2[1]
|
||||
vmlal.u32 q10,d28,d3[0]
|
||||
vmlal.u32 q11,d28,d3[1]
|
||||
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7]
|
||||
vmlal.u32 q12,d29,d4[0]
|
||||
vmlal.u32 q13,d29,d4[1]
|
||||
vmlal.u32 q6,d29,d5[0]
|
||||
vmlal.u32 q7,d29,d5[1]
|
||||
vmlal.u32 q8,d29,d6[0]
|
||||
vmlal.u32 q9,d29,d6[1]
|
||||
vmlal.u32 q10,d29,d7[0]
|
||||
vmlal.u32 q11,d29,d7[1]
|
||||
vst1.64 {q12},[r7,:128]!
|
||||
vmlal.u32 q13,d28,d0[0]
|
||||
vld1.64 {q12},[r6,:128]
|
||||
vmlal.u32 q6,d28,d0[1]
|
||||
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7]
|
||||
vmlal.u32 q7,d28,d1[0]
|
||||
it ne
|
||||
addne r6,r6,#16 @ don't advance in last iteration
|
||||
vmlal.u32 q8,d28,d1[1]
|
||||
vmlal.u32 q9,d28,d2[0]
|
||||
vmlal.u32 q10,d28,d2[1]
|
||||
vmlal.u32 q11,d28,d3[0]
|
||||
vmlal.u32 q12,d28,d3[1]
|
||||
it eq
|
||||
subeq r1,r1,r5,lsl#2 @ rewind
|
||||
vmlal.u32 q13,d29,d4[0]
|
||||
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
|
||||
vmlal.u32 q6,d29,d4[1]
|
||||
vld1.32 {d0,d1,d2,d3},[r1]!
|
||||
vmlal.u32 q7,d29,d5[0]
|
||||
add r10,sp,#8 @ rewind
|
||||
vmlal.u32 q8,d29,d5[1]
|
||||
vmlal.u32 q9,d29,d6[0]
|
||||
vmlal.u32 q10,d29,d6[1]
|
||||
vmlal.u32 q11,d29,d7[0]
|
||||
vst1.64 {q13},[r7,:128]!
|
||||
vmlal.u32 q12,d29,d7[1]
|
||||
|
||||
bne .LNEON_8n_inner
|
||||
add r6,sp,#128
|
||||
vst1.64 {q6,q7},[r7,:256]!
|
||||
veor q2,q2,q2 @ d4-d5
|
||||
vst1.64 {q8,q9},[r7,:256]!
|
||||
veor q3,q3,q3 @ d6-d7
|
||||
vst1.64 {q10,q11},[r7,:256]!
|
||||
vst1.64 {q12},[r7,:128]
|
||||
|
||||
subs r9,r9,#8
|
||||
vld1.64 {q6,q7},[r6,:256]!
|
||||
vld1.64 {q8,q9},[r6,:256]!
|
||||
vld1.64 {q10,q11},[r6,:256]!
|
||||
vld1.64 {q12,q13},[r6,:256]!
|
||||
|
||||
itt ne
|
||||
subne r3,r3,r5,lsl#2 @ rewind
|
||||
bne .LNEON_8n_outer
|
||||
|
||||
add r7,sp,#128
|
||||
vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame
|
||||
vshr.u64 d10,d12,#16
|
||||
vst1.64 {q2,q3},[sp,:256]!
|
||||
vadd.u64 d13,d13,d10
|
||||
vst1.64 {q2,q3}, [sp,:256]!
|
||||
vshr.u64 d10,d13,#16
|
||||
vst1.64 {q2,q3}, [sp,:256]!
|
||||
vzip.16 d12,d13
|
||||
|
||||
mov r8,r5
|
||||
b .LNEON_tail_entry
|
||||
|
||||
.align 4
|
||||
.LNEON_tail:
|
||||
vadd.u64 d12,d12,d10
|
||||
vshr.u64 d10,d12,#16
|
||||
vld1.64 {q8,q9}, [r6, :256]!
|
||||
vadd.u64 d13,d13,d10
|
||||
vld1.64 {q10,q11}, [r6, :256]!
|
||||
vshr.u64 d10,d13,#16
|
||||
vld1.64 {q12,q13}, [r6, :256]!
|
||||
vzip.16 d12,d13
|
||||
|
||||
.LNEON_tail_entry:
|
||||
vadd.u64 d14,d14,d10
|
||||
vst1.32 {d12[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d14,#16
|
||||
vadd.u64 d15,d15,d10
|
||||
vshr.u64 d10,d15,#16
|
||||
vzip.16 d14,d15
|
||||
vadd.u64 d16,d16,d10
|
||||
vst1.32 {d14[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d16,#16
|
||||
vadd.u64 d17,d17,d10
|
||||
vshr.u64 d10,d17,#16
|
||||
vzip.16 d16,d17
|
||||
vadd.u64 d18,d18,d10
|
||||
vst1.32 {d16[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d18,#16
|
||||
vadd.u64 d19,d19,d10
|
||||
vshr.u64 d10,d19,#16
|
||||
vzip.16 d18,d19
|
||||
vadd.u64 d20,d20,d10
|
||||
vst1.32 {d18[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d20,#16
|
||||
vadd.u64 d21,d21,d10
|
||||
vshr.u64 d10,d21,#16
|
||||
vzip.16 d20,d21
|
||||
vadd.u64 d22,d22,d10
|
||||
vst1.32 {d20[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d22,#16
|
||||
vadd.u64 d23,d23,d10
|
||||
vshr.u64 d10,d23,#16
|
||||
vzip.16 d22,d23
|
||||
vadd.u64 d24,d24,d10
|
||||
vst1.32 {d22[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d24,#16
|
||||
vadd.u64 d25,d25,d10
|
||||
vshr.u64 d10,d25,#16
|
||||
vzip.16 d24,d25
|
||||
vadd.u64 d26,d26,d10
|
||||
vst1.32 {d24[0]}, [r7, :32]!
|
||||
vshr.u64 d10,d26,#16
|
||||
vadd.u64 d27,d27,d10
|
||||
vshr.u64 d10,d27,#16
|
||||
vzip.16 d26,d27
|
||||
vld1.64 {q6,q7}, [r6, :256]!
|
||||
subs r8,r8,#8
|
||||
vst1.32 {d26[0]}, [r7, :32]!
|
||||
bne .LNEON_tail
|
||||
|
||||
vst1.32 {d10[0]}, [r7, :32] @ top-most bit
|
||||
sub r3,r3,r5,lsl#2 @ rewind r3
|
||||
subs r1,sp,#0 @ clear carry flag
|
||||
add r2,sp,r5,lsl#2
|
||||
|
||||
.LNEON_sub:
|
||||
ldmia r1!, {r4,r5,r6,r7}
|
||||
ldmia r3!, {r8,r9,r10,r11}
|
||||
sbcs r8, r4,r8
|
||||
sbcs r9, r5,r9
|
||||
sbcs r10,r6,r10
|
||||
sbcs r11,r7,r11
|
||||
teq r1,r2 @ preserves carry
|
||||
stmia r0!, {r8,r9,r10,r11}
|
||||
bne .LNEON_sub
|
||||
|
||||
ldr r10, [r1] @ load top-most bit
|
||||
mov r11,sp
|
||||
veor q0,q0,q0
|
||||
sub r11,r2,r11 @ this is num*4
|
||||
veor q1,q1,q1
|
||||
mov r1,sp
|
||||
sub r0,r0,r11 @ rewind r0
|
||||
mov r3,r2 @ second 3/4th of frame
|
||||
sbcs r10,r10,#0 @ result is carry flag
|
||||
|
||||
.LNEON_copy_n_zap:
|
||||
ldmia r1!, {r4,r5,r6,r7}
|
||||
ldmia r0, {r8,r9,r10,r11}
|
||||
it cc
|
||||
movcc r8, r4
|
||||
vst1.64 {q0,q1}, [r3,:256]! @ wipe
|
||||
itt cc
|
||||
movcc r9, r5
|
||||
movcc r10,r6
|
||||
vst1.64 {q0,q1}, [r3,:256]! @ wipe
|
||||
it cc
|
||||
movcc r11,r7
|
||||
ldmia r1, {r4,r5,r6,r7}
|
||||
stmia r0!, {r8,r9,r10,r11}
|
||||
sub r1,r1,#16
|
||||
ldmia r0, {r8,r9,r10,r11}
|
||||
it cc
|
||||
movcc r8, r4
|
||||
vst1.64 {q0,q1}, [r1,:256]! @ wipe
|
||||
itt cc
|
||||
movcc r9, r5
|
||||
movcc r10,r6
|
||||
vst1.64 {q0,q1}, [r3,:256]! @ wipe
|
||||
it cc
|
||||
movcc r11,r7
|
||||
teq r1,r2 @ preserves carry
|
||||
stmia r0!, {r8,r9,r10,r11}
|
||||
bne .LNEON_copy_n_zap
|
||||
|
||||
mov sp,ip
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
|
||||
bx lr @ bx lr
|
||||
.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
|
||||
#endif
.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#if __ARM_MAX_ARCH__>=7
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
#endif
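
The scalar path of bn_mul_mont above is a word-serial Montgomery multiplication: for each word of bp it accumulates ap[]*bp[i] into a temporary, cancels the low word by adding a multiple of np chosen with n0 = -np[0]^-1 mod 2^32, and shifts the temporary down one word. A C sketch of that algorithm (illustrative only, not the imported implementation; the names and VLA usage are just for brevity) is:

#include <stdint.h>
#include <string.h>

/* Word-serial Montgomery multiplication with 32-bit limbs:
 * rp = ap*bp*R^-1 mod np, R = 2^(32*num), n0 = -np[0]^-1 mod 2^32.
 * Mirrors the .Louter/.Linner loop structure above; reference only. */
static void mont_mul_ref(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                         const uint32_t *np, uint32_t n0, size_t num) {
  uint32_t tp[num + 2];                        /* VLA for brevity */
  memset(tp, 0, sizeof(tp));

  for (size_t i = 0; i < num; i++) {
    uint64_t c = 0;
    for (size_t j = 0; j < num; j++) {         /* tp += ap * bp[i] */
      c += tp[j] + (uint64_t)ap[j] * bp[i];
      tp[j] = (uint32_t)c;
      c >>= 32;
    }
    c += tp[num];
    tp[num] = (uint32_t)c;
    tp[num + 1] = (uint32_t)(c >> 32);

    uint32_t m = tp[0] * n0;                   /* "tp[0]"*n0, as in the asm */
    c = (tp[0] + (uint64_t)m * np[0]) >> 32;   /* low word cancels to zero */
    for (size_t j = 1; j < num; j++) {         /* tp = (tp + m*np) >> 32 */
      c += tp[j] + (uint64_t)m * np[j];
      tp[j - 1] = (uint32_t)c;
      c >>= 32;
    }
    c += tp[num];
    tp[num - 1] = (uint32_t)c;
    tp[num] = tp[num + 1] + (uint32_t)(c >> 32);
  }

  /* Final conditional subtraction: rp = tp - np if tp >= np, else tp. */
  uint32_t sub[num];
  uint64_t borrow = 0;
  for (size_t j = 0; j < num; j++) {
    uint64_t d = (uint64_t)tp[j] - np[j] - borrow;
    sub[j] = (uint32_t)d;
    borrow = d >> 63;                          /* 1 if the subtraction wrapped */
  }
  int take_sub = tp[num] != 0 || borrow == 0;
  for (size_t j = 0; j < num; j++) {
    rp[j] = take_sub ? sub[j] : tp[j];
  }
}

The .Lsub/.Lcopy tail of the scalar code performs this same final step, using masks rather than a data-dependent branch to choose between tp and tp-np.
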
@ -0,0 +1,573 @@
#if defined(__arm__)
|
||||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
#if defined(__thumb2__) || defined(__clang__)
|
||||
.syntax unified
|
||||
#endif
|
||||
#if defined(__thumb2__)
|
||||
.thumb
|
||||
#else
|
||||
.code 32
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
#define ldrplb ldrbpl
|
||||
#define ldrneb ldrbne
|
||||
#endif
|
||||
|
||||
.type rem_4bit,%object
|
||||
.align 5
|
||||
rem_4bit:
|
||||
.short 0x0000,0x1C20,0x3840,0x2460
|
||||
.short 0x7080,0x6CA0,0x48C0,0x54E0
|
||||
.short 0xE100,0xFD20,0xD940,0xC560
|
||||
.short 0x9180,0x8DA0,0xA9C0,0xB5E0
|
||||
.size rem_4bit,.-rem_4bit
|
||||
|
||||
.type rem_4bit_get,%function
|
||||
rem_4bit_get:
|
||||
#if defined(__thumb2__)
|
||||
adr r2,rem_4bit
|
||||
#else
|
||||
sub r2,pc,#8+32 @ &rem_4bit
|
||||
#endif
|
||||
b .Lrem_4bit_got
|
||||
nop
|
||||
nop
|
||||
.size rem_4bit_get,.-rem_4bit_get
|
||||
|
||||
.globl gcm_ghash_4bit
|
||||
.hidden gcm_ghash_4bit
|
||||
.type gcm_ghash_4bit,%function
|
||||
.align 4
|
||||
gcm_ghash_4bit:
|
||||
#if defined(__thumb2__)
|
||||
adr r12,rem_4bit
|
||||
#else
|
||||
sub r12,pc,#8+48 @ &rem_4bit
|
||||
#endif
|
||||
add r3,r2,r3 @ r3 to point at the end
|
||||
stmdb sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} @ save r3/end too
|
||||
|
||||
ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ...
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ ... to stack
|
||||
|
||||
ldrb r12,[r2,#15]
|
||||
ldrb r14,[r0,#15]
|
||||
.Louter:
|
||||
eor r12,r12,r14
|
||||
and r14,r12,#0xf0
|
||||
and r12,r12,#0x0f
|
||||
mov r3,#14
|
||||
|
||||
add r7,r1,r12,lsl#4
|
||||
ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
|
||||
add r11,r1,r14
|
||||
ldrb r12,[r2,#14]
|
||||
|
||||
and r14,r4,#0xf @ rem
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
add r14,r14,r14
|
||||
eor r4,r8,r4,lsr#4
|
||||
ldrh r8,[sp,r14] @ rem_4bit[rem]
|
||||
eor r4,r4,r5,lsl#28
|
||||
ldrb r14,[r0,#14]
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
eor r12,r12,r14
|
||||
and r14,r12,#0xf0
|
||||
and r12,r12,#0x0f
|
||||
eor r7,r7,r8,lsl#16
|
||||
|
||||
.Linner:
|
||||
add r11,r1,r12,lsl#4
|
||||
and r12,r4,#0xf @ rem
|
||||
subs r3,r3,#1
|
||||
add r12,r12,r12
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
|
||||
eor r4,r8,r4,lsr#4
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
ldrh r8,[sp,r12] @ rem_4bit[rem]
|
||||
eor r6,r10,r6,lsr#4
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
ldrplb r12,[r2,r3]
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
|
||||
add r11,r1,r14
|
||||
and r14,r4,#0xf @ rem
|
||||
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
|
||||
add r14,r14,r14
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
eor r4,r8,r4,lsr#4
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
ldrplb r8,[r0,r3]
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
ldrh r9,[sp,r14]
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
eorpl r12,r12,r8
|
||||
eor r7,r11,r7,lsr#4
|
||||
#ifdef __thumb2__
|
||||
itt pl
|
||||
#endif
|
||||
andpl r14,r12,#0xf0
|
||||
andpl r12,r12,#0x0f
|
||||
eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
|
||||
bpl .Linner
|
||||
|
||||
ldr r3,[sp,#32] @ re-load r3/end
|
||||
add r2,r2,#16
|
||||
mov r14,r4
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r4,r4
|
||||
str r4,[r0,#12]
|
||||
#elif defined(__ARMEB__)
|
||||
str r4,[r0,#12]
|
||||
#else
|
||||
mov r9,r4,lsr#8
|
||||
strb r4,[r0,#12+3]
|
||||
mov r10,r4,lsr#16
|
||||
strb r9,[r0,#12+2]
|
||||
mov r11,r4,lsr#24
|
||||
strb r10,[r0,#12+1]
|
||||
strb r11,[r0,#12]
|
||||
#endif
|
||||
cmp r2,r3
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r5,r5
|
||||
str r5,[r0,#8]
|
||||
#elif defined(__ARMEB__)
|
||||
str r5,[r0,#8]
|
||||
#else
|
||||
mov r9,r5,lsr#8
|
||||
strb r5,[r0,#8+3]
|
||||
mov r10,r5,lsr#16
|
||||
strb r9,[r0,#8+2]
|
||||
mov r11,r5,lsr#24
|
||||
strb r10,[r0,#8+1]
|
||||
strb r11,[r0,#8]
|
||||
#endif
|
||||
|
||||
#ifdef __thumb2__
|
||||
it ne
|
||||
#endif
|
||||
ldrneb r12,[r2,#15]
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r6,r6
|
||||
str r6,[r0,#4]
|
||||
#elif defined(__ARMEB__)
|
||||
str r6,[r0,#4]
|
||||
#else
|
||||
mov r9,r6,lsr#8
|
||||
strb r6,[r0,#4+3]
|
||||
mov r10,r6,lsr#16
|
||||
strb r9,[r0,#4+2]
|
||||
mov r11,r6,lsr#24
|
||||
strb r10,[r0,#4+1]
|
||||
strb r11,[r0,#4]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r7,r7
|
||||
str r7,[r0,#0]
|
||||
#elif defined(__ARMEB__)
|
||||
str r7,[r0,#0]
|
||||
#else
|
||||
mov r9,r7,lsr#8
|
||||
strb r7,[r0,#0+3]
|
||||
mov r10,r7,lsr#16
|
||||
strb r9,[r0,#0+2]
|
||||
mov r11,r7,lsr#24
|
||||
strb r10,[r0,#0+1]
|
||||
strb r11,[r0,#0]
|
||||
#endif
|
||||
|
||||
bne .Louter
|
||||
|
||||
add sp,sp,#36
|
||||
#if __ARM_ARCH__>=5
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
|
||||
#else
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
.size gcm_ghash_4bit,.-gcm_ghash_4bit
|
||||
|
||||
.globl gcm_gmult_4bit
|
||||
.hidden gcm_gmult_4bit
|
||||
.type gcm_gmult_4bit,%function
|
||||
gcm_gmult_4bit:
|
||||
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
|
||||
ldrb r12,[r0,#15]
|
||||
b rem_4bit_get
|
||||
.Lrem_4bit_got:
|
||||
and r14,r12,#0xf0
|
||||
and r12,r12,#0x0f
|
||||
mov r3,#14
|
||||
|
||||
add r7,r1,r12,lsl#4
|
||||
ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
|
||||
ldrb r12,[r0,#14]
|
||||
|
||||
add r11,r1,r14
|
||||
and r14,r4,#0xf @ rem
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
add r14,r14,r14
|
||||
eor r4,r8,r4,lsr#4
|
||||
ldrh r8,[r2,r14] @ rem_4bit[rem]
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
and r14,r12,#0xf0
|
||||
eor r7,r7,r8,lsl#16
|
||||
and r12,r12,#0x0f
|
||||
|
||||
.Loop:
|
||||
add r11,r1,r12,lsl#4
|
||||
and r12,r4,#0xf @ rem
|
||||
subs r3,r3,#1
|
||||
add r12,r12,r12
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
|
||||
eor r4,r8,r4,lsr#4
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
eor r5,r5,r6,lsl#28
|
||||
ldrh r8,[r2,r12] @ rem_4bit[rem]
|
||||
eor r6,r10,r6,lsr#4
|
||||
#ifdef __thumb2__
|
||||
it pl
|
||||
#endif
|
||||
ldrplb r12,[r0,r3]
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
|
||||
add r11,r1,r14
|
||||
and r14,r4,#0xf @ rem
|
||||
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
|
||||
add r14,r14,r14
|
||||
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
|
||||
eor r4,r8,r4,lsr#4
|
||||
eor r4,r4,r5,lsl#28
|
||||
eor r5,r9,r5,lsr#4
|
||||
ldrh r8,[r2,r14] @ rem_4bit[rem]
|
||||
eor r5,r5,r6,lsl#28
|
||||
eor r6,r10,r6,lsr#4
|
||||
eor r6,r6,r7,lsl#28
|
||||
eor r7,r11,r7,lsr#4
|
||||
#ifdef __thumb2__
|
||||
itt pl
|
||||
#endif
|
||||
andpl r14,r12,#0xf0
|
||||
andpl r12,r12,#0x0f
|
||||
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
|
||||
bpl .Loop
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r4,r4
|
||||
str r4,[r0,#12]
|
||||
#elif defined(__ARMEB__)
|
||||
str r4,[r0,#12]
|
||||
#else
|
||||
mov r9,r4,lsr#8
|
||||
strb r4,[r0,#12+3]
|
||||
mov r10,r4,lsr#16
|
||||
strb r9,[r0,#12+2]
|
||||
mov r11,r4,lsr#24
|
||||
strb r10,[r0,#12+1]
|
||||
strb r11,[r0,#12]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r5,r5
|
||||
str r5,[r0,#8]
|
||||
#elif defined(__ARMEB__)
|
||||
str r5,[r0,#8]
|
||||
#else
|
||||
mov r9,r5,lsr#8
|
||||
strb r5,[r0,#8+3]
|
||||
mov r10,r5,lsr#16
|
||||
strb r9,[r0,#8+2]
|
||||
mov r11,r5,lsr#24
|
||||
strb r10,[r0,#8+1]
|
||||
strb r11,[r0,#8]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r6,r6
|
||||
str r6,[r0,#4]
|
||||
#elif defined(__ARMEB__)
|
||||
str r6,[r0,#4]
|
||||
#else
|
||||
mov r9,r6,lsr#8
|
||||
strb r6,[r0,#4+3]
|
||||
mov r10,r6,lsr#16
|
||||
strb r9,[r0,#4+2]
|
||||
mov r11,r6,lsr#24
|
||||
strb r10,[r0,#4+1]
|
||||
strb r11,[r0,#4]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
|
||||
rev r7,r7
|
||||
str r7,[r0,#0]
|
||||
#elif defined(__ARMEB__)
|
||||
str r7,[r0,#0]
|
||||
#else
|
||||
mov r9,r7,lsr#8
|
||||
strb r7,[r0,#0+3]
|
||||
mov r10,r7,lsr#16
|
||||
strb r9,[r0,#0+2]
|
||||
mov r11,r7,lsr#24
|
||||
strb r10,[r0,#0+1]
|
||||
strb r11,[r0,#0]
|
||||
#endif
|
||||
|
||||
#if __ARM_ARCH__>=5
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
|
||||
#else
|
||||
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
.size gcm_gmult_4bit,.-gcm_gmult_4bit
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.arch armv7-a
|
||||
.fpu neon
|
||||
|
||||
.globl gcm_init_neon
|
||||
.hidden gcm_init_neon
|
||||
.type gcm_init_neon,%function
|
||||
.align 4
|
||||
gcm_init_neon:
|
||||
vld1.64 d7,[r1]! @ load H
|
||||
vmov.i8 q8,#0xe1
|
||||
vld1.64 d6,[r1]
|
||||
vshl.i64 d17,#57
|
||||
vshr.u64 d16,#63 @ t0=0xc2....01
|
||||
vdup.8 q9,d7[7]
|
||||
vshr.u64 d26,d6,#63
|
||||
vshr.s8 q9,#7 @ broadcast carry bit
|
||||
vshl.i64 q3,q3,#1
|
||||
vand q8,q8,q9
|
||||
vorr d7,d26 @ H<<<=1
|
||||
veor q3,q3,q8 @ twisted H
|
||||
vstmia r0,{q3}
|
||||
|
||||
bx lr @ bx lr
|
||||
.size gcm_init_neon,.-gcm_init_neon
|
||||
|
||||
.globl gcm_gmult_neon
|
||||
.hidden gcm_gmult_neon
|
||||
.type gcm_gmult_neon,%function
|
||||
.align 4
|
||||
gcm_gmult_neon:
|
||||
vld1.64 d7,[r0]! @ load Xi
|
||||
vld1.64 d6,[r0]!
|
||||
vmov.i64 d29,#0x0000ffffffffffff
|
||||
vldmia r1,{d26,d27} @ load twisted H
|
||||
vmov.i64 d30,#0x00000000ffffffff
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q3,q3
|
||||
#endif
|
||||
vmov.i64 d31,#0x000000000000ffff
|
||||
veor d28,d26,d27 @ Karatsuba pre-processing
|
||||
mov r3,#16
|
||||
b .Lgmult_neon
|
||||
.size gcm_gmult_neon,.-gcm_gmult_neon
|
||||
|
||||
.globl gcm_ghash_neon
|
||||
.hidden gcm_ghash_neon
|
||||
.type gcm_ghash_neon,%function
|
||||
.align 4
|
||||
gcm_ghash_neon:
|
||||
vld1.64 d1,[r0]! @ load Xi
|
||||
vld1.64 d0,[r0]!
|
||||
vmov.i64 d29,#0x0000ffffffffffff
|
||||
vldmia r1,{d26,d27} @ load twisted H
|
||||
vmov.i64 d30,#0x00000000ffffffff
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vmov.i64 d31,#0x000000000000ffff
|
||||
veor d28,d26,d27 @ Karatsuba pre-processing
|
||||
|
||||
.Loop_neon:
|
||||
vld1.64 d7,[r2]! @ load inp
|
||||
vld1.64 d6,[r2]!
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q3,q3
|
||||
#endif
|
||||
veor q3,q0 @ inp^=Xi
|
||||
.Lgmult_neon:
|
||||
vext.8 d16, d26, d26, #1 @ A1
|
||||
vmull.p8 q8, d16, d6 @ F = A1*B
|
||||
vext.8 d0, d6, d6, #1 @ B1
|
||||
vmull.p8 q0, d26, d0 @ E = A*B1
|
||||
vext.8 d18, d26, d26, #2 @ A2
|
||||
vmull.p8 q9, d18, d6 @ H = A2*B
|
||||
vext.8 d22, d6, d6, #2 @ B2
|
||||
vmull.p8 q11, d26, d22 @ G = A*B2
|
||||
vext.8 d20, d26, d26, #3 @ A3
|
||||
veor q8, q8, q0 @ L = E + F
|
||||
vmull.p8 q10, d20, d6 @ J = A3*B
|
||||
vext.8 d0, d6, d6, #3 @ B3
|
||||
veor q9, q9, q11 @ M = G + H
|
||||
vmull.p8 q0, d26, d0 @ I = A*B3
|
||||
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
|
||||
vand d17, d17, d29
|
||||
vext.8 d22, d6, d6, #4 @ B4
|
||||
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
|
||||
vand d19, d19, d30
|
||||
vmull.p8 q11, d26, d22 @ K = A*B4
|
||||
veor q10, q10, q0 @ N = I + J
|
||||
veor d16, d16, d17
|
||||
veor d18, d18, d19
|
||||
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
|
||||
vand d21, d21, d31
|
||||
vext.8 q8, q8, q8, #15
|
||||
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
|
||||
vmov.i64 d23, #0
|
||||
vext.8 q9, q9, q9, #14
|
||||
veor d20, d20, d21
|
||||
vmull.p8 q0, d26, d6 @ D = A*B
|
||||
vext.8 q11, q11, q11, #12
|
||||
vext.8 q10, q10, q10, #13
|
||||
veor q8, q8, q9
|
||||
veor q10, q10, q11
|
||||
veor q0, q0, q8
|
||||
veor q0, q0, q10
|
||||
veor d6,d6,d7 @ Karatsuba pre-processing
|
||||
vext.8 d16, d28, d28, #1 @ A1
|
||||
vmull.p8 q8, d16, d6 @ F = A1*B
|
||||
vext.8 d2, d6, d6, #1 @ B1
|
||||
vmull.p8 q1, d28, d2 @ E = A*B1
|
||||
vext.8 d18, d28, d28, #2 @ A2
|
||||
vmull.p8 q9, d18, d6 @ H = A2*B
|
||||
vext.8 d22, d6, d6, #2 @ B2
|
||||
vmull.p8 q11, d28, d22 @ G = A*B2
|
||||
vext.8 d20, d28, d28, #3 @ A3
|
||||
veor q8, q8, q1 @ L = E + F
|
||||
vmull.p8 q10, d20, d6 @ J = A3*B
|
||||
vext.8 d2, d6, d6, #3 @ B3
|
||||
veor q9, q9, q11 @ M = G + H
|
||||
vmull.p8 q1, d28, d2 @ I = A*B3
|
||||
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
|
||||
vand d17, d17, d29
|
||||
vext.8 d22, d6, d6, #4 @ B4
|
||||
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
|
||||
vand d19, d19, d30
|
||||
vmull.p8 q11, d28, d22 @ K = A*B4
|
||||
veor q10, q10, q1 @ N = I + J
|
||||
veor d16, d16, d17
|
||||
veor d18, d18, d19
|
||||
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
|
||||
vand d21, d21, d31
|
||||
vext.8 q8, q8, q8, #15
|
||||
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
|
||||
vmov.i64 d23, #0
|
||||
vext.8 q9, q9, q9, #14
|
||||
veor d20, d20, d21
|
||||
vmull.p8 q1, d28, d6 @ D = A*B
|
||||
vext.8 q11, q11, q11, #12
|
||||
vext.8 q10, q10, q10, #13
|
||||
veor q8, q8, q9
|
||||
veor q10, q10, q11
|
||||
veor q1, q1, q8
|
||||
veor q1, q1, q10
|
||||
vext.8 d16, d27, d27, #1 @ A1
|
||||
vmull.p8 q8, d16, d7 @ F = A1*B
|
||||
vext.8 d4, d7, d7, #1 @ B1
|
||||
vmull.p8 q2, d27, d4 @ E = A*B1
|
||||
vext.8 d18, d27, d27, #2 @ A2
|
||||
vmull.p8 q9, d18, d7 @ H = A2*B
|
||||
vext.8 d22, d7, d7, #2 @ B2
|
||||
vmull.p8 q11, d27, d22 @ G = A*B2
|
||||
vext.8 d20, d27, d27, #3 @ A3
|
||||
veor q8, q8, q2 @ L = E + F
|
||||
vmull.p8 q10, d20, d7 @ J = A3*B
|
||||
vext.8 d4, d7, d7, #3 @ B3
|
||||
veor q9, q9, q11 @ M = G + H
|
||||
vmull.p8 q2, d27, d4 @ I = A*B3
|
||||
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
|
||||
vand d17, d17, d29
|
||||
vext.8 d22, d7, d7, #4 @ B4
|
||||
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
|
||||
vand d19, d19, d30
|
||||
vmull.p8 q11, d27, d22 @ K = A*B4
|
||||
veor q10, q10, q2 @ N = I + J
|
||||
veor d16, d16, d17
|
||||
veor d18, d18, d19
|
||||
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
|
||||
vand d21, d21, d31
|
||||
vext.8 q8, q8, q8, #15
|
||||
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
|
||||
vmov.i64 d23, #0
|
||||
vext.8 q9, q9, q9, #14
|
||||
veor d20, d20, d21
|
||||
vmull.p8 q2, d27, d7 @ D = A*B
|
||||
vext.8 q11, q11, q11, #12
|
||||
vext.8 q10, q10, q10, #13
|
||||
veor q8, q8, q9
|
||||
veor q10, q10, q11
|
||||
veor q2, q2, q8
|
||||
veor q2, q2, q10
|
||||
veor q1,q1,q0 @ Karatsuba post-processing
|
||||
veor q1,q1,q2
|
||||
veor d1,d1,d2
|
||||
veor d4,d4,d3 @ Xh|Xl - 256-bit result
|
||||
|
||||
@ equivalent of reduction_avx from ghash-x86_64.pl
|
||||
vshl.i64 q9,q0,#57 @ 1st phase
|
||||
vshl.i64 q10,q0,#62
|
||||
veor q10,q10,q9 @
|
||||
vshl.i64 q9,q0,#63
|
||||
veor q10, q10, q9 @
|
||||
veor d1,d1,d20 @
|
||||
veor d4,d4,d21
|
||||
|
||||
vshr.u64 q10,q0,#1 @ 2nd phase
|
||||
veor q2,q2,q0
|
||||
veor q0,q0,q10 @
|
||||
vshr.u64 q10,q10,#6
|
||||
vshr.u64 q0,q0,#1 @
|
||||
veor q0,q0,q2 @
|
||||
veor q0,q0,q10 @
|
||||
|
||||
subs r3,#16
|
||||
bne .Loop_neon
|
||||
|
||||
#ifdef __ARMEL__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
sub r0,#16
|
||||
vst1.64 d1,[r0]! @ write out Xi
|
||||
vst1.64 d0,[r0]
|
||||
|
||||
bx lr @ bx lr
|
||||
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
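
Mathematically, gcm_gmult_4bit, gcm_ghash_4bit and the NEON variants above all evaluate the same operation, Xi <- Xi*H in GHASH's GF(2^128); they differ only in strategy (4-bit lookup tables versus NEON polynomial multiplies on a preprocessed H). For reference, a bit-serial C version of that field multiplication (illustrative only, not part of the imported sources) is:

#include <stdint.h>

/* Bit-serial multiply in GHASH's GF(2^128): X <- X * H. The most significant
 * bit of X[0] is the first coefficient, and the field is reduced by
 * x^128 + x^7 + x^2 + x + 1, the polynomial behind the 0xE1 constants used
 * above. Reference only; far slower than the table or NEON paths. */
static void gf128_mul_ref(uint8_t X[16], const uint8_t H[16]) {
  uint64_t Vh = 0, Vl = 0, Zh = 0, Zl = 0;
  for (int i = 0; i < 8; i++) {                /* load H as two big-endian halves */
    Vh = (Vh << 8) | H[i];
    Vl = (Vl << 8) | H[i + 8];
  }
  for (int i = 0; i < 128; i++) {
    if ((X[i / 8] >> (7 - (i % 8))) & 1) {     /* bit i of X, MSB first */
      Zh ^= Vh;
      Zl ^= Vl;
    }
    int carry = (int)(Vl & 1);
    Vl = (Vl >> 1) | (Vh << 63);               /* V <- V >> 1 */
    Vh >>= 1;
    if (carry) {
      Vh ^= 0xe100000000000000ULL;             /* reduce by the field polynomial */
    }
  }
  for (int i = 7; i >= 0; i--) {               /* store Z back, big-endian */
    X[i] = (uint8_t)Zh;
    Zh >>= 8;
    X[i + 8] = (uint8_t)Zl;
    Zl >>= 8;
  }
}
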
@ -0,0 +1,237 @@
#if defined(__arm__)
|
||||
#include <openssl/arm_arch.h>
|
||||
|
||||
.text
|
||||
.fpu neon
|
||||
.code 32
|
||||
#undef __thumb2__
|
||||
.globl gcm_init_v8
|
||||
.hidden gcm_init_v8
|
||||
.type gcm_init_v8,%function
|
||||
.align 4
|
||||
gcm_init_v8:
|
||||
vld1.64 {q9},[r1] @ load input H
|
||||
vmov.i8 q11,#0xe1
|
||||
vshl.i64 q11,q11,#57 @ 0xc2.0
|
||||
vext.8 q3,q9,q9,#8
|
||||
vshr.u64 q10,q11,#63
|
||||
vdup.32 q9,d18[1]
|
||||
vext.8 q8,q10,q11,#8 @ t0=0xc2....01
|
||||
vshr.u64 q10,q3,#63
|
||||
vshr.s32 q9,q9,#31 @ broadcast carry bit
|
||||
vand q10,q10,q8
|
||||
vshl.i64 q3,q3,#1
|
||||
vext.8 q10,q10,q10,#8
|
||||
vand q8,q8,q9
|
||||
vorr q3,q3,q10 @ H<<<=1
|
||||
veor q12,q3,q8 @ twisted H
|
||||
vst1.64 {q12},[r0]! @ store Htable[0]
|
||||
|
||||
@ calculate H^2
|
||||
vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing
|
||||
.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12
|
||||
veor q8,q8,q12
|
||||
.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12
|
||||
.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
|
||||
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
veor q0,q1,q10
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q10,q10,q2
|
||||
veor q14,q0,q10
|
||||
|
||||
vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
|
||||
veor q9,q9,q14
|
||||
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
|
||||
vst1.64 {q13,q14},[r0] @ store Htable[1..2]
|
||||
|
||||
bx lr
|
||||
.size gcm_init_v8,.-gcm_init_v8
|
||||
.globl gcm_gmult_v8
|
||||
.hidden gcm_gmult_v8
|
||||
.type gcm_gmult_v8,%function
|
||||
.align 4
|
||||
gcm_gmult_v8:
|
||||
vld1.64 {q9},[r0] @ load Xi
|
||||
vmov.i8 q11,#0xe1
|
||||
vld1.64 {q12,q13},[r1] @ load twisted H, ...
|
||||
vshl.u64 q11,q11,#57
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q9,q9
|
||||
#endif
|
||||
vext.8 q3,q9,q9,#8
|
||||
|
||||
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
|
||||
veor q9,q9,q3 @ Karatsuba pre-processing
|
||||
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
|
||||
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
|
||||
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
veor q0,q1,q10
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q10,q10,q2
|
||||
veor q0,q0,q10
|
||||
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vext.8 q0,q0,q0,#8
|
||||
vst1.64 {q0},[r0] @ write out Xi
|
||||
|
||||
bx lr
|
||||
.size gcm_gmult_v8,.-gcm_gmult_v8
|
||||
.globl gcm_ghash_v8
|
||||
.hidden gcm_ghash_v8
|
||||
.type gcm_ghash_v8,%function
|
||||
.align 4
|
||||
gcm_ghash_v8:
|
||||
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
|
||||
vld1.64 {q0},[r0] @ load [rotated] Xi
|
||||
@ "[rotated]" means that
|
||||
@ loaded value would have
|
||||
@ to be rotated in order to
|
||||
@ make it appear as in
|
||||
@ algorithm specification
|
||||
subs r3,r3,#32 @ see if r3 is 32 or larger
|
||||
mov r12,#16 @ r12 is used as post-
|
||||
@ increment for input pointer;
|
||||
@ as loop is modulo-scheduled
|
||||
@ r12 is zeroed just in time
|
||||
@ to preclude overstepping
|
||||
@ inp[len], which means that
|
||||
@ last block[s] are actually
|
||||
@ loaded twice, but last
|
||||
@ copy is not processed
|
||||
vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2
|
||||
vmov.i8 q11,#0xe1
|
||||
vld1.64 {q14},[r1]
|
||||
moveq r12,#0 @ is it time to zero r12?
|
||||
vext.8 q0,q0,q0,#8 @ rotate Xi
|
||||
vld1.64 {q8},[r2]! @ load [rotated] I[0]
|
||||
vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q8,q8
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vext.8 q3,q8,q8,#8 @ rotate I[0]
|
||||
blo .Lodd_tail_v8 @ r3 was less than 32
|
||||
vld1.64 {q9},[r2],r12 @ load [rotated] I[1]
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q9,q9
|
||||
#endif
|
||||
vext.8 q7,q9,q9,#8
|
||||
veor q3,q3,q0 @ I[i]^=Xi
|
||||
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
|
||||
veor q9,q9,q7 @ Karatsuba pre-processing
|
||||
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
|
||||
b .Loop_mod2x_v8
|
||||
|
||||
.align 4
|
||||
.Loop_mod2x_v8:
|
||||
vext.8 q10,q3,q3,#8
|
||||
subs r3,r3,#32 @ is there more data?
|
||||
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
|
||||
movlo r12,#0 @ is it time to zero r12?
|
||||
|
||||
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
|
||||
veor q10,q10,q3 @ Karatsuba pre-processing
|
||||
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
|
||||
veor q0,q0,q4 @ accumulate
|
||||
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
|
||||
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
|
||||
|
||||
veor q2,q2,q6
|
||||
moveq r12,#0 @ is it time to zero r12?
|
||||
veor q1,q1,q5
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3]
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q8,q8
|
||||
#endif
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
|
||||
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q9,q9
|
||||
#endif
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
vext.8 q7,q9,q9,#8
|
||||
vext.8 q3,q8,q8,#8
|
||||
veor q0,q1,q10
|
||||
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
|
||||
veor q3,q3,q2 @ accumulate q3 early
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q3,q3,q10
|
||||
veor q9,q9,q7 @ Karatsuba pre-processing
|
||||
veor q3,q3,q0
|
||||
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
|
||||
bhs .Loop_mod2x_v8 @ there was at least 32 more bytes
|
||||
|
||||
veor q2,q2,q10
|
||||
vext.8 q3,q8,q8,#8 @ re-construct q3
|
||||
adds r3,r3,#32 @ re-construct r3
|
||||
veor q0,q0,q2 @ re-construct q0
|
||||
beq .Ldone_v8 @ is r3 zero?
|
||||
.Lodd_tail_v8:
|
||||
vext.8 q10,q0,q0,#8
|
||||
veor q3,q3,q0 @ inp^=Xi
|
||||
veor q9,q8,q10 @ q9 is rotated inp^Xi
|
||||
|
||||
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
|
||||
veor q9,q9,q3 @ Karatsuba pre-processing
|
||||
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
|
||||
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
|
||||
|
||||
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
|
||||
veor q10,q0,q2
|
||||
veor q1,q1,q9
|
||||
veor q1,q1,q10
|
||||
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
|
||||
|
||||
vmov d4,d3 @ Xh|Xm - 256-bit result
|
||||
vmov d3,d0 @ Xm is rotated Xl
|
||||
veor q0,q1,q10
|
||||
|
||||
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
|
||||
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
|
||||
veor q10,q10,q2
|
||||
veor q0,q0,q10
|
||||
|
||||
.Ldone_v8:
|
||||
#ifndef __ARMEB__
|
||||
vrev64.8 q0,q0
|
||||
#endif
|
||||
vext.8 q0,q0,q0,#8
|
||||
vst1.64 {q0},[r0] @ write out Xi
|
||||
|
||||
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
|
||||
bx lr
|
||||
.size gcm_ghash_v8,.-gcm_ghash_v8
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
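
The recurring "Karatsuba pre-processing" and "post-processing" comments above refer to computing each 128x128-bit carry-less product from three 64x64-bit multiplies instead of four. A small C illustration (not part of the imported sources; clmul64 is a bit-serial stand-in for the vmull.p64/pmull instructions that the .byte sequences above encode) is:

#include <stdint.h>

/* Bit-serial 64x64 -> 128 carry-less multiply; software stand-in for one
 * polynomial-multiply instruction. Result is hi:lo. */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo) {
  uint64_t h = 0, l = 0;
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {
      l ^= a << i;
      if (i != 0) {
        h ^= a >> (64 - i);
      }
    }
  }
  *hi = h;
  *lo = l;
}

/* 128x128 -> 256 carry-less multiply via Karatsuba, mirroring the
 * pre-/post-processing pattern above: a = a[1]:a[0], b = b[1]:b[0],
 * result r[3]:r[2]:r[1]:r[0]. */
static void clmul128_karatsuba(const uint64_t a[2], const uint64_t b[2],
                               uint64_t r[4]) {
  uint64_t lo_h, lo_l, hi_h, hi_l, mid_h, mid_l;
  clmul64(a[0], b[0], &lo_h, &lo_l);                  /* low  = a0*b0 */
  clmul64(a[1], b[1], &hi_h, &hi_l);                  /* high = a1*b1 */
  clmul64(a[0] ^ a[1], b[0] ^ b[1], &mid_h, &mid_l);  /* (a0^a1)*(b0^b1) */
  mid_l ^= lo_l ^ hi_l;                               /* post-processing: */
  mid_h ^= lo_h ^ hi_h;                               /* mid ^= low ^ high */
  r[0] = lo_l;
  r[1] = lo_h ^ mid_l;                                /* fold mid in at bit 64 */
  r[2] = hi_l ^ mid_h;
  r[3] = hi_h;
}
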
@ -0,0 +1,575 @@
.machine "any"
|
||||
|
||||
.abiversion 2
|
||||
.text
|
||||
|
||||
.globl gcm_init_p8
|
||||
.type gcm_init_p8,@function
|
||||
.align 5
|
||||
gcm_init_p8:
|
||||
.localentry gcm_init_p8,0
|
||||
|
||||
li 0,-4096
|
||||
li 8,0x10
|
||||
li 12,-1
|
||||
li 9,0x20
|
||||
or 0,0,0
|
||||
li 10,0x30
|
||||
.long 0x7D202699
|
||||
|
||||
vspltisb 8,-16
|
||||
vspltisb 5,1
|
||||
vaddubm 8,8,8
|
||||
vxor 4,4,4
|
||||
vor 8,8,5
|
||||
vsldoi 8,8,4,15
|
||||
vsldoi 6,4,5,1
|
||||
vaddubm 8,8,8
|
||||
vspltisb 7,7
|
||||
vor 8,8,6
|
||||
vspltb 6,9,0
|
||||
vsl 9,9,5
|
||||
vsrab 6,6,7
|
||||
vand 6,6,8
|
||||
vxor 3,9,6
|
||||
|
||||
vsldoi 9,3,3,8
|
||||
vsldoi 8,4,8,8
|
||||
vsldoi 11,4,9,8
|
||||
vsldoi 10,9,4,8
|
||||
|
||||
.long 0x7D001F99
|
||||
.long 0x7D681F99
|
||||
li 8,0x40
|
||||
.long 0x7D291F99
|
||||
li 9,0x50
|
||||
.long 0x7D4A1F99
|
||||
li 10,0x60
|
||||
|
||||
.long 0x10035CC8
|
||||
.long 0x10234CC8
|
||||
.long 0x104354C8
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
vxor 16,0,6
|
||||
|
||||
vsldoi 17,16,16,8
|
||||
vsldoi 19,4,17,8
|
||||
vsldoi 18,17,4,8
|
||||
|
||||
.long 0x7E681F99
|
||||
li 8,0x70
|
||||
.long 0x7E291F99
|
||||
li 9,0x80
|
||||
.long 0x7E4A1F99
|
||||
li 10,0x90
|
||||
.long 0x10039CC8
|
||||
.long 0x11B09CC8
|
||||
.long 0x10238CC8
|
||||
.long 0x11D08CC8
|
||||
.long 0x104394C8
|
||||
.long 0x11F094C8
|
||||
|
||||
.long 0x10E044C8
|
||||
.long 0x114D44C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vsldoi 11,14,4,8
|
||||
vsldoi 9,4,14,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
vxor 13,13,11
|
||||
vxor 15,15,9
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vsldoi 13,13,13,8
|
||||
vxor 0,0,7
|
||||
vxor 13,13,10
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
vsldoi 9,13,13,8
|
||||
.long 0x100044C8
|
||||
.long 0x11AD44C8
|
||||
vxor 6,6,2
|
||||
vxor 9,9,15
|
||||
vxor 0,0,6
|
||||
vxor 13,13,9
|
||||
|
||||
vsldoi 9,0,0,8
|
||||
vsldoi 17,13,13,8
|
||||
vsldoi 11,4,9,8
|
||||
vsldoi 10,9,4,8
|
||||
vsldoi 19,4,17,8
|
||||
vsldoi 18,17,4,8
|
||||
|
||||
.long 0x7D681F99
|
||||
li 8,0xa0
|
||||
.long 0x7D291F99
|
||||
li 9,0xb0
|
||||
.long 0x7D4A1F99
|
||||
li 10,0xc0
|
||||
.long 0x7E681F99
|
||||
.long 0x7E291F99
|
||||
.long 0x7E4A1F99
|
||||
|
||||
or 12,12,12
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,2,0
|
||||
.long 0
|
||||
.size gcm_init_p8,.-gcm_init_p8
|
||||
.globl gcm_gmult_p8
|
||||
.type gcm_gmult_p8,@function
|
||||
.align 5
|
||||
gcm_gmult_p8:
|
||||
.localentry gcm_gmult_p8,0
|
||||
|
||||
lis 0,0xfff8
|
||||
li 8,0x10
|
||||
li 12,-1
|
||||
li 9,0x20
|
||||
or 0,0,0
|
||||
li 10,0x30
|
||||
.long 0x7C601E99
|
||||
|
||||
.long 0x7D682699
|
||||
lvsl 12,0,0
|
||||
.long 0x7D292699
|
||||
vspltisb 5,0x07
|
||||
.long 0x7D4A2699
|
||||
vxor 12,12,5
|
||||
.long 0x7D002699
|
||||
vperm 3,3,3,12
|
||||
vxor 4,4,4
|
||||
|
||||
.long 0x10035CC8
|
||||
.long 0x10234CC8
|
||||
.long 0x104354C8
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
vxor 0,0,6
|
||||
|
||||
vperm 0,0,0,12
|
||||
.long 0x7C001F99
|
||||
|
||||
or 12,12,12
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,2,0
|
||||
.long 0
|
||||
.size gcm_gmult_p8,.-gcm_gmult_p8
|
||||
|
||||
.globl gcm_ghash_p8
|
||||
.type gcm_ghash_p8,@function
|
||||
.align 5
|
||||
gcm_ghash_p8:
|
||||
.localentry gcm_ghash_p8,0
|
||||
|
||||
li 0,-4096
|
||||
li 8,0x10
|
||||
li 12,-1
|
||||
li 9,0x20
|
||||
or 0,0,0
|
||||
li 10,0x30
|
||||
.long 0x7C001E99
|
||||
|
||||
.long 0x7D682699
|
||||
li 8,0x40
|
||||
lvsl 12,0,0
|
||||
.long 0x7D292699
|
||||
li 9,0x50
|
||||
vspltisb 5,0x07
|
||||
.long 0x7D4A2699
|
||||
li 10,0x60
|
||||
vxor 12,12,5
|
||||
.long 0x7D002699
|
||||
vperm 0,0,0,12
|
||||
vxor 4,4,4
|
||||
|
||||
cmpldi 6,64
|
||||
bge .Lgcm_ghash_p8_4x
|
||||
|
||||
.long 0x7C602E99
|
||||
addi 5,5,16
|
||||
subic. 6,6,16
|
||||
vperm 3,3,3,12
|
||||
vxor 3,3,0
|
||||
beq .Lshort
|
||||
|
||||
.long 0x7E682699
|
||||
li 8,16
|
||||
.long 0x7E292699
|
||||
add 9,5,6
|
||||
.long 0x7E4A2699
|
||||
|
||||
|
||||
.align 5
|
||||
.Loop_2x:
|
||||
.long 0x7E002E99
|
||||
vperm 16,16,16,12
|
||||
|
||||
subic 6,6,32
|
||||
.long 0x10039CC8
|
||||
.long 0x11B05CC8
|
||||
subfe 0,0,0
|
||||
.long 0x10238CC8
|
||||
.long 0x11D04CC8
|
||||
and 0,0,6
|
||||
.long 0x104394C8
|
||||
.long 0x11F054C8
|
||||
add 5,5,0
|
||||
|
||||
vxor 0,0,13
|
||||
vxor 1,1,14
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 2,2,15
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
.long 0x7C682E99
|
||||
addi 5,5,32
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vperm 3,3,3,12
|
||||
vxor 6,6,2
|
||||
vxor 3,3,6
|
||||
vxor 3,3,0
|
||||
cmpld 9,5
|
||||
bgt .Loop_2x
|
||||
|
||||
cmplwi 6,0
|
||||
bne .Leven
|
||||
|
||||
.Lshort:
|
||||
.long 0x10035CC8
|
||||
.long 0x10234CC8
|
||||
.long 0x104354C8
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
|
||||
.Leven:
|
||||
vxor 0,0,6
|
||||
vperm 0,0,0,12
|
||||
.long 0x7C001F99
|
||||
|
||||
or 12,12,12
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,4,0
|
||||
.long 0
|
||||
.align 5
|
||||
.gcm_ghash_p8_4x:
|
||||
.Lgcm_ghash_p8_4x:
|
||||
stdu 1,-256(1)
|
||||
li 10,63
|
||||
li 11,79
|
||||
stvx 20,10,1
|
||||
addi 10,10,32
|
||||
stvx 21,11,1
|
||||
addi 11,11,32
|
||||
stvx 22,10,1
|
||||
addi 10,10,32
|
||||
stvx 23,11,1
|
||||
addi 11,11,32
|
||||
stvx 24,10,1
|
||||
addi 10,10,32
|
||||
stvx 25,11,1
|
||||
addi 11,11,32
|
||||
stvx 26,10,1
|
||||
addi 10,10,32
|
||||
stvx 27,11,1
|
||||
addi 11,11,32
|
||||
stvx 28,10,1
|
||||
addi 10,10,32
|
||||
stvx 29,11,1
|
||||
addi 11,11,32
|
||||
stvx 30,10,1
|
||||
li 10,0x60
|
||||
stvx 31,11,1
|
||||
li 0,-1
|
||||
stw 12,252(1)
|
||||
or 0,0,0
|
||||
|
||||
lvsl 5,0,8
|
||||
|
||||
li 8,0x70
|
||||
.long 0x7E292699
|
||||
li 9,0x80
|
||||
vspltisb 6,8
|
||||
|
||||
li 10,0x90
|
||||
.long 0x7EE82699
|
||||
li 8,0xa0
|
||||
.long 0x7F092699
|
||||
li 9,0xb0
|
||||
.long 0x7F2A2699
|
||||
li 10,0xc0
|
||||
.long 0x7FA82699
|
||||
li 8,0x10
|
||||
.long 0x7FC92699
|
||||
li 9,0x20
|
||||
.long 0x7FEA2699
|
||||
li 10,0x30
|
||||
|
||||
vsldoi 7,4,6,8
|
||||
vaddubm 18,5,7
|
||||
vaddubm 19,6,18
|
||||
|
||||
srdi 6,6,4
|
||||
|
||||
.long 0x7C602E99
|
||||
.long 0x7E082E99
|
||||
subic. 6,6,8
|
||||
.long 0x7EC92E99
|
||||
.long 0x7F8A2E99
|
||||
addi 5,5,0x40
|
||||
vperm 3,3,3,12
|
||||
vperm 16,16,16,12
|
||||
vperm 22,22,22,12
|
||||
vperm 28,28,28,12
|
||||
|
||||
vxor 2,3,0
|
||||
|
||||
.long 0x11B0BCC8
|
||||
.long 0x11D0C4C8
|
||||
.long 0x11F0CCC8
|
||||
|
||||
vperm 11,17,9,18
|
||||
vperm 5,22,28,19
|
||||
vperm 10,17,9,19
|
||||
vperm 6,22,28,18
|
||||
.long 0x12B68CC8
|
||||
.long 0x12855CC8
|
||||
.long 0x137C4CC8
|
||||
.long 0x134654C8
|
||||
|
||||
vxor 21,21,14
|
||||
vxor 20,20,13
|
||||
vxor 27,27,21
|
||||
vxor 26,26,15
|
||||
|
||||
blt .Ltail_4x
|
||||
|
||||
.Loop_4x:
|
||||
.long 0x7C602E99
|
||||
.long 0x7E082E99
|
||||
subic. 6,6,4
|
||||
.long 0x7EC92E99
|
||||
.long 0x7F8A2E99
|
||||
addi 5,5,0x40
|
||||
vperm 16,16,16,12
|
||||
vperm 22,22,22,12
|
||||
vperm 28,28,28,12
|
||||
vperm 3,3,3,12
|
||||
|
||||
.long 0x1002ECC8
|
||||
.long 0x1022F4C8
|
||||
.long 0x1042FCC8
|
||||
.long 0x11B0BCC8
|
||||
.long 0x11D0C4C8
|
||||
.long 0x11F0CCC8
|
||||
|
||||
vxor 0,0,20
|
||||
vxor 1,1,27
|
||||
vxor 2,2,26
|
||||
vperm 5,22,28,19
|
||||
vperm 6,22,28,18
|
||||
|
||||
.long 0x10E044C8
|
||||
.long 0x12855CC8
|
||||
.long 0x134654C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x12B68CC8
|
||||
.long 0x137C4CC8
|
||||
.long 0x100044C8
|
||||
|
||||
vxor 20,20,13
|
||||
vxor 26,26,15
|
||||
vxor 2,2,3
|
||||
vxor 21,21,14
|
||||
vxor 2,2,6
|
||||
vxor 27,27,21
|
||||
vxor 2,2,0
|
||||
bge .Loop_4x
|
||||
|
||||
.Ltail_4x:
|
||||
.long 0x1002ECC8
|
||||
.long 0x1022F4C8
|
||||
.long 0x1042FCC8
|
||||
|
||||
vxor 0,0,20
|
||||
vxor 1,1,27
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 2,2,26
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
vxor 0,0,6
|
||||
|
||||
addic. 6,6,4
|
||||
beq .Ldone_4x
|
||||
|
||||
.long 0x7C602E99
|
||||
cmpldi 6,2
|
||||
li 6,-4
|
||||
blt .Lone
|
||||
.long 0x7E082E99
|
||||
beq .Ltwo
|
||||
|
||||
.Lthree:
|
||||
.long 0x7EC92E99
|
||||
vperm 3,3,3,12
|
||||
vperm 16,16,16,12
|
||||
vperm 22,22,22,12
|
||||
|
||||
vxor 2,3,0
|
||||
vor 29,23,23
|
||||
vor 30, 24, 24
|
||||
vor 31,25,25
|
||||
|
||||
vperm 5,16,22,19
|
||||
vperm 6,16,22,18
|
||||
.long 0x12B08CC8
|
||||
.long 0x13764CC8
|
||||
.long 0x12855CC8
|
||||
.long 0x134654C8
|
||||
|
||||
vxor 27,27,21
|
||||
b .Ltail_4x
|
||||
|
||||
.align 4
|
||||
.Ltwo:
|
||||
vperm 3,3,3,12
|
||||
vperm 16,16,16,12
|
||||
|
||||
vxor 2,3,0
|
||||
vperm 5,4,16,19
|
||||
vperm 6,4,16,18
|
||||
|
||||
vsldoi 29,4,17,8
|
||||
vor 30, 17, 17
|
||||
vsldoi 31,17,4,8
|
||||
|
||||
.long 0x12855CC8
|
||||
.long 0x13704CC8
|
||||
.long 0x134654C8
|
||||
|
||||
b .Ltail_4x
|
||||
|
||||
.align 4
|
||||
.Lone:
|
||||
vperm 3,3,3,12
|
||||
|
||||
vsldoi 29,4,9,8
|
||||
vor 30, 9, 9
|
||||
vsldoi 31,9,4,8
|
||||
|
||||
vxor 2,3,0
|
||||
vxor 20,20,20
|
||||
vxor 27,27,27
|
||||
vxor 26,26,26
|
||||
|
||||
b .Ltail_4x
|
||||
|
||||
.Ldone_4x:
|
||||
vperm 0,0,0,12
|
||||
.long 0x7C001F99
|
||||
|
||||
li 10,63
|
||||
li 11,79
|
||||
or 12,12,12
|
||||
lvx 20,10,1
|
||||
addi 10,10,32
|
||||
lvx 21,11,1
|
||||
addi 11,11,32
|
||||
lvx 22,10,1
|
||||
addi 10,10,32
|
||||
lvx 23,11,1
|
||||
addi 11,11,32
|
||||
lvx 24,10,1
|
||||
addi 10,10,32
|
||||
lvx 25,11,1
|
||||
addi 11,11,32
|
||||
lvx 26,10,1
|
||||
addi 10,10,32
|
||||
lvx 27,11,1
|
||||
addi 11,11,32
|
||||
lvx 28,10,1
|
||||
addi 10,10,32
|
||||
lvx 29,11,1
|
||||
addi 11,11,32
|
||||
lvx 30,10,1
|
||||
lvx 31,11,1
|
||||
addi 1,1,256
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x04,0,0x80,0,4,0
|
||||
.long 0
|
||||
.size gcm_ghash_p8,.-gcm_ghash_p8
|
||||
|
||||
.byte 71,72,65,83,72,32,102,111,114,32,80,111,119,101,114,73,83,65,32,50,46,48,55,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 2
|
||||
.align 2
|
|
@ -0,0 +1,565 @@
|
|||
.machine "any"
|
||||
|
||||
.text
|
||||
|
||||
.globl gcm_init_p8
|
||||
.align 5
|
||||
gcm_init_p8:
|
||||
li 0,-4096
|
||||
li 8,0x10
|
||||
mfspr 12,256
|
||||
li 9,0x20
|
||||
mtspr 256,0
|
||||
li 10,0x30
|
||||
.long 0x7D202699
|
||||
|
||||
vspltisb 8,-16
|
||||
vspltisb 5,1
|
||||
vaddubm 8,8,8
|
||||
vxor 4,4,4
|
||||
vor 8,8,5
|
||||
vsldoi 8,8,4,15
|
||||
vsldoi 6,4,5,1
|
||||
vaddubm 8,8,8
|
||||
vspltisb 7,7
|
||||
vor 8,8,6
|
||||
vspltb 6,9,0
|
||||
vsl 9,9,5
|
||||
vsrab 6,6,7
|
||||
vand 6,6,8
|
||||
vxor 3,9,6
|
||||
|
||||
vsldoi 9,3,3,8
|
||||
vsldoi 8,4,8,8
|
||||
vsldoi 11,4,9,8
|
||||
vsldoi 10,9,4,8
|
||||
|
||||
.long 0x7D001F99
|
||||
.long 0x7D681F99
|
||||
li 8,0x40
|
||||
.long 0x7D291F99
|
||||
li 9,0x50
|
||||
.long 0x7D4A1F99
|
||||
li 10,0x60
|
||||
|
||||
.long 0x10035CC8
|
||||
.long 0x10234CC8
|
||||
.long 0x104354C8
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
vxor 16,0,6
|
||||
|
||||
vsldoi 17,16,16,8
|
||||
vsldoi 19,4,17,8
|
||||
vsldoi 18,17,4,8
|
||||
|
||||
.long 0x7E681F99
|
||||
li 8,0x70
|
||||
.long 0x7E291F99
|
||||
li 9,0x80
|
||||
.long 0x7E4A1F99
|
||||
li 10,0x90
|
||||
.long 0x10039CC8
|
||||
.long 0x11B09CC8
|
||||
.long 0x10238CC8
|
||||
.long 0x11D08CC8
|
||||
.long 0x104394C8
|
||||
.long 0x11F094C8
|
||||
|
||||
.long 0x10E044C8
|
||||
.long 0x114D44C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vsldoi 11,14,4,8
|
||||
vsldoi 9,4,14,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
vxor 13,13,11
|
||||
vxor 15,15,9
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vsldoi 13,13,13,8
|
||||
vxor 0,0,7
|
||||
vxor 13,13,10
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
vsldoi 9,13,13,8
|
||||
.long 0x100044C8
|
||||
.long 0x11AD44C8
|
||||
vxor 6,6,2
|
||||
vxor 9,9,15
|
||||
vxor 0,0,6
|
||||
vxor 13,13,9
|
||||
|
||||
vsldoi 9,0,0,8
|
||||
vsldoi 17,13,13,8
|
||||
vsldoi 11,4,9,8
|
||||
vsldoi 10,9,4,8
|
||||
vsldoi 19,4,17,8
|
||||
vsldoi 18,17,4,8
|
||||
|
||||
.long 0x7D681F99
|
||||
li 8,0xa0
|
||||
.long 0x7D291F99
|
||||
li 9,0xb0
|
||||
.long 0x7D4A1F99
|
||||
li 10,0xc0
|
||||
.long 0x7E681F99
|
||||
.long 0x7E291F99
|
||||
.long 0x7E4A1F99
|
||||
|
||||
mtspr 256,12
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,2,0
|
||||
.long 0
|
||||
|
||||
.globl gcm_gmult_p8
|
||||
.align 5
|
||||
gcm_gmult_p8:
|
||||
lis 0,0xfff8
|
||||
li 8,0x10
|
||||
mfspr 12,256
|
||||
li 9,0x20
|
||||
mtspr 256,0
|
||||
li 10,0x30
|
||||
.long 0x7C601E99
|
||||
|
||||
.long 0x7D682699
|
||||
lvsl 12,0,0
|
||||
.long 0x7D292699
|
||||
vspltisb 5,0x07
|
||||
.long 0x7D4A2699
|
||||
vxor 12,12,5
|
||||
.long 0x7D002699
|
||||
vperm 3,3,3,12
|
||||
vxor 4,4,4
|
||||
|
||||
.long 0x10035CC8
|
||||
.long 0x10234CC8
|
||||
.long 0x104354C8
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
vxor 0,0,6
|
||||
|
||||
vperm 0,0,0,12
|
||||
.long 0x7C001F99
|
||||
|
||||
mtspr 256,12
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,2,0
|
||||
.long 0
|
||||
|
||||
|
||||
.globl gcm_ghash_p8
|
||||
.align 5
|
||||
gcm_ghash_p8:
|
||||
li 0,-4096
|
||||
li 8,0x10
|
||||
mfspr 12,256
|
||||
li 9,0x20
|
||||
mtspr 256,0
|
||||
li 10,0x30
|
||||
.long 0x7C001E99
|
||||
|
||||
.long 0x7D682699
|
||||
li 8,0x40
|
||||
lvsl 12,0,0
|
||||
.long 0x7D292699
|
||||
li 9,0x50
|
||||
vspltisb 5,0x07
|
||||
.long 0x7D4A2699
|
||||
li 10,0x60
|
||||
vxor 12,12,5
|
||||
.long 0x7D002699
|
||||
vperm 0,0,0,12
|
||||
vxor 4,4,4
|
||||
|
||||
cmpldi 6,64
|
||||
bge Lgcm_ghash_p8_4x
|
||||
|
||||
.long 0x7C602E99
|
||||
addi 5,5,16
|
||||
subic. 6,6,16
|
||||
vperm 3,3,3,12
|
||||
vxor 3,3,0
|
||||
beq Lshort
|
||||
|
||||
.long 0x7E682699
|
||||
li 8,16
|
||||
.long 0x7E292699
|
||||
add 9,5,6
|
||||
.long 0x7E4A2699
|
||||
|
||||
|
||||
.align 5
|
||||
Loop_2x:
|
||||
.long 0x7E002E99
|
||||
vperm 16,16,16,12
|
||||
|
||||
subic 6,6,32
|
||||
.long 0x10039CC8
|
||||
.long 0x11B05CC8
|
||||
subfe 0,0,0
|
||||
.long 0x10238CC8
|
||||
.long 0x11D04CC8
|
||||
and 0,0,6
|
||||
.long 0x104394C8
|
||||
.long 0x11F054C8
|
||||
add 5,5,0
|
||||
|
||||
vxor 0,0,13
|
||||
vxor 1,1,14
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 2,2,15
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
.long 0x7C682E99
|
||||
addi 5,5,32
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vperm 3,3,3,12
|
||||
vxor 6,6,2
|
||||
vxor 3,3,6
|
||||
vxor 3,3,0
|
||||
cmpld 9,5
|
||||
bgt Loop_2x
|
||||
|
||||
cmplwi 6,0
|
||||
bne Leven
|
||||
|
||||
Lshort:
|
||||
.long 0x10035CC8
|
||||
.long 0x10234CC8
|
||||
.long 0x104354C8
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
|
||||
Leven:
|
||||
vxor 0,0,6
|
||||
vperm 0,0,0,12
|
||||
.long 0x7C001F99
|
||||
|
||||
mtspr 256,12
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,4,0
|
||||
.long 0
|
||||
.align 5
|
||||
.gcm_ghash_p8_4x:
|
||||
Lgcm_ghash_p8_4x:
|
||||
stdu 1,-256(1)
|
||||
li 10,63
|
||||
li 11,79
|
||||
stvx 20,10,1
|
||||
addi 10,10,32
|
||||
stvx 21,11,1
|
||||
addi 11,11,32
|
||||
stvx 22,10,1
|
||||
addi 10,10,32
|
||||
stvx 23,11,1
|
||||
addi 11,11,32
|
||||
stvx 24,10,1
|
||||
addi 10,10,32
|
||||
stvx 25,11,1
|
||||
addi 11,11,32
|
||||
stvx 26,10,1
|
||||
addi 10,10,32
|
||||
stvx 27,11,1
|
||||
addi 11,11,32
|
||||
stvx 28,10,1
|
||||
addi 10,10,32
|
||||
stvx 29,11,1
|
||||
addi 11,11,32
|
||||
stvx 30,10,1
|
||||
li 10,0x60
|
||||
stvx 31,11,1
|
||||
li 0,-1
|
||||
stw 12,252(1)
|
||||
mtspr 256,0
|
||||
|
||||
lvsl 5,0,8
|
||||
|
||||
li 8,0x70
|
||||
.long 0x7E292699
|
||||
li 9,0x80
|
||||
vspltisb 6,8
|
||||
|
||||
li 10,0x90
|
||||
.long 0x7EE82699
|
||||
li 8,0xa0
|
||||
.long 0x7F092699
|
||||
li 9,0xb0
|
||||
.long 0x7F2A2699
|
||||
li 10,0xc0
|
||||
.long 0x7FA82699
|
||||
li 8,0x10
|
||||
.long 0x7FC92699
|
||||
li 9,0x20
|
||||
.long 0x7FEA2699
|
||||
li 10,0x30
|
||||
|
||||
vsldoi 7,4,6,8
|
||||
vaddubm 18,5,7
|
||||
vaddubm 19,6,18
|
||||
|
||||
srdi 6,6,4
|
||||
|
||||
.long 0x7C602E99
|
||||
.long 0x7E082E99
|
||||
subic. 6,6,8
|
||||
.long 0x7EC92E99
|
||||
.long 0x7F8A2E99
|
||||
addi 5,5,0x40
|
||||
vperm 3,3,3,12
|
||||
vperm 16,16,16,12
|
||||
vperm 22,22,22,12
|
||||
vperm 28,28,28,12
|
||||
|
||||
vxor 2,3,0
|
||||
|
||||
.long 0x11B0BCC8
|
||||
.long 0x11D0C4C8
|
||||
.long 0x11F0CCC8
|
||||
|
||||
vperm 11,17,9,18
|
||||
vperm 5,22,28,19
|
||||
vperm 10,17,9,19
|
||||
vperm 6,22,28,18
|
||||
.long 0x12B68CC8
|
||||
.long 0x12855CC8
|
||||
.long 0x137C4CC8
|
||||
.long 0x134654C8
|
||||
|
||||
vxor 21,21,14
|
||||
vxor 20,20,13
|
||||
vxor 27,27,21
|
||||
vxor 26,26,15
|
||||
|
||||
blt Ltail_4x
|
||||
|
||||
Loop_4x:
|
||||
.long 0x7C602E99
|
||||
.long 0x7E082E99
|
||||
subic. 6,6,4
|
||||
.long 0x7EC92E99
|
||||
.long 0x7F8A2E99
|
||||
addi 5,5,0x40
|
||||
vperm 16,16,16,12
|
||||
vperm 22,22,22,12
|
||||
vperm 28,28,28,12
|
||||
vperm 3,3,3,12
|
||||
|
||||
.long 0x1002ECC8
|
||||
.long 0x1022F4C8
|
||||
.long 0x1042FCC8
|
||||
.long 0x11B0BCC8
|
||||
.long 0x11D0C4C8
|
||||
.long 0x11F0CCC8
|
||||
|
||||
vxor 0,0,20
|
||||
vxor 1,1,27
|
||||
vxor 2,2,26
|
||||
vperm 5,22,28,19
|
||||
vperm 6,22,28,18
|
||||
|
||||
.long 0x10E044C8
|
||||
.long 0x12855CC8
|
||||
.long 0x134654C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x12B68CC8
|
||||
.long 0x137C4CC8
|
||||
.long 0x100044C8
|
||||
|
||||
vxor 20,20,13
|
||||
vxor 26,26,15
|
||||
vxor 2,2,3
|
||||
vxor 21,21,14
|
||||
vxor 2,2,6
|
||||
vxor 27,27,21
|
||||
vxor 2,2,0
|
||||
bge Loop_4x
|
||||
|
||||
Ltail_4x:
|
||||
.long 0x1002ECC8
|
||||
.long 0x1022F4C8
|
||||
.long 0x1042FCC8
|
||||
|
||||
vxor 0,0,20
|
||||
vxor 1,1,27
|
||||
|
||||
.long 0x10E044C8
|
||||
|
||||
vsldoi 5,1,4,8
|
||||
vsldoi 6,4,1,8
|
||||
vxor 2,2,26
|
||||
vxor 0,0,5
|
||||
vxor 2,2,6
|
||||
|
||||
vsldoi 0,0,0,8
|
||||
vxor 0,0,7
|
||||
|
||||
vsldoi 6,0,0,8
|
||||
.long 0x100044C8
|
||||
vxor 6,6,2
|
||||
vxor 0,0,6
|
||||
|
||||
addic. 6,6,4
|
||||
beq Ldone_4x
|
||||
|
||||
.long 0x7C602E99
|
||||
cmpldi 6,2
|
||||
li 6,-4
|
||||
blt Lone
|
||||
.long 0x7E082E99
|
||||
beq Ltwo
|
||||
|
||||
Lthree:
|
||||
.long 0x7EC92E99
|
||||
vperm 3,3,3,12
|
||||
vperm 16,16,16,12
|
||||
vperm 22,22,22,12
|
||||
|
||||
vxor 2,3,0
|
||||
vor 29,23,23
|
||||
vor 30, 24, 24
|
||||
vor 31,25,25
|
||||
|
||||
vperm 5,16,22,19
|
||||
vperm 6,16,22,18
|
||||
.long 0x12B08CC8
|
||||
.long 0x13764CC8
|
||||
.long 0x12855CC8
|
||||
.long 0x134654C8
|
||||
|
||||
vxor 27,27,21
|
||||
b Ltail_4x
|
||||
|
||||
.align 4
|
||||
Ltwo:
|
||||
vperm 3,3,3,12
|
||||
vperm 16,16,16,12
|
||||
|
||||
vxor 2,3,0
|
||||
vperm 5,4,16,19
|
||||
vperm 6,4,16,18
|
||||
|
||||
vsldoi 29,4,17,8
|
||||
vor 30, 17, 17
|
||||
vsldoi 31,17,4,8
|
||||
|
||||
.long 0x12855CC8
|
||||
.long 0x13704CC8
|
||||
.long 0x134654C8
|
||||
|
||||
b Ltail_4x
|
||||
|
||||
.align 4
|
||||
Lone:
|
||||
vperm 3,3,3,12
|
||||
|
||||
vsldoi 29,4,9,8
|
||||
vor 30, 9, 9
|
||||
vsldoi 31,9,4,8
|
||||
|
||||
vxor 2,3,0
|
||||
vxor 20,20,20
|
||||
vxor 27,27,27
|
||||
vxor 26,26,26
|
||||
|
||||
b Ltail_4x
|
||||
|
||||
Ldone_4x:
|
||||
vperm 0,0,0,12
|
||||
.long 0x7C001F99
|
||||
|
||||
li 10,63
|
||||
li 11,79
|
||||
mtspr 256,12
|
||||
lvx 20,10,1
|
||||
addi 10,10,32
|
||||
lvx 21,11,1
|
||||
addi 11,11,32
|
||||
lvx 22,10,1
|
||||
addi 10,10,32
|
||||
lvx 23,11,1
|
||||
addi 11,11,32
|
||||
lvx 24,10,1
|
||||
addi 10,10,32
|
||||
lvx 25,11,1
|
||||
addi 11,11,32
|
||||
lvx 26,10,1
|
||||
addi 10,10,32
|
||||
lvx 27,11,1
|
||||
addi 11,11,32
|
||||
lvx 28,10,1
|
||||
addi 10,10,32
|
||||
lvx 29,11,1
|
||||
addi 11,11,32
|
||||
lvx 30,10,1
|
||||
lvx 31,11,1
|
||||
addi 1,1,256
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x04,0,0x80,0,4,0
|
||||
.long 0
|
||||
|
||||
|
||||
.byte 71,72,65,83,72,32,102,111,114,32,80,111,119,101,114,73,83,65,32,50,46,48,55,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 2
|
||||
.align 2
|
|
@ -0,0 +1,968 @@
|
|||
#if defined(__i386__)
|
||||
.text
|
||||
.globl ChaCha20_ctr32
|
||||
.hidden ChaCha20_ctr32
|
||||
.type ChaCha20_ctr32,@function
|
||||
.align 16
|
||||
ChaCha20_ctr32:
|
||||
.L_ChaCha20_ctr32_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
xorl %eax,%eax
|
||||
cmpl 28(%esp),%eax
|
||||
je .L000no_data
|
||||
call .Lpic_point
|
||||
.Lpic_point:
|
||||
popl %eax
|
||||
leal OPENSSL_ia32cap_P-.Lpic_point(%eax),%ebp
|
||||
testl $16777216,(%ebp)
|
||||
jz .L001x86
|
||||
testl $512,4(%ebp)
|
||||
jz .L001x86
|
||||
jmp .Lssse3_shortcut
|
||||
.L001x86:
|
||||
movl 32(%esp),%esi
|
||||
movl 36(%esp),%edi
|
||||
subl $132,%esp
|
||||
movl (%esi),%eax
|
||||
movl 4(%esi),%ebx
|
||||
movl 8(%esi),%ecx
|
||||
movl 12(%esi),%edx
|
||||
movl %eax,80(%esp)
|
||||
movl %ebx,84(%esp)
|
||||
movl %ecx,88(%esp)
|
||||
movl %edx,92(%esp)
|
||||
movl 16(%esi),%eax
|
||||
movl 20(%esi),%ebx
|
||||
movl 24(%esi),%ecx
|
||||
movl 28(%esi),%edx
|
||||
movl %eax,96(%esp)
|
||||
movl %ebx,100(%esp)
|
||||
movl %ecx,104(%esp)
|
||||
movl %edx,108(%esp)
|
||||
movl (%edi),%eax
|
||||
movl 4(%edi),%ebx
|
||||
movl 8(%edi),%ecx
|
||||
movl 12(%edi),%edx
|
||||
subl $1,%eax
|
||||
movl %eax,112(%esp)
|
||||
movl %ebx,116(%esp)
|
||||
movl %ecx,120(%esp)
|
||||
movl %edx,124(%esp)
|
||||
jmp .L002entry
|
||||
.align 16
|
||||
.L003outer_loop:
|
||||
movl %ebx,156(%esp)
|
||||
movl %eax,152(%esp)
|
||||
movl %ecx,160(%esp)
|
||||
.L002entry:
|
||||
movl $1634760805,%eax
|
||||
movl $857760878,4(%esp)
|
||||
movl $2036477234,8(%esp)
|
||||
movl $1797285236,12(%esp)
|
||||
movl 84(%esp),%ebx
|
||||
movl 88(%esp),%ebp
|
||||
movl 104(%esp),%ecx
|
||||
movl 108(%esp),%esi
|
||||
movl 116(%esp),%edx
|
||||
movl 120(%esp),%edi
|
||||
movl %ebx,20(%esp)
|
||||
movl %ebp,24(%esp)
|
||||
movl %ecx,40(%esp)
|
||||
movl %esi,44(%esp)
|
||||
movl %edx,52(%esp)
|
||||
movl %edi,56(%esp)
|
||||
movl 92(%esp),%ebx
|
||||
movl 124(%esp),%edi
|
||||
movl 112(%esp),%edx
|
||||
movl 80(%esp),%ebp
|
||||
movl 96(%esp),%ecx
|
||||
movl 100(%esp),%esi
|
||||
addl $1,%edx
|
||||
movl %ebx,28(%esp)
|
||||
movl %edi,60(%esp)
|
||||
movl %edx,112(%esp)
|
||||
movl $10,%ebx
|
||||
jmp .L004loop
|
||||
.align 16
|
||||
.L004loop:
|
||||
addl %ebp,%eax
|
||||
movl %ebx,128(%esp)
|
||||
movl %ebp,%ebx
|
||||
xorl %eax,%edx
|
||||
roll $16,%edx
|
||||
addl %edx,%ecx
|
||||
xorl %ecx,%ebx
|
||||
movl 52(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 20(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,(%esp)
|
||||
roll $8,%edx
|
||||
movl 4(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,48(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
movl %ecx,32(%esp)
|
||||
roll $16,%edi
|
||||
movl %ebx,16(%esp)
|
||||
addl %edi,%esi
|
||||
movl 40(%esp),%ecx
|
||||
xorl %esi,%ebp
|
||||
movl 56(%esp),%edx
|
||||
roll $12,%ebp
|
||||
movl 24(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,4(%esp)
|
||||
roll $8,%edi
|
||||
movl 8(%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,52(%esp)
|
||||
xorl %esi,%ebp
|
||||
addl %ebx,%eax
|
||||
roll $7,%ebp
|
||||
xorl %eax,%edx
|
||||
movl %esi,36(%esp)
|
||||
roll $16,%edx
|
||||
movl %ebp,20(%esp)
|
||||
addl %edx,%ecx
|
||||
movl 44(%esp),%esi
|
||||
xorl %ecx,%ebx
|
||||
movl 60(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 28(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,8(%esp)
|
||||
roll $8,%edx
|
||||
movl 12(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,56(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
roll $16,%edi
|
||||
movl %ebx,24(%esp)
|
||||
addl %edi,%esi
|
||||
xorl %esi,%ebp
|
||||
roll $12,%ebp
|
||||
movl 20(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,12(%esp)
|
||||
roll $8,%edi
|
||||
movl (%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,%edx
|
||||
xorl %esi,%ebp
|
||||
addl %ebx,%eax
|
||||
roll $7,%ebp
|
||||
xorl %eax,%edx
|
||||
roll $16,%edx
|
||||
movl %ebp,28(%esp)
|
||||
addl %edx,%ecx
|
||||
xorl %ecx,%ebx
|
||||
movl 48(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 24(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,(%esp)
|
||||
roll $8,%edx
|
||||
movl 4(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,60(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
movl %ecx,40(%esp)
|
||||
roll $16,%edi
|
||||
movl %ebx,20(%esp)
|
||||
addl %edi,%esi
|
||||
movl 32(%esp),%ecx
|
||||
xorl %esi,%ebp
|
||||
movl 52(%esp),%edx
|
||||
roll $12,%ebp
|
||||
movl 28(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,4(%esp)
|
||||
roll $8,%edi
|
||||
movl 8(%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,48(%esp)
|
||||
xorl %esi,%ebp
|
||||
addl %ebx,%eax
|
||||
roll $7,%ebp
|
||||
xorl %eax,%edx
|
||||
movl %esi,44(%esp)
|
||||
roll $16,%edx
|
||||
movl %ebp,24(%esp)
|
||||
addl %edx,%ecx
|
||||
movl 36(%esp),%esi
|
||||
xorl %ecx,%ebx
|
||||
movl 56(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 16(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,8(%esp)
|
||||
roll $8,%edx
|
||||
movl 12(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,52(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
roll $16,%edi
|
||||
movl %ebx,28(%esp)
|
||||
addl %edi,%esi
|
||||
xorl %esi,%ebp
|
||||
movl 48(%esp),%edx
|
||||
roll $12,%ebp
|
||||
movl 128(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,12(%esp)
|
||||
roll $8,%edi
|
||||
movl (%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,56(%esp)
|
||||
xorl %esi,%ebp
|
||||
roll $7,%ebp
|
||||
decl %ebx
|
||||
jnz .L004loop
|
||||
movl 160(%esp),%ebx
|
||||
addl $1634760805,%eax
|
||||
addl 80(%esp),%ebp
|
||||
addl 96(%esp),%ecx
|
||||
addl 100(%esp),%esi
|
||||
cmpl $64,%ebx
|
||||
jb .L005tail
|
||||
movl 156(%esp),%ebx
|
||||
addl 112(%esp),%edx
|
||||
addl 120(%esp),%edi
|
||||
xorl (%ebx),%eax
|
||||
xorl 16(%ebx),%ebp
|
||||
movl %eax,(%esp)
|
||||
movl 152(%esp),%eax
|
||||
xorl 32(%ebx),%ecx
|
||||
xorl 36(%ebx),%esi
|
||||
xorl 48(%ebx),%edx
|
||||
xorl 56(%ebx),%edi
|
||||
movl %ebp,16(%eax)
|
||||
movl %ecx,32(%eax)
|
||||
movl %esi,36(%eax)
|
||||
movl %edx,48(%eax)
|
||||
movl %edi,56(%eax)
|
||||
movl 4(%esp),%ebp
|
||||
movl 8(%esp),%ecx
|
||||
movl 12(%esp),%esi
|
||||
movl 20(%esp),%edx
|
||||
movl 24(%esp),%edi
|
||||
addl $857760878,%ebp
|
||||
addl $2036477234,%ecx
|
||||
addl $1797285236,%esi
|
||||
addl 84(%esp),%edx
|
||||
addl 88(%esp),%edi
|
||||
xorl 4(%ebx),%ebp
|
||||
xorl 8(%ebx),%ecx
|
||||
xorl 12(%ebx),%esi
|
||||
xorl 20(%ebx),%edx
|
||||
xorl 24(%ebx),%edi
|
||||
movl %ebp,4(%eax)
|
||||
movl %ecx,8(%eax)
|
||||
movl %esi,12(%eax)
|
||||
movl %edx,20(%eax)
|
||||
movl %edi,24(%eax)
|
||||
movl 28(%esp),%ebp
|
||||
movl 40(%esp),%ecx
|
||||
movl 44(%esp),%esi
|
||||
movl 52(%esp),%edx
|
||||
movl 60(%esp),%edi
|
||||
addl 92(%esp),%ebp
|
||||
addl 104(%esp),%ecx
|
||||
addl 108(%esp),%esi
|
||||
addl 116(%esp),%edx
|
||||
addl 124(%esp),%edi
|
||||
xorl 28(%ebx),%ebp
|
||||
xorl 40(%ebx),%ecx
|
||||
xorl 44(%ebx),%esi
|
||||
xorl 52(%ebx),%edx
|
||||
xorl 60(%ebx),%edi
|
||||
leal 64(%ebx),%ebx
|
||||
movl %ebp,28(%eax)
|
||||
movl (%esp),%ebp
|
||||
movl %ecx,40(%eax)
|
||||
movl 160(%esp),%ecx
|
||||
movl %esi,44(%eax)
|
||||
movl %edx,52(%eax)
|
||||
movl %edi,60(%eax)
|
||||
movl %ebp,(%eax)
|
||||
leal 64(%eax),%eax
|
||||
subl $64,%ecx
|
||||
jnz .L003outer_loop
|
||||
jmp .L006done
|
||||
.L005tail:
|
||||
addl 112(%esp),%edx
|
||||
addl 120(%esp),%edi
|
||||
movl %eax,(%esp)
|
||||
movl %ebp,16(%esp)
|
||||
movl %ecx,32(%esp)
|
||||
movl %esi,36(%esp)
|
||||
movl %edx,48(%esp)
|
||||
movl %edi,56(%esp)
|
||||
movl 4(%esp),%ebp
|
||||
movl 8(%esp),%ecx
|
||||
movl 12(%esp),%esi
|
||||
movl 20(%esp),%edx
|
||||
movl 24(%esp),%edi
|
||||
addl $857760878,%ebp
|
||||
addl $2036477234,%ecx
|
||||
addl $1797285236,%esi
|
||||
addl 84(%esp),%edx
|
||||
addl 88(%esp),%edi
|
||||
movl %ebp,4(%esp)
|
||||
movl %ecx,8(%esp)
|
||||
movl %esi,12(%esp)
|
||||
movl %edx,20(%esp)
|
||||
movl %edi,24(%esp)
|
||||
movl 28(%esp),%ebp
|
||||
movl 40(%esp),%ecx
|
||||
movl 44(%esp),%esi
|
||||
movl 52(%esp),%edx
|
||||
movl 60(%esp),%edi
|
||||
addl 92(%esp),%ebp
|
||||
addl 104(%esp),%ecx
|
||||
addl 108(%esp),%esi
|
||||
addl 116(%esp),%edx
|
||||
addl 124(%esp),%edi
|
||||
movl %ebp,28(%esp)
|
||||
movl 156(%esp),%ebp
|
||||
movl %ecx,40(%esp)
|
||||
movl 152(%esp),%ecx
|
||||
movl %esi,44(%esp)
|
||||
xorl %esi,%esi
|
||||
movl %edx,52(%esp)
|
||||
movl %edi,60(%esp)
|
||||
xorl %eax,%eax
|
||||
xorl %edx,%edx
|
||||
.L007tail_loop:
|
||||
movb (%esi,%ebp,1),%al
|
||||
movb (%esp,%esi,1),%dl
|
||||
leal 1(%esi),%esi
|
||||
xorb %dl,%al
|
||||
movb %al,-1(%ecx,%esi,1)
|
||||
decl %ebx
|
||||
jnz .L007tail_loop
|
||||
.L006done:
|
||||
addl $132,%esp
|
||||
.L000no_data:
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.size ChaCha20_ctr32,.-.L_ChaCha20_ctr32_begin
|
||||
.globl ChaCha20_ssse3
|
||||
.hidden ChaCha20_ssse3
|
||||
.type ChaCha20_ssse3,@function
|
||||
.align 16
|
||||
ChaCha20_ssse3:
|
||||
.L_ChaCha20_ssse3_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
.Lssse3_shortcut:
|
||||
movl 20(%esp),%edi
|
||||
movl 24(%esp),%esi
|
||||
movl 28(%esp),%ecx
|
||||
movl 32(%esp),%edx
|
||||
movl 36(%esp),%ebx
|
||||
movl %esp,%ebp
|
||||
subl $524,%esp
|
||||
andl $-64,%esp
|
||||
movl %ebp,512(%esp)
|
||||
leal .Lssse3_data-.Lpic_point(%eax),%eax
|
||||
movdqu (%ebx),%xmm3
|
||||
cmpl $256,%ecx
|
||||
jb .L0081x
|
||||
movl %edx,516(%esp)
|
||||
movl %ebx,520(%esp)
|
||||
subl $256,%ecx
|
||||
leal 384(%esp),%ebp
|
||||
movdqu (%edx),%xmm7
|
||||
pshufd $0,%xmm3,%xmm0
|
||||
pshufd $85,%xmm3,%xmm1
|
||||
pshufd $170,%xmm3,%xmm2
|
||||
pshufd $255,%xmm3,%xmm3
|
||||
paddd 48(%eax),%xmm0
|
||||
pshufd $0,%xmm7,%xmm4
|
||||
pshufd $85,%xmm7,%xmm5
|
||||
psubd 64(%eax),%xmm0
|
||||
pshufd $170,%xmm7,%xmm6
|
||||
pshufd $255,%xmm7,%xmm7
|
||||
movdqa %xmm0,64(%ebp)
|
||||
movdqa %xmm1,80(%ebp)
|
||||
movdqa %xmm2,96(%ebp)
|
||||
movdqa %xmm3,112(%ebp)
|
||||
movdqu 16(%edx),%xmm3
|
||||
movdqa %xmm4,-64(%ebp)
|
||||
movdqa %xmm5,-48(%ebp)
|
||||
movdqa %xmm6,-32(%ebp)
|
||||
movdqa %xmm7,-16(%ebp)
|
||||
movdqa 32(%eax),%xmm7
|
||||
leal 128(%esp),%ebx
|
||||
pshufd $0,%xmm3,%xmm0
|
||||
pshufd $85,%xmm3,%xmm1
|
||||
pshufd $170,%xmm3,%xmm2
|
||||
pshufd $255,%xmm3,%xmm3
|
||||
pshufd $0,%xmm7,%xmm4
|
||||
pshufd $85,%xmm7,%xmm5
|
||||
pshufd $170,%xmm7,%xmm6
|
||||
pshufd $255,%xmm7,%xmm7
|
||||
movdqa %xmm0,(%ebp)
|
||||
movdqa %xmm1,16(%ebp)
|
||||
movdqa %xmm2,32(%ebp)
|
||||
movdqa %xmm3,48(%ebp)
|
||||
movdqa %xmm4,-128(%ebp)
|
||||
movdqa %xmm5,-112(%ebp)
|
||||
movdqa %xmm6,-96(%ebp)
|
||||
movdqa %xmm7,-80(%ebp)
|
||||
leal 128(%esi),%esi
|
||||
leal 128(%edi),%edi
|
||||
jmp .L009outer_loop
|
||||
.align 16
|
||||
.L009outer_loop:
|
||||
movdqa -112(%ebp),%xmm1
|
||||
movdqa -96(%ebp),%xmm2
|
||||
movdqa -80(%ebp),%xmm3
|
||||
movdqa -48(%ebp),%xmm5
|
||||
movdqa -32(%ebp),%xmm6
|
||||
movdqa -16(%ebp),%xmm7
|
||||
movdqa %xmm1,-112(%ebx)
|
||||
movdqa %xmm2,-96(%ebx)
|
||||
movdqa %xmm3,-80(%ebx)
|
||||
movdqa %xmm5,-48(%ebx)
|
||||
movdqa %xmm6,-32(%ebx)
|
||||
movdqa %xmm7,-16(%ebx)
|
||||
movdqa 32(%ebp),%xmm2
|
||||
movdqa 48(%ebp),%xmm3
|
||||
movdqa 64(%ebp),%xmm4
|
||||
movdqa 80(%ebp),%xmm5
|
||||
movdqa 96(%ebp),%xmm6
|
||||
movdqa 112(%ebp),%xmm7
|
||||
paddd 64(%eax),%xmm4
|
||||
movdqa %xmm2,32(%ebx)
|
||||
movdqa %xmm3,48(%ebx)
|
||||
movdqa %xmm4,64(%ebx)
|
||||
movdqa %xmm5,80(%ebx)
|
||||
movdqa %xmm6,96(%ebx)
|
||||
movdqa %xmm7,112(%ebx)
|
||||
movdqa %xmm4,64(%ebp)
|
||||
movdqa -128(%ebp),%xmm0
|
||||
movdqa %xmm4,%xmm6
|
||||
movdqa -64(%ebp),%xmm3
|
||||
movdqa (%ebp),%xmm4
|
||||
movdqa 16(%ebp),%xmm5
|
||||
movl $10,%edx
|
||||
nop
|
||||
.align 16
|
||||
.L010loop:
|
||||
paddd %xmm3,%xmm0
|
||||
movdqa %xmm3,%xmm2
|
||||
pxor %xmm0,%xmm6
|
||||
pshufb (%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -48(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -112(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 80(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-128(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,64(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
movdqa %xmm4,(%ebx)
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-64(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa 32(%ebx),%xmm4
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa -32(%ebx),%xmm2
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -96(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa 96(%ebx),%xmm6
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-112(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,80(%ebx)
|
||||
pxor %xmm5,%xmm3
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
por %xmm1,%xmm3
|
||||
movdqa %xmm5,16(%ebx)
|
||||
pshufb (%eax),%xmm6
|
||||
movdqa %xmm3,-48(%ebx)
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa 48(%ebx),%xmm5
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -16(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -80(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 112(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-96(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,96(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-32(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa -48(%ebx),%xmm2
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -128(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-80(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,%xmm6
|
||||
pxor %xmm5,%xmm3
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
por %xmm1,%xmm3
|
||||
pshufb (%eax),%xmm6
|
||||
movdqa %xmm3,-16(%ebx)
|
||||
paddd %xmm6,%xmm4
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -32(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -112(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 64(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-128(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,112(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
movdqa %xmm4,32(%ebx)
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-48(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa (%ebx),%xmm4
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa -16(%ebx),%xmm2
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -96(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa 80(%ebx),%xmm6
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-112(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,64(%ebx)
|
||||
pxor %xmm5,%xmm3
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
por %xmm1,%xmm3
|
||||
movdqa %xmm5,48(%ebx)
|
||||
pshufb (%eax),%xmm6
|
||||
movdqa %xmm3,-32(%ebx)
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa 16(%ebx),%xmm5
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -64(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -80(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 96(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-96(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,80(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-16(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -128(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa 64(%ebx),%xmm6
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-80(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,96(%ebx)
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
por %xmm1,%xmm3
|
||||
decl %edx
|
||||
jnz .L010loop
|
||||
movdqa %xmm3,-64(%ebx)
|
||||
movdqa %xmm4,(%ebx)
|
||||
movdqa %xmm5,16(%ebx)
|
||||
movdqa %xmm6,64(%ebx)
|
||||
movdqa %xmm7,96(%ebx)
|
||||
movdqa -112(%ebx),%xmm1
|
||||
movdqa -96(%ebx),%xmm2
|
||||
movdqa -80(%ebx),%xmm3
|
||||
paddd -128(%ebp),%xmm0
|
||||
paddd -112(%ebp),%xmm1
|
||||
paddd -96(%ebp),%xmm2
|
||||
paddd -80(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 16(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa -64(%ebx),%xmm0
|
||||
pxor %xmm1,%xmm5
|
||||
movdqa -48(%ebx),%xmm1
|
||||
pxor %xmm2,%xmm6
|
||||
movdqa -32(%ebx),%xmm2
|
||||
pxor %xmm3,%xmm7
|
||||
movdqa -16(%ebx),%xmm3
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 16(%edi),%edi
|
||||
paddd -64(%ebp),%xmm0
|
||||
paddd -48(%ebp),%xmm1
|
||||
paddd -32(%ebp),%xmm2
|
||||
paddd -16(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 16(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa (%ebx),%xmm0
|
||||
pxor %xmm1,%xmm5
|
||||
movdqa 16(%ebx),%xmm1
|
||||
pxor %xmm2,%xmm6
|
||||
movdqa 32(%ebx),%xmm2
|
||||
pxor %xmm3,%xmm7
|
||||
movdqa 48(%ebx),%xmm3
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 16(%edi),%edi
|
||||
paddd (%ebp),%xmm0
|
||||
paddd 16(%ebp),%xmm1
|
||||
paddd 32(%ebp),%xmm2
|
||||
paddd 48(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 16(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa 64(%ebx),%xmm0
|
||||
pxor %xmm1,%xmm5
|
||||
movdqa 80(%ebx),%xmm1
|
||||
pxor %xmm2,%xmm6
|
||||
movdqa 96(%ebx),%xmm2
|
||||
pxor %xmm3,%xmm7
|
||||
movdqa 112(%ebx),%xmm3
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 16(%edi),%edi
|
||||
paddd 64(%ebp),%xmm0
|
||||
paddd 80(%ebp),%xmm1
|
||||
paddd 96(%ebp),%xmm2
|
||||
paddd 112(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 208(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
pxor %xmm1,%xmm5
|
||||
pxor %xmm2,%xmm6
|
||||
pxor %xmm3,%xmm7
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 208(%edi),%edi
|
||||
subl $256,%ecx
|
||||
jnc .L009outer_loop
|
||||
addl $256,%ecx
|
||||
jz .L011done
|
||||
movl 520(%esp),%ebx
|
||||
leal -128(%esi),%esi
|
||||
movl 516(%esp),%edx
|
||||
leal -128(%edi),%edi
|
||||
movd 64(%ebp),%xmm2
|
||||
movdqu (%ebx),%xmm3
|
||||
paddd 96(%eax),%xmm2
|
||||
pand 112(%eax),%xmm3
|
||||
por %xmm2,%xmm3
|
||||
.L0081x:
|
||||
movdqa 32(%eax),%xmm0
|
||||
movdqu (%edx),%xmm1
|
||||
movdqu 16(%edx),%xmm2
|
||||
movdqa (%eax),%xmm6
|
||||
movdqa 16(%eax),%xmm7
|
||||
movl %ebp,48(%esp)
|
||||
movdqa %xmm0,(%esp)
|
||||
movdqa %xmm1,16(%esp)
|
||||
movdqa %xmm2,32(%esp)
|
||||
movdqa %xmm3,48(%esp)
|
||||
movl $10,%edx
|
||||
jmp .L012loop1x
|
||||
.align 16
|
||||
.L013outer1x:
|
||||
movdqa 80(%eax),%xmm3
|
||||
movdqa (%esp),%xmm0
|
||||
movdqa 16(%esp),%xmm1
|
||||
movdqa 32(%esp),%xmm2
|
||||
paddd 48(%esp),%xmm3
|
||||
movl $10,%edx
|
||||
movdqa %xmm3,48(%esp)
|
||||
jmp .L012loop1x
|
||||
.align 16
|
||||
.L012loop1x:
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,222
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $20,%xmm1
|
||||
pslld $12,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,223
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $25,%xmm1
|
||||
pslld $7,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
pshufd $78,%xmm2,%xmm2
|
||||
pshufd $57,%xmm1,%xmm1
|
||||
pshufd $147,%xmm3,%xmm3
|
||||
nop
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,222
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $20,%xmm1
|
||||
pslld $12,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,223
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $25,%xmm1
|
||||
pslld $7,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
pshufd $78,%xmm2,%xmm2
|
||||
pshufd $147,%xmm1,%xmm1
|
||||
pshufd $57,%xmm3,%xmm3
|
||||
decl %edx
|
||||
jnz .L012loop1x
|
||||
paddd (%esp),%xmm0
|
||||
paddd 16(%esp),%xmm1
|
||||
paddd 32(%esp),%xmm2
|
||||
paddd 48(%esp),%xmm3
|
||||
cmpl $64,%ecx
|
||||
jb .L014tail
|
||||
movdqu (%esi),%xmm4
|
||||
movdqu 16(%esi),%xmm5
|
||||
pxor %xmm4,%xmm0
|
||||
movdqu 32(%esi),%xmm4
|
||||
pxor %xmm5,%xmm1
|
||||
movdqu 48(%esi),%xmm5
|
||||
pxor %xmm4,%xmm2
|
||||
pxor %xmm5,%xmm3
|
||||
leal 64(%esi),%esi
|
||||
movdqu %xmm0,(%edi)
|
||||
movdqu %xmm1,16(%edi)
|
||||
movdqu %xmm2,32(%edi)
|
||||
movdqu %xmm3,48(%edi)
|
||||
leal 64(%edi),%edi
|
||||
subl $64,%ecx
|
||||
jnz .L013outer1x
|
||||
jmp .L011done
|
||||
.L014tail:
|
||||
movdqa %xmm0,(%esp)
|
||||
movdqa %xmm1,16(%esp)
|
||||
movdqa %xmm2,32(%esp)
|
||||
movdqa %xmm3,48(%esp)
|
||||
xorl %eax,%eax
|
||||
xorl %edx,%edx
|
||||
xorl %ebp,%ebp
|
||||
.L015tail_loop:
|
||||
movb (%esp,%ebp,1),%al
|
||||
movb (%esi,%ebp,1),%dl
|
||||
leal 1(%ebp),%ebp
|
||||
xorb %dl,%al
|
||||
movb %al,-1(%edi,%ebp,1)
|
||||
decl %ecx
|
||||
jnz .L015tail_loop
|
||||
.L011done:
|
||||
movl 512(%esp),%esp
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.size ChaCha20_ssse3,.-.L_ChaCha20_ssse3_begin
|
||||
.align 64
|
||||
.Lssse3_data:
|
||||
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
|
||||
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
|
||||
.long 1634760805,857760878,2036477234,1797285236
|
||||
.long 0,1,2,3
|
||||
.long 4,4,4,4
|
||||
.long 1,0,0,0
|
||||
.long 4,0,0,0
|
||||
.long 0,-1,-1,-1
|
||||
.align 64
|
||||
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
|
||||
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
|
||||
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
|
||||
.byte 114,103,62,0
|
||||
#endif
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,681 @@
|
|||
#if defined(__i386__)
|
||||
.text
|
||||
.globl md5_block_asm_data_order
|
||||
.hidden md5_block_asm_data_order
|
||||
.type md5_block_asm_data_order,@function
|
||||
.align 16
|
||||
md5_block_asm_data_order:
|
||||
.L_md5_block_asm_data_order_begin:
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 12(%esp),%edi
|
||||
movl 16(%esp),%esi
|
||||
movl 20(%esp),%ecx
|
||||
pushl %ebp
|
||||
shll $6,%ecx
|
||||
pushl %ebx
|
||||
addl %esi,%ecx
|
||||
subl $64,%ecx
|
||||
movl (%edi),%eax
|
||||
pushl %ecx
|
||||
movl 4(%edi),%ebx
|
||||
movl 8(%edi),%ecx
|
||||
movl 12(%edi),%edx
|
||||
.L000start:
|
||||
|
||||
|
||||
movl %ecx,%edi
|
||||
movl (%esi),%ebp
|
||||
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 3614090360(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 4(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 3905402710(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 8(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 606105819(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 12(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 3250441966(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 16(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 4118548399(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 20(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 1200080426(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 24(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 2821735955(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 28(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 4249261313(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 32(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 1770035416(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 36(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 2336552879(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 40(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 4294925233(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 44(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 2304563134(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 48(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 1804603682(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 52(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 4254626195(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 56(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 2792965006(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 60(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 1236535329(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 4(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
|
||||
|
||||
|
||||
leal 4129170786(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 24(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
|
||||
leal 3225465664(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 44(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
leal 643717713(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl (%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
|
||||
leal 3921069994(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 20(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
leal 3593408605(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 40(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
|
||||
leal 38016083(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 60(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
leal 3634488961(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl 16(%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
|
||||
leal 3889429448(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 36(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
leal 568446438(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 56(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
|
||||
leal 3275163606(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 12(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
leal 4107603335(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl 32(%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
|
||||
leal 1163531501(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 52(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
leal 2850285829(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 8(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
|
||||
leal 4243563512(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 28(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
leal 1735328473(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl 48(%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
|
||||
leal 2368359562(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 20(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
|
||||
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 4294588738(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl 32(%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
|
||||
leal 2272392833(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 44(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 1839030562(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 56(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
|
||||
leal 4259657740(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl 4(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 2763975236(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl 16(%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
|
||||
leal 1272893353(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 28(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 4139469664(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 40(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
|
||||
leal 3200236656(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl 52(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 681279174(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl (%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
|
||||
leal 3936430074(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 12(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 3572445317(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 24(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
|
||||
leal 76029189(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl 36(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 3654602809(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl 48(%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
|
||||
leal 3873151461(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 60(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 530742520(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 8(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
|
||||
leal 3299628645(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl (%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
|
||||
|
||||
xorl %edx,%edi
|
||||
orl %ebx,%edi
|
||||
leal 4096336452(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 28(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
|
||||
orl %eax,%edi
|
||||
leal 1126891415(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 56(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
|
||||
orl %edx,%edi
|
||||
leal 2878612391(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 20(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
|
||||
orl %ecx,%edi
|
||||
leal 4237533241(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 48(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $21,%ebx
|
||||
xorl %edx,%edi
|
||||
addl %ecx,%ebx
|
||||
|
||||
orl %ebx,%edi
|
||||
leal 1700485571(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 12(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
|
||||
orl %eax,%edi
|
||||
leal 2399980690(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 40(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
|
||||
orl %edx,%edi
|
||||
leal 4293915773(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 4(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
|
||||
orl %ecx,%edi
|
||||
leal 2240044497(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 32(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $21,%ebx
|
||||
xorl %edx,%edi
|
||||
addl %ecx,%ebx
|
||||
|
||||
orl %ebx,%edi
|
||||
leal 1873313359(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 60(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
|
||||
orl %eax,%edi
|
||||
leal 4264355552(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 24(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
|
||||
orl %edx,%edi
|
||||
leal 2734768916(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 52(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
|
||||
orl %ecx,%edi
|
||||
leal 1309151649(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 16(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $21,%ebx
|
||||
xorl %edx,%edi
|
||||
addl %ecx,%ebx
|
||||
|
||||
orl %ebx,%edi
|
||||
leal 4149444226(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 44(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
|
||||
orl %eax,%edi
|
||||
leal 3174756917(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 8(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
|
||||
orl %edx,%edi
|
||||
leal 718787259(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 36(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
|
||||
orl %ecx,%edi
|
||||
leal 3951481745(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 24(%esp),%ebp
|
||||
addl %edi,%ebx
|
||||
addl $64,%esi
|
||||
roll $21,%ebx
|
||||
movl (%ebp),%edi
|
||||
addl %ecx,%ebx
|
||||
addl %edi,%eax
|
||||
movl 4(%ebp),%edi
|
||||
addl %edi,%ebx
|
||||
movl 8(%ebp),%edi
|
||||
addl %edi,%ecx
|
||||
movl 12(%ebp),%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,(%ebp)
|
||||
movl %ebx,4(%ebp)
|
||||
movl (%esp),%edi
|
||||
movl %ecx,8(%ebp)
|
||||
movl %edx,12(%ebp)
|
||||
cmpl %esi,%edi
|
||||
jae .L000start
|
||||
popl %eax
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
popl %edi
|
||||
popl %esi
|
||||
ret
|
||||
.size md5_block_asm_data_order,.-.L_md5_block_asm_data_order_begin
|
||||
#endif
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,675 @@
|
|||
#if defined(__i386__)
|
||||
.text
|
||||
.align 64
|
||||
.L_vpaes_consts:
|
||||
.long 218628480,235210255,168496130,67568393
|
||||
.long 252381056,17041926,33884169,51187212
|
||||
.long 252645135,252645135,252645135,252645135
|
||||
.long 1512730624,3266504856,1377990664,3401244816
|
||||
.long 830229760,1275146365,2969422977,3447763452
|
||||
.long 3411033600,2979783055,338359620,2782886510
|
||||
.long 4209124096,907596821,221174255,1006095553
|
||||
.long 191964160,3799684038,3164090317,1589111125
|
||||
.long 182528256,1777043520,2877432650,3265356744
|
||||
.long 1874708224,3503451415,3305285752,363511674
|
||||
.long 1606117888,3487855781,1093350906,2384367825
|
||||
.long 197121,67569157,134941193,202313229
|
||||
.long 67569157,134941193,202313229,197121
|
||||
.long 134941193,202313229,197121,67569157
|
||||
.long 202313229,197121,67569157,134941193
|
||||
.long 33619971,100992007,168364043,235736079
|
||||
.long 235736079,33619971,100992007,168364043
|
||||
.long 168364043,235736079,33619971,100992007
|
||||
.long 100992007,168364043,235736079,33619971
|
||||
.long 50462976,117835012,185207048,252579084
|
||||
.long 252314880,51251460,117574920,184942860
|
||||
.long 184682752,252054788,50987272,118359308
|
||||
.long 118099200,185467140,251790600,50727180
|
||||
.long 2946363062,528716217,1300004225,1881839624
|
||||
.long 1532713819,1532713819,1532713819,1532713819
|
||||
.long 3602276352,4288629033,3737020424,4153884961
|
||||
.long 1354558464,32357713,2958822624,3775749553
|
||||
.long 1201988352,132424512,1572796698,503232858
|
||||
.long 2213177600,1597421020,4103937655,675398315
|
||||
.long 2749646592,4273543773,1511898873,121693092
|
||||
.long 3040248576,1103263732,2871565598,1608280554
|
||||
.long 2236667136,2588920351,482954393,64377734
|
||||
.long 3069987328,291237287,2117370568,3650299247
|
||||
.long 533321216,3573750986,2572112006,1401264716
|
||||
.long 1339849704,2721158661,548607111,3445553514
|
||||
.long 2128193280,3054596040,2183486460,1257083700
|
||||
.long 655635200,1165381986,3923443150,2344132524
|
||||
.long 190078720,256924420,290342170,357187870
|
||||
.long 1610966272,2263057382,4103205268,309794674
|
||||
.long 2592527872,2233205587,1335446729,3402964816
|
||||
.long 3973531904,3225098121,3002836325,1918774430
|
||||
.long 3870401024,2102906079,2284471353,4117666579
|
||||
.long 617007872,1021508343,366931923,691083277
|
||||
.long 2528395776,3491914898,2968704004,1613121270
|
||||
.long 3445188352,3247741094,844474987,4093578302
|
||||
.long 651481088,1190302358,1689581232,574775300
|
||||
.long 4289380608,206939853,2555985458,2489840491
|
||||
.long 2130264064,327674451,3566485037,3349835193
|
||||
.long 2470714624,316102159,3636825756,3393945945
|
||||
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
|
||||
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
|
||||
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
|
||||
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
|
||||
.byte 118,101,114,115,105,116,121,41,0
|
||||
.align 64
|
||||
.hidden _vpaes_preheat
|
||||
.type _vpaes_preheat,@function
|
||||
.align 16
|
||||
_vpaes_preheat:
|
||||
addl (%esp),%ebp
|
||||
movdqa -48(%ebp),%xmm7
|
||||
movdqa -16(%ebp),%xmm6
|
||||
ret
|
||||
.size _vpaes_preheat,.-_vpaes_preheat
|
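/* _vpaes_encrypt_core: encrypt the single block in %xmm0 with the key
   schedule at (%edx), using pshufb table lookups in place of the AES S-box;
   the result is left in %xmm0. */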
||||
.hidden _vpaes_encrypt_core
|
||||
.type _vpaes_encrypt_core,@function
|
||||
.align 16
|
||||
_vpaes_encrypt_core:
|
||||
movl $16,%ecx
|
||||
movl 240(%edx),%eax
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa (%ebp),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
pand %xmm6,%xmm0
|
||||
movdqu (%edx),%xmm5
|
||||
.byte 102,15,56,0,208
|
||||
movdqa 16(%ebp),%xmm0
|
||||
pxor %xmm5,%xmm2
|
||||
psrld $4,%xmm1
|
||||
addl $16,%edx
|
||||
.byte 102,15,56,0,193
|
||||
leal 192(%ebp),%ebx
|
||||
pxor %xmm2,%xmm0
|
||||
jmp .L000enc_entry
|
||||
.align 16
|
||||
.L001enc_loop:
|
||||
movdqa 32(%ebp),%xmm4
|
||||
movdqa 48(%ebp),%xmm0
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm5,%xmm4
|
||||
movdqa 64(%ebp),%xmm5
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa -64(%ebx,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,234
|
||||
movdqa 80(%ebp),%xmm2
|
||||
movdqa (%ebx,%ecx,1),%xmm4
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm0,%xmm3
|
||||
pxor %xmm5,%xmm2
|
||||
.byte 102,15,56,0,193
|
||||
addl $16,%edx
|
||||
pxor %xmm2,%xmm0
|
||||
.byte 102,15,56,0,220
|
||||
addl $16,%ecx
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,193
|
||||
andl $48,%ecx
|
||||
subl $1,%eax
|
||||
pxor %xmm3,%xmm0
|
||||
.L000enc_entry:
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa -32(%ebp),%xmm5
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm6,%xmm0
|
||||
.byte 102,15,56,0,232
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm1,%xmm0
|
||||
.byte 102,15,56,0,217
|
||||
movdqa %xmm7,%xmm4
|
||||
pxor %xmm5,%xmm3
|
||||
.byte 102,15,56,0,224
|
||||
movdqa %xmm7,%xmm2
|
||||
pxor %xmm5,%xmm4
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm0,%xmm2
|
||||
.byte 102,15,56,0,220
|
||||
movdqu (%edx),%xmm5
|
||||
pxor %xmm1,%xmm3
|
||||
jnz .L001enc_loop
|
||||
movdqa 96(%ebp),%xmm4
|
||||
movdqa 112(%ebp),%xmm0
|
||||
.byte 102,15,56,0,226
|
||||
pxor %xmm5,%xmm4
|
||||
.byte 102,15,56,0,195
|
||||
movdqa 64(%ebx,%ecx,1),%xmm1
|
||||
pxor %xmm4,%xmm0
|
||||
.byte 102,15,56,0,193
|
||||
ret
|
||||
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
|
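/* _vpaes_decrypt_core: the matching single-block decryption core; block in
   and out via %xmm0, key schedule at (%edx). */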
||||
.hidden _vpaes_decrypt_core
|
||||
.type _vpaes_decrypt_core,@function
|
||||
.align 16
|
||||
_vpaes_decrypt_core:
|
||||
leal 608(%ebp),%ebx
|
||||
movl 240(%edx),%eax
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa -64(%ebx),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
movl %eax,%ecx
|
||||
psrld $4,%xmm1
|
||||
movdqu (%edx),%xmm5
|
||||
shll $4,%ecx
|
||||
pand %xmm6,%xmm0
|
||||
.byte 102,15,56,0,208
|
||||
movdqa -48(%ebx),%xmm0
|
||||
xorl $48,%ecx
|
||||
.byte 102,15,56,0,193
|
||||
andl $48,%ecx
|
||||
pxor %xmm5,%xmm2
|
||||
movdqa 176(%ebp),%xmm5
|
||||
pxor %xmm2,%xmm0
|
||||
addl $16,%edx
|
||||
leal -352(%ebx,%ecx,1),%ecx
|
||||
jmp .L002dec_entry
|
||||
.align 16
|
||||
.L003dec_loop:
|
||||
movdqa -32(%ebx),%xmm4
|
||||
movdqa -16(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa (%ebx),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 16(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa 32(%ebx),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 48(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa 64(%ebx),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 80(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
addl $16,%edx
|
||||
.byte 102,15,58,15,237,12
|
||||
pxor %xmm1,%xmm0
|
||||
subl $1,%eax
|
||||
.L002dec_entry:
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa -32(%ebp),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
pand %xmm6,%xmm0
|
||||
psrld $4,%xmm1
|
||||
.byte 102,15,56,0,208
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm1,%xmm0
|
||||
.byte 102,15,56,0,217
|
||||
movdqa %xmm7,%xmm4
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,224
|
||||
pxor %xmm2,%xmm4
|
||||
movdqa %xmm7,%xmm2
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm0,%xmm2
|
||||
.byte 102,15,56,0,220
|
||||
movdqu (%edx),%xmm0
|
||||
pxor %xmm1,%xmm3
|
||||
jnz .L003dec_loop
|
||||
movdqa 96(%ebx),%xmm4
|
||||
.byte 102,15,56,0,226
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa 112(%ebx),%xmm0
|
||||
movdqa (%ecx),%xmm2
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm4,%xmm0
|
||||
.byte 102,15,56,0,194
|
||||
ret
|
||||
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
|
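/* _vpaes_schedule_core: expand the user-supplied key into the vpaes key
   schedule; branches on the key length (128/192/256 bits) and on %edi,
   which is zero for encryption schedules and non-zero for decryption. */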
||||
.hidden _vpaes_schedule_core
|
||||
.type _vpaes_schedule_core,@function
|
||||
.align 16
|
||||
_vpaes_schedule_core:
|
||||
addl (%esp),%ebp
|
||||
movdqu (%esi),%xmm0
|
||||
movdqa 320(%ebp),%xmm2
|
||||
movdqa %xmm0,%xmm3
|
||||
leal (%ebp),%ebx
|
||||
movdqa %xmm2,4(%esp)
|
||||
call _vpaes_schedule_transform
|
||||
movdqa %xmm0,%xmm7
|
||||
testl %edi,%edi
|
||||
jnz .L004schedule_am_decrypting
|
||||
movdqu %xmm0,(%edx)
|
||||
jmp .L005schedule_go
|
||||
.L004schedule_am_decrypting:
|
||||
movdqa 256(%ebp,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,217
|
||||
movdqu %xmm3,(%edx)
|
||||
xorl $48,%ecx
|
||||
.L005schedule_go:
|
||||
cmpl $192,%eax
|
||||
ja .L006schedule_256
|
||||
je .L007schedule_192
|
||||
.L008schedule_128:
|
||||
movl $10,%eax
|
||||
.L009loop_schedule_128:
|
||||
call _vpaes_schedule_round
|
||||
decl %eax
|
||||
jz .L010schedule_mangle_last
|
||||
call _vpaes_schedule_mangle
|
||||
jmp .L009loop_schedule_128
|
||||
.align 16
|
||||
.L007schedule_192:
|
||||
movdqu 8(%esi),%xmm0
|
||||
call _vpaes_schedule_transform
|
||||
movdqa %xmm0,%xmm6
|
||||
pxor %xmm4,%xmm4
|
||||
movhlps %xmm4,%xmm6
|
||||
movl $4,%eax
|
||||
.L011loop_schedule_192:
|
||||
call _vpaes_schedule_round
|
||||
.byte 102,15,58,15,198,8
|
||||
call _vpaes_schedule_mangle
|
||||
call _vpaes_schedule_192_smear
|
||||
call _vpaes_schedule_mangle
|
||||
call _vpaes_schedule_round
|
||||
decl %eax
|
||||
jz .L010schedule_mangle_last
|
||||
call _vpaes_schedule_mangle
|
||||
call _vpaes_schedule_192_smear
|
||||
jmp .L011loop_schedule_192
|
||||
.align 16
|
||||
.L006schedule_256:
|
||||
movdqu 16(%esi),%xmm0
|
||||
call _vpaes_schedule_transform
|
||||
movl $7,%eax
|
||||
.L012loop_schedule_256:
|
||||
call _vpaes_schedule_mangle
|
||||
movdqa %xmm0,%xmm6
|
||||
call _vpaes_schedule_round
|
||||
decl %eax
|
||||
jz .L010schedule_mangle_last
|
||||
call _vpaes_schedule_mangle
|
||||
pshufd $255,%xmm0,%xmm0
|
||||
movdqa %xmm7,20(%esp)
|
||||
movdqa %xmm6,%xmm7
|
||||
call .L_vpaes_schedule_low_round
|
||||
movdqa 20(%esp),%xmm7
|
||||
jmp .L012loop_schedule_256
|
||||
.align 16
|
||||
.L010schedule_mangle_last:
|
||||
leal 384(%ebp),%ebx
|
||||
testl %edi,%edi
|
||||
jnz .L013schedule_mangle_last_dec
|
||||
movdqa 256(%ebp,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,193
|
||||
leal 352(%ebp),%ebx
|
||||
addl $32,%edx
|
||||
.L013schedule_mangle_last_dec:
|
||||
addl $-16,%edx
|
||||
pxor 336(%ebp),%xmm0
|
||||
call _vpaes_schedule_transform
|
||||
movdqu %xmm0,(%edx)
|
||||
pxor %xmm0,%xmm0
|
||||
pxor %xmm1,%xmm1
|
||||
pxor %xmm2,%xmm2
|
||||
pxor %xmm3,%xmm3
|
||||
pxor %xmm4,%xmm4
|
||||
pxor %xmm5,%xmm5
|
||||
pxor %xmm6,%xmm6
|
||||
pxor %xmm7,%xmm7
|
||||
ret
|
||||
.size _vpaes_schedule_core,.-_vpaes_schedule_core
|
||||
.hidden _vpaes_schedule_192_smear
|
||||
.type _vpaes_schedule_192_smear,@function
|
||||
.align 16
|
||||
_vpaes_schedule_192_smear:
|
||||
pshufd $128,%xmm6,%xmm1
|
||||
pshufd $254,%xmm7,%xmm0
|
||||
pxor %xmm1,%xmm6
|
||||
pxor %xmm1,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm6,%xmm0
|
||||
movhlps %xmm1,%xmm6
|
||||
ret
|
||||
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
|
||||
.hidden _vpaes_schedule_round
|
||||
.type _vpaes_schedule_round,@function
|
||||
.align 16
|
||||
_vpaes_schedule_round:
|
||||
movdqa 8(%esp),%xmm2
|
||||
pxor %xmm1,%xmm1
|
||||
.byte 102,15,58,15,202,15
|
||||
.byte 102,15,58,15,210,15
|
||||
pxor %xmm1,%xmm7
|
||||
pshufd $255,%xmm0,%xmm0
|
||||
.byte 102,15,58,15,192,1
|
||||
movdqa %xmm2,8(%esp)
|
||||
.L_vpaes_schedule_low_round:
|
||||
movdqa %xmm7,%xmm1
|
||||
pslldq $4,%xmm7
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm7,%xmm1
|
||||
pslldq $8,%xmm7
|
||||
pxor %xmm1,%xmm7
|
||||
pxor 336(%ebp),%xmm7
|
||||
movdqa -16(%ebp),%xmm4
|
||||
movdqa -48(%ebp),%xmm5
|
||||
movdqa %xmm4,%xmm1
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm4,%xmm0
|
||||
movdqa -32(%ebp),%xmm2
|
||||
.byte 102,15,56,0,208
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa %xmm5,%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
movdqa %xmm5,%xmm4
|
||||
.byte 102,15,56,0,224
|
||||
pxor %xmm2,%xmm4
|
||||
movdqa %xmm5,%xmm2
|
||||
.byte 102,15,56,0,211
|
||||
pxor %xmm0,%xmm2
|
||||
movdqa %xmm5,%xmm3
|
||||
.byte 102,15,56,0,220
|
||||
pxor %xmm1,%xmm3
|
||||
movdqa 32(%ebp),%xmm4
|
||||
.byte 102,15,56,0,226
|
||||
movdqa 48(%ebp),%xmm0
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm4,%xmm0
|
||||
pxor %xmm7,%xmm0
|
||||
movdqa %xmm0,%xmm7
|
||||
ret
|
||||
.size _vpaes_schedule_round,.-_vpaes_schedule_round
|
||||
.hidden _vpaes_schedule_transform
|
||||
.type _vpaes_schedule_transform,@function
|
||||
.align 16
|
||||
_vpaes_schedule_transform:
|
||||
movdqa -16(%ebp),%xmm2
|
||||
movdqa %xmm2,%xmm1
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm2,%xmm0
|
||||
movdqa (%ebx),%xmm2
|
||||
.byte 102,15,56,0,208
|
||||
movdqa 16(%ebx),%xmm0
|
||||
.byte 102,15,56,0,193
|
||||
pxor %xmm2,%xmm0
|
||||
ret
|
||||
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
|
||||
.hidden _vpaes_schedule_mangle
|
||||
.type _vpaes_schedule_mangle,@function
|
||||
.align 16
|
||||
_vpaes_schedule_mangle:
|
||||
movdqa %xmm0,%xmm4
|
||||
movdqa 128(%ebp),%xmm5
|
||||
testl %edi,%edi
|
||||
jnz .L014schedule_mangle_dec
|
||||
addl $16,%edx
|
||||
pxor 336(%ebp),%xmm4
|
||||
.byte 102,15,56,0,229
|
||||
movdqa %xmm4,%xmm3
|
||||
.byte 102,15,56,0,229
|
||||
pxor %xmm4,%xmm3
|
||||
.byte 102,15,56,0,229
|
||||
pxor %xmm4,%xmm3
|
||||
jmp .L015schedule_mangle_both
|
||||
.align 16
|
||||
.L014schedule_mangle_dec:
|
||||
movdqa -16(%ebp),%xmm2
|
||||
leal 416(%ebp),%esi
|
||||
movdqa %xmm2,%xmm1
|
||||
pandn %xmm4,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm2,%xmm4
|
||||
movdqa (%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
movdqa 16(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
movdqa 32(%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 48(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
movdqa 64(%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 80(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
movdqa 96(%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 112(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
addl $-16,%edx
|
||||
.L015schedule_mangle_both:
|
||||
movdqa 256(%ebp,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,217
|
||||
addl $-16,%ecx
|
||||
andl $48,%ecx
|
||||
movdqu %xmm3,(%edx)
|
||||
ret
|
||||
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
|
||||
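/* vpaes_set_encrypt_key(user_key, bits, key): build the encryption key
   schedule; the round count bits/32 + 5 is stored at key+240. */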
.globl vpaes_set_encrypt_key
|
||||
.hidden vpaes_set_encrypt_key
|
||||
.type vpaes_set_encrypt_key,@function
|
||||
.align 16
|
||||
vpaes_set_encrypt_key:
|
||||
.L_vpaes_set_encrypt_key_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 20(%esp),%esi
|
||||
leal -56(%esp),%ebx
|
||||
movl 24(%esp),%eax
|
||||
andl $-16,%ebx
|
||||
movl 28(%esp),%edx
|
||||
xchgl %esp,%ebx
|
||||
movl %ebx,48(%esp)
|
||||
movl %eax,%ebx
|
||||
shrl $5,%ebx
|
||||
addl $5,%ebx
|
||||
movl %ebx,240(%edx)
|
||||
movl $48,%ecx
|
||||
movl $0,%edi
|
||||
leal .L_vpaes_consts+0x30-.L016pic_point,%ebp
|
||||
call _vpaes_schedule_core
|
||||
.L016pic_point:
|
||||
movl 48(%esp),%esp
|
||||
xorl %eax,%eax
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin
|
||||
.globl vpaes_set_decrypt_key
|
||||
.hidden vpaes_set_decrypt_key
|
||||
.type vpaes_set_decrypt_key,@function
|
||||
.align 16
|
||||
vpaes_set_decrypt_key:
|
||||
.L_vpaes_set_decrypt_key_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 20(%esp),%esi
|
||||
leal -56(%esp),%ebx
|
||||
movl 24(%esp),%eax
|
||||
andl $-16,%ebx
|
||||
movl 28(%esp),%edx
|
||||
xchgl %esp,%ebx
|
||||
movl %ebx,48(%esp)
|
||||
movl %eax,%ebx
|
||||
shrl $5,%ebx
|
||||
addl $5,%ebx
|
||||
movl %ebx,240(%edx)
|
||||
shll $4,%ebx
|
||||
leal 16(%edx,%ebx,1),%edx
|
||||
movl $1,%edi
|
||||
movl %eax,%ecx
|
||||
shrl $1,%ecx
|
||||
andl $32,%ecx
|
||||
xorl $32,%ecx
|
||||
leal .L_vpaes_consts+0x30-.L017pic_point,%ebp
|
||||
call _vpaes_schedule_core
|
||||
.L017pic_point:
|
||||
movl 48(%esp),%esp
|
||||
xorl %eax,%eax
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.size vpaes_set_decrypt_key,.-.L_vpaes_set_decrypt_key_begin
|
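/* vpaes_encrypt: encrypt one 16-byte block; arguments (in, out, key) are at
   20/24/28(%esp), and the stack is realigned to 16 bytes before the SSE
   core runs. */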
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,@function
.align 16
vpaes_encrypt:
.L_vpaes_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
leal .L_vpaes_consts+0x30-.L018pic_point,%ebp
call _vpaes_preheat
.L018pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_encrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_encrypt,.-.L_vpaes_encrypt_begin
||||
.globl vpaes_decrypt
|
||||
.hidden vpaes_decrypt
|
||||
.type vpaes_decrypt,@function
|
||||
.align 16
|
||||
vpaes_decrypt:
|
||||
.L_vpaes_decrypt_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
leal .L_vpaes_consts+0x30-.L019pic_point,%ebp
|
||||
call _vpaes_preheat
|
||||
.L019pic_point:
|
||||
movl 20(%esp),%esi
|
||||
leal -56(%esp),%ebx
|
||||
movl 24(%esp),%edi
|
||||
andl $-16,%ebx
|
||||
movl 28(%esp),%edx
|
||||
xchgl %esp,%ebx
|
||||
movl %ebx,48(%esp)
|
||||
movdqu (%esi),%xmm0
|
||||
call _vpaes_decrypt_core
|
||||
movdqu %xmm0,(%edi)
|
||||
movl 48(%esp),%esp
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.size vpaes_decrypt,.-.L_vpaes_decrypt_begin
|
||||
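/* vpaes_cbc_encrypt: CBC-mode encrypt or decrypt (the last stack argument
   selects the direction) over whole 16-byte blocks, carrying the chaining
   value in %xmm1. */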
.globl vpaes_cbc_encrypt
|
||||
.hidden vpaes_cbc_encrypt
|
||||
.type vpaes_cbc_encrypt,@function
|
||||
.align 16
|
||||
vpaes_cbc_encrypt:
|
||||
.L_vpaes_cbc_encrypt_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 20(%esp),%esi
|
||||
movl 24(%esp),%edi
|
||||
movl 28(%esp),%eax
|
||||
movl 32(%esp),%edx
|
||||
subl $16,%eax
|
||||
jc .L020cbc_abort
|
||||
leal -56(%esp),%ebx
|
||||
movl 36(%esp),%ebp
|
||||
andl $-16,%ebx
|
||||
movl 40(%esp),%ecx
|
||||
xchgl %esp,%ebx
|
||||
movdqu (%ebp),%xmm1
|
||||
subl %esi,%edi
|
||||
movl %ebx,48(%esp)
|
||||
movl %edi,(%esp)
|
||||
movl %edx,4(%esp)
|
||||
movl %ebp,8(%esp)
|
||||
movl %eax,%edi
|
||||
leal .L_vpaes_consts+0x30-.L021pic_point,%ebp
|
||||
call _vpaes_preheat
|
||||
.L021pic_point:
|
||||
cmpl $0,%ecx
|
||||
je .L022cbc_dec_loop
|
||||
jmp .L023cbc_enc_loop
|
||||
.align 16
|
||||
.L023cbc_enc_loop:
|
||||
movdqu (%esi),%xmm0
|
||||
pxor %xmm1,%xmm0
|
||||
call _vpaes_encrypt_core
|
||||
movl (%esp),%ebx
|
||||
movl 4(%esp),%edx
|
||||
movdqa %xmm0,%xmm1
|
||||
movdqu %xmm0,(%ebx,%esi,1)
|
||||
leal 16(%esi),%esi
|
||||
subl $16,%edi
|
||||
jnc .L023cbc_enc_loop
|
||||
jmp .L024cbc_done
|
||||
.align 16
|
||||
.L022cbc_dec_loop:
|
||||
movdqu (%esi),%xmm0
|
||||
movdqa %xmm1,16(%esp)
|
||||
movdqa %xmm0,32(%esp)
|
||||
call _vpaes_decrypt_core
|
||||
movl (%esp),%ebx
|
||||
movl 4(%esp),%edx
|
||||
pxor 16(%esp),%xmm0
|
||||
movdqa 32(%esp),%xmm1
|
||||
movdqu %xmm0,(%ebx,%esi,1)
|
||||
leal 16(%esi),%esi
|
||||
subl $16,%edi
|
||||
jnc .L022cbc_dec_loop
|
||||
.L024cbc_done:
|
||||
movl 8(%esp),%ebx
|
||||
movl 48(%esp),%esp
|
||||
movdqu %xmm1,(%ebx)
|
||||
.L020cbc_abort:
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin
|
||||
#endif
|
|
@ -0,0 +1,475 @@
|
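/* Montgomery multiplication for 32-bit x86 (bn_mul_mont), CRYPTOGAMS by
   <appro@openssl.org>; takes an SSE2/MMX path when CPUID reports SSE2 and
   falls back to integer-only code otherwise. */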
#if defined(__i386__)
.text
.globl bn_mul_mont
.hidden bn_mul_mont
.type bn_mul_mont,@function
.align 16
bn_mul_mont:
.L_bn_mul_mont_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
movl 40(%esp),%edi
cmpl $4,%edi
jl .L000just_leave
||||
leal 20(%esp),%esi
|
||||
leal 24(%esp),%edx
|
||||
addl $2,%edi
|
||||
negl %edi
|
||||
leal -32(%esp,%edi,4),%ebp
|
||||
negl %edi
|
||||
movl %ebp,%eax
|
||||
subl %edx,%eax
|
||||
andl $2047,%eax
|
||||
subl %eax,%ebp
|
||||
xorl %ebp,%edx
|
||||
andl $2048,%edx
|
||||
xorl $2048,%edx
|
||||
subl %edx,%ebp
|
||||
andl $-64,%ebp
|
||||
movl %esp,%eax
|
||||
subl %ebp,%eax
|
||||
andl $-4096,%eax
|
||||
movl %esp,%edx
|
||||
leal (%ebp,%eax,1),%esp
|
||||
movl (%esp),%eax
|
||||
cmpl %ebp,%esp
|
||||
ja .L001page_walk
|
||||
jmp .L002page_walk_done
|
||||
.align 16
|
||||
.L001page_walk:
|
||||
leal -4096(%esp),%esp
|
||||
movl (%esp),%eax
|
||||
cmpl %ebp,%esp
|
||||
ja .L001page_walk
|
||||
.L002page_walk_done:
|
||||
movl (%esi),%eax
|
||||
movl 4(%esi),%ebx
|
||||
movl 8(%esi),%ecx
|
||||
movl 12(%esi),%ebp
|
||||
movl 16(%esi),%esi
|
||||
movl (%esi),%esi
|
||||
movl %eax,4(%esp)
|
||||
movl %ebx,8(%esp)
|
||||
movl %ecx,12(%esp)
|
||||
movl %ebp,16(%esp)
|
||||
movl %esi,20(%esp)
|
||||
leal -3(%edi),%ebx
|
||||
movl %edx,24(%esp)
|
||||
call .L003PIC_me_up
|
||||
.L003PIC_me_up:
|
||||
popl %eax
|
||||
leal OPENSSL_ia32cap_P-.L003PIC_me_up(%eax),%eax
|
||||
btl $26,(%eax)
|
||||
jnc .L004non_sse2
|
||||
movl $-1,%eax
|
||||
movd %eax,%mm7
|
||||
movl 8(%esp),%esi
|
||||
movl 12(%esp),%edi
|
||||
movl 16(%esp),%ebp
|
||||
xorl %edx,%edx
|
||||
xorl %ecx,%ecx
|
||||
movd (%edi),%mm4
|
||||
movd (%esi),%mm5
|
||||
movd (%ebp),%mm3
|
||||
pmuludq %mm4,%mm5
|
||||
movq %mm5,%mm2
|
||||
movq %mm5,%mm0
|
||||
pand %mm7,%mm0
|
||||
pmuludq 20(%esp),%mm5
|
||||
pmuludq %mm5,%mm3
|
||||
paddq %mm0,%mm3
|
||||
movd 4(%ebp),%mm1
|
||||
movd 4(%esi),%mm0
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
incl %ecx
|
||||
.align 16
|
||||
.L0051st:
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
pand %mm7,%mm0
|
||||
movd 4(%ebp,%ecx,4),%mm1
|
||||
paddq %mm0,%mm3
|
||||
movd 4(%esi,%ecx,4),%mm0
|
||||
psrlq $32,%mm2
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm3
|
||||
leal 1(%ecx),%ecx
|
||||
cmpl %ebx,%ecx
|
||||
jl .L0051st
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
pand %mm7,%mm0
|
||||
paddq %mm0,%mm3
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
paddq %mm2,%mm3
|
||||
movq %mm3,32(%esp,%ebx,4)
|
||||
incl %edx
|
||||
.L006outer:
|
||||
xorl %ecx,%ecx
|
||||
movd (%edi,%edx,4),%mm4
|
||||
movd (%esi),%mm5
|
||||
movd 32(%esp),%mm6
|
||||
movd (%ebp),%mm3
|
||||
pmuludq %mm4,%mm5
|
||||
paddq %mm6,%mm5
|
||||
movq %mm5,%mm0
|
||||
movq %mm5,%mm2
|
||||
pand %mm7,%mm0
|
||||
pmuludq 20(%esp),%mm5
|
||||
pmuludq %mm5,%mm3
|
||||
paddq %mm0,%mm3
|
||||
movd 36(%esp),%mm6
|
||||
movd 4(%ebp),%mm1
|
||||
movd 4(%esi),%mm0
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
paddq %mm6,%mm2
|
||||
incl %ecx
|
||||
decl %ebx
|
||||
.L007inner:
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
movd 36(%esp,%ecx,4),%mm6
|
||||
pand %mm7,%mm0
|
||||
movd 4(%ebp,%ecx,4),%mm1
|
||||
paddq %mm0,%mm3
|
||||
movd 4(%esi,%ecx,4),%mm0
|
||||
psrlq $32,%mm2
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm3
|
||||
paddq %mm6,%mm2
|
||||
decl %ebx
|
||||
leal 1(%ecx),%ecx
|
||||
jnz .L007inner
|
||||
movl %ecx,%ebx
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
pand %mm7,%mm0
|
||||
paddq %mm0,%mm3
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
movd 36(%esp,%ebx,4),%mm6
|
||||
paddq %mm2,%mm3
|
||||
paddq %mm6,%mm3
|
||||
movq %mm3,32(%esp,%ebx,4)
|
||||
leal 1(%edx),%edx
|
||||
cmpl %ebx,%edx
|
||||
jle .L006outer
|
||||
emms
|
||||
jmp .L008common_tail
|
||||
.align 16
|
||||
.L004non_sse2:
|
||||
movl 8(%esp),%esi
|
||||
leal 1(%ebx),%ebp
|
||||
movl 12(%esp),%edi
|
||||
xorl %ecx,%ecx
|
||||
movl %esi,%edx
|
||||
andl $1,%ebp
|
||||
subl %edi,%edx
|
||||
leal 4(%edi,%ebx,4),%eax
|
||||
orl %edx,%ebp
|
||||
movl (%edi),%edi
|
||||
jz .L009bn_sqr_mont
|
||||
movl %eax,28(%esp)
|
||||
movl (%esi),%eax
|
||||
xorl %edx,%edx
|
||||
.align 16
|
||||
.L010mull:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %eax,%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
movl (%esi,%ecx,4),%eax
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
jl .L010mull
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
movl 20(%esp),%edi
|
||||
addl %ebp,%eax
|
||||
movl 16(%esp),%esi
|
||||
adcl $0,%edx
|
||||
imull 32(%esp),%edi
|
||||
movl %eax,32(%esp,%ebx,4)
|
||||
xorl %ecx,%ecx
|
||||
movl %edx,36(%esp,%ebx,4)
|
||||
movl %ecx,40(%esp,%ebx,4)
|
||||
movl (%esi),%eax
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
movl 4(%esi),%eax
|
||||
adcl $0,%edx
|
||||
incl %ecx
|
||||
jmp .L0112ndmadd
|
||||
.align 16
|
||||
.L0121stmadd:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl (%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
jl .L0121stmadd
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ebx,4),%eax
|
||||
movl 20(%esp),%edi
|
||||
adcl $0,%edx
|
||||
movl 16(%esp),%esi
|
||||
addl %eax,%ebp
|
||||
adcl $0,%edx
|
||||
imull 32(%esp),%edi
|
||||
xorl %ecx,%ecx
|
||||
addl 36(%esp,%ebx,4),%edx
|
||||
movl %ebp,32(%esp,%ebx,4)
|
||||
adcl $0,%ecx
|
||||
movl (%esi),%eax
|
||||
movl %edx,36(%esp,%ebx,4)
|
||||
movl %ecx,40(%esp,%ebx,4)
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
movl 4(%esi),%eax
|
||||
adcl $0,%edx
|
||||
movl $1,%ecx
|
||||
.align 16
|
||||
.L0112ndmadd:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl (%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,24(%esp,%ecx,4)
|
||||
jl .L0112ndmadd
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ebx,4),%ebp
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
adcl $0,%edx
|
||||
movl %ebp,28(%esp,%ebx,4)
|
||||
xorl %eax,%eax
|
||||
movl 12(%esp),%ecx
|
||||
addl 36(%esp,%ebx,4),%edx
|
||||
adcl 40(%esp,%ebx,4),%eax
|
||||
leal 4(%ecx),%ecx
|
||||
movl %edx,32(%esp,%ebx,4)
|
||||
cmpl 28(%esp),%ecx
|
||||
movl %eax,36(%esp,%ebx,4)
|
||||
je .L008common_tail
|
||||
movl (%ecx),%edi
|
||||
movl 8(%esp),%esi
|
||||
movl %ecx,12(%esp)
|
||||
xorl %ecx,%ecx
|
||||
xorl %edx,%edx
|
||||
movl (%esi),%eax
|
||||
jmp .L0121stmadd
|
||||
.align 16
|
||||
.L009bn_sqr_mont:
|
||||
movl %ebx,(%esp)
|
||||
movl %ecx,12(%esp)
|
||||
movl %edi,%eax
|
||||
mull %edi
|
||||
movl %eax,32(%esp)
|
||||
movl %edx,%ebx
|
||||
shrl $1,%edx
|
||||
andl $1,%ebx
|
||||
incl %ecx
|
||||
.align 16
|
||||
.L013sqr:
|
||||
movl (%esi,%ecx,4),%eax
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %ebp,%eax
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
leal (%ebx,%eax,2),%ebp
|
||||
shrl $31,%eax
|
||||
cmpl (%esp),%ecx
|
||||
movl %eax,%ebx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
jl .L013sqr
|
||||
movl (%esi,%ecx,4),%eax
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %ebp,%eax
|
||||
movl 20(%esp),%edi
|
||||
adcl $0,%edx
|
||||
movl 16(%esp),%esi
|
||||
leal (%ebx,%eax,2),%ebp
|
||||
imull 32(%esp),%edi
|
||||
shrl $31,%eax
|
||||
movl %ebp,32(%esp,%ecx,4)
|
||||
leal (%eax,%edx,2),%ebp
|
||||
movl (%esi),%eax
|
||||
shrl $31,%edx
|
||||
movl %ebp,36(%esp,%ecx,4)
|
||||
movl %edx,40(%esp,%ecx,4)
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
movl %ecx,%ebx
|
||||
adcl $0,%edx
|
||||
movl 4(%esi),%eax
|
||||
movl $1,%ecx
|
||||
.align 16
|
||||
.L0143rdmadd:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl 4(%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 36(%esp,%ecx,4),%ebp
|
||||
leal 2(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl (%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,24(%esp,%ecx,4)
|
||||
jl .L0143rdmadd
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ebx,4),%ebp
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
adcl $0,%edx
|
||||
movl %ebp,28(%esp,%ebx,4)
|
||||
movl 12(%esp),%ecx
|
||||
xorl %eax,%eax
|
||||
movl 8(%esp),%esi
|
||||
addl 36(%esp,%ebx,4),%edx
|
||||
adcl 40(%esp,%ebx,4),%eax
|
||||
movl %edx,32(%esp,%ebx,4)
|
||||
cmpl %ebx,%ecx
|
||||
movl %eax,36(%esp,%ebx,4)
|
||||
je .L008common_tail
|
||||
movl 4(%esi,%ecx,4),%edi
|
||||
leal 1(%ecx),%ecx
|
||||
movl %edi,%eax
|
||||
movl %ecx,12(%esp)
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
movl %eax,32(%esp,%ecx,4)
|
||||
xorl %ebp,%ebp
|
||||
cmpl %ebx,%ecx
|
||||
leal 1(%ecx),%ecx
|
||||
je .L015sqrlast
|
||||
movl %edx,%ebx
|
||||
shrl $1,%edx
|
||||
andl $1,%ebx
|
||||
.align 16
|
||||
.L016sqradd:
|
||||
movl (%esi,%ecx,4),%eax
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %ebp,%eax
|
||||
leal (%eax,%eax,1),%ebp
|
||||
adcl $0,%edx
|
||||
shrl $31,%eax
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%eax
|
||||
addl %ebx,%ebp
|
||||
adcl $0,%eax
|
||||
cmpl (%esp),%ecx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
movl %eax,%ebx
|
||||
jle .L016sqradd
|
||||
movl %edx,%ebp
|
||||
addl %edx,%edx
|
||||
shrl $31,%ebp
|
||||
addl %ebx,%edx
|
||||
adcl $0,%ebp
|
||||
.L015sqrlast:
|
||||
movl 20(%esp),%edi
|
||||
movl 16(%esp),%esi
|
||||
imull 32(%esp),%edi
|
||||
addl 32(%esp,%ecx,4),%edx
|
||||
movl (%esi),%eax
|
||||
adcl $0,%ebp
|
||||
movl %edx,32(%esp,%ecx,4)
|
||||
movl %ebp,36(%esp,%ecx,4)
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
leal -1(%ecx),%ebx
|
||||
adcl $0,%edx
|
||||
movl $1,%ecx
|
||||
movl 4(%esi),%eax
|
||||
jmp .L0143rdmadd
|
||||
.align 16
|
||||
.L008common_tail:
|
||||
movl 16(%esp),%ebp
|
||||
movl 4(%esp),%edi
|
||||
leal 32(%esp),%esi
|
||||
movl (%esi),%eax
|
||||
movl %ebx,%ecx
|
||||
xorl %edx,%edx
|
||||
.align 16
|
||||
.L017sub:
|
||||
sbbl (%ebp,%edx,4),%eax
|
||||
movl %eax,(%edi,%edx,4)
|
||||
decl %ecx
|
||||
movl 4(%esi,%edx,4),%eax
|
||||
leal 1(%edx),%edx
|
||||
jge .L017sub
|
||||
sbbl $0,%eax
|
||||
andl %eax,%esi
|
||||
notl %eax
|
||||
movl %edi,%ebp
|
||||
andl %eax,%ebp
|
||||
orl %ebp,%esi
|
||||
.align 16
|
||||
.L018copy:
|
||||
movl (%esi,%ebx,4),%eax
|
||||
movl %eax,(%edi,%ebx,4)
|
||||
movl %ecx,32(%esp,%ebx,4)
|
||||
decl %ebx
|
||||
jge .L018copy
|
||||
movl 24(%esp),%esp
|
||||
movl $1,%eax
|
||||
.L000just_leave:
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.size bn_mul_mont,.-.L_bn_mul_mont_begin
|
||||
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
|
||||
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
|
||||
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
|
||||
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
|
||||
.byte 111,114,103,62,0
|
||||
#endif
|
|
@ -0,0 +1,798 @@
|
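/* AES-NI/CLMUL AES-GCM module for x86_64 (CRYPTOGAMS): the GHASH reduction
   is stitched with the counter-mode AES rounds so that six blocks are
   processed per loop iteration. */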
|||
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
|
||||
.text
|
||||
|
||||
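/* _aesni_ctr32_ghash_6x: main loop; encrypts six CTR blocks with AES-NI
   while folding six ciphertext blocks into the GHASH accumulator via
   vpclmulqdq. */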
.type _aesni_ctr32_ghash_6x,@function
|
||||
.align 32
|
||||
_aesni_ctr32_ghash_6x:
|
||||
vmovdqu 32(%r11),%xmm2
|
||||
subq $6,%rdx
|
||||
vpxor %xmm4,%xmm4,%xmm4
|
||||
vmovdqu 0-128(%rcx),%xmm15
|
||||
vpaddb %xmm2,%xmm1,%xmm10
|
||||
vpaddb %xmm2,%xmm10,%xmm11
|
||||
vpaddb %xmm2,%xmm11,%xmm12
|
||||
vpaddb %xmm2,%xmm12,%xmm13
|
||||
vpaddb %xmm2,%xmm13,%xmm14
|
||||
vpxor %xmm15,%xmm1,%xmm9
|
||||
vmovdqu %xmm4,16+8(%rsp)
|
||||
jmp .Loop6x
|
||||
|
||||
.align 32
|
||||
.Loop6x:
|
||||
addl $100663296,%ebx
|
||||
jc .Lhandle_ctr32
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpaddb %xmm2,%xmm14,%xmm1
|
||||
vpxor %xmm15,%xmm10,%xmm10
|
||||
vpxor %xmm15,%xmm11,%xmm11
|
||||
|
||||
.Lresume_ctr32:
|
||||
vmovdqu %xmm1,(%r8)
|
||||
vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5
|
||||
vpxor %xmm15,%xmm12,%xmm12
|
||||
vmovups 16-128(%rcx),%xmm2
|
||||
vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
xorq %r12,%r12
|
||||
cmpq %r14,%r15
|
||||
|
||||
vaesenc %xmm2,%xmm9,%xmm9
|
||||
vmovdqu 48+8(%rsp),%xmm0
|
||||
vpxor %xmm15,%xmm13,%xmm13
|
||||
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1
|
||||
vaesenc %xmm2,%xmm10,%xmm10
|
||||
vpxor %xmm15,%xmm14,%xmm14
|
||||
setnc %r12b
|
||||
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
|
||||
vaesenc %xmm2,%xmm11,%xmm11
|
||||
vmovdqu 16-32(%r9),%xmm3
|
||||
negq %r12
|
||||
vaesenc %xmm2,%xmm12,%xmm12
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5
|
||||
vpxor %xmm4,%xmm8,%xmm8
|
||||
vaesenc %xmm2,%xmm13,%xmm13
|
||||
vpxor %xmm5,%xmm1,%xmm4
|
||||
andq $0x60,%r12
|
||||
vmovups 32-128(%rcx),%xmm15
|
||||
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1
|
||||
vaesenc %xmm2,%xmm14,%xmm14
|
||||
|
||||
vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2
|
||||
leaq (%r14,%r12,1),%r14
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor 16+8(%rsp),%xmm8,%xmm8
|
||||
vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3
|
||||
vmovdqu 64+8(%rsp),%xmm0
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 88(%r14),%r13
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 80(%r14),%r12
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,32+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,40+8(%rsp)
|
||||
vmovdqu 48-32(%r9),%xmm5
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 48-128(%rcx),%xmm15
|
||||
vpxor %xmm1,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm2,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpxor %xmm3,%xmm7,%xmm7
|
||||
vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5
|
||||
vmovdqu 80+8(%rsp),%xmm0
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vpxor %xmm1,%xmm4,%xmm4
|
||||
vmovdqu 64-32(%r9),%xmm1
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 64-128(%rcx),%xmm15
|
||||
vpxor %xmm2,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm3,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 72(%r14),%r13
|
||||
vpxor %xmm5,%xmm7,%xmm7
|
||||
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 64(%r14),%r12
|
||||
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1
|
||||
vmovdqu 96+8(%rsp),%xmm0
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,48+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,56+8(%rsp)
|
||||
vpxor %xmm2,%xmm4,%xmm4
|
||||
vmovdqu 96-32(%r9),%xmm2
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 80-128(%rcx),%xmm15
|
||||
vpxor %xmm3,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 56(%r14),%r13
|
||||
vpxor %xmm1,%xmm7,%xmm7
|
||||
vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1
|
||||
vpxor 112+8(%rsp),%xmm8,%xmm8
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 48(%r14),%r12
|
||||
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,64+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,72+8(%rsp)
|
||||
vpxor %xmm3,%xmm4,%xmm4
|
||||
vmovdqu 112-32(%r9),%xmm3
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 96-128(%rcx),%xmm15
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm1,%xmm6,%xmm6
|
||||
vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 40(%r14),%r13
|
||||
vpxor %xmm2,%xmm7,%xmm7
|
||||
vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 32(%r14),%r12
|
||||
vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,80+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,88+8(%rsp)
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vpxor %xmm1,%xmm6,%xmm6
|
||||
|
||||
vmovups 112-128(%rcx),%xmm15
|
||||
vpslldq $8,%xmm6,%xmm5
|
||||
vpxor %xmm2,%xmm4,%xmm4
|
||||
vmovdqu 16(%r11),%xmm3
|
||||
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm8,%xmm7,%xmm7
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
movbeq 24(%r14),%r13
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 16(%r14),%r12
|
||||
vpalignr $8,%xmm4,%xmm4,%xmm0
|
||||
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
|
||||
movq %r13,96+8(%rsp)
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r12,104+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vmovups 128-128(%rcx),%xmm1
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vaesenc %xmm1,%xmm9,%xmm9
|
||||
vmovups 144-128(%rcx),%xmm15
|
||||
vaesenc %xmm1,%xmm10,%xmm10
|
||||
vpsrldq $8,%xmm6,%xmm6
|
||||
vaesenc %xmm1,%xmm11,%xmm11
|
||||
vpxor %xmm6,%xmm7,%xmm7
|
||||
vaesenc %xmm1,%xmm12,%xmm12
|
||||
vpxor %xmm0,%xmm4,%xmm4
|
||||
movbeq 8(%r14),%r13
|
||||
vaesenc %xmm1,%xmm13,%xmm13
|
||||
movbeq 0(%r14),%r12
|
||||
vaesenc %xmm1,%xmm14,%xmm14
|
||||
vmovups 160-128(%rcx),%xmm1
|
||||
cmpl $11,%ebp
|
||||
jb .Lenc_tail
|
||||
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vaesenc %xmm1,%xmm9,%xmm9
|
||||
vaesenc %xmm1,%xmm10,%xmm10
|
||||
vaesenc %xmm1,%xmm11,%xmm11
|
||||
vaesenc %xmm1,%xmm12,%xmm12
|
||||
vaesenc %xmm1,%xmm13,%xmm13
|
||||
vmovups 176-128(%rcx),%xmm15
|
||||
vaesenc %xmm1,%xmm14,%xmm14
|
||||
vmovups 192-128(%rcx),%xmm1
|
||||
je .Lenc_tail
|
||||
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vaesenc %xmm1,%xmm9,%xmm9
|
||||
vaesenc %xmm1,%xmm10,%xmm10
|
||||
vaesenc %xmm1,%xmm11,%xmm11
|
||||
vaesenc %xmm1,%xmm12,%xmm12
|
||||
vaesenc %xmm1,%xmm13,%xmm13
|
||||
vmovups 208-128(%rcx),%xmm15
|
||||
vaesenc %xmm1,%xmm14,%xmm14
|
||||
vmovups 224-128(%rcx),%xmm1
|
||||
jmp .Lenc_tail
|
||||
|
||||
.align 32
|
||||
.Lhandle_ctr32:
|
||||
vmovdqu (%r11),%xmm0
|
||||
vpshufb %xmm0,%xmm1,%xmm6
|
||||
vmovdqu 48(%r11),%xmm5
|
||||
vpaddd 64(%r11),%xmm6,%xmm10
|
||||
vpaddd %xmm5,%xmm6,%xmm11
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpaddd %xmm5,%xmm10,%xmm12
|
||||
vpshufb %xmm0,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm11,%xmm13
|
||||
vpshufb %xmm0,%xmm11,%xmm11
|
||||
vpxor %xmm15,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm12,%xmm14
|
||||
vpshufb %xmm0,%xmm12,%xmm12
|
||||
vpxor %xmm15,%xmm11,%xmm11
|
||||
vpaddd %xmm5,%xmm13,%xmm1
|
||||
vpshufb %xmm0,%xmm13,%xmm13
|
||||
vpshufb %xmm0,%xmm14,%xmm14
|
||||
vpshufb %xmm0,%xmm1,%xmm1
|
||||
jmp .Lresume_ctr32
|
||||
|
||||
.align 32
|
||||
.Lenc_tail:
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vmovdqu %xmm7,16+8(%rsp)
|
||||
vpalignr $8,%xmm4,%xmm4,%xmm8
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
|
||||
vpxor 0(%rdi),%xmm1,%xmm2
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vpxor 16(%rdi),%xmm1,%xmm0
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vpxor 32(%rdi),%xmm1,%xmm5
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vpxor 48(%rdi),%xmm1,%xmm6
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vpxor 64(%rdi),%xmm1,%xmm7
|
||||
vpxor 80(%rdi),%xmm1,%xmm3
|
||||
vmovdqu (%r8),%xmm1
|
||||
|
||||
vaesenclast %xmm2,%xmm9,%xmm9
|
||||
vmovdqu 32(%r11),%xmm2
|
||||
vaesenclast %xmm0,%xmm10,%xmm10
|
||||
vpaddb %xmm2,%xmm1,%xmm0
|
||||
movq %r13,112+8(%rsp)
|
||||
leaq 96(%rdi),%rdi
|
||||
vaesenclast %xmm5,%xmm11,%xmm11
|
||||
vpaddb %xmm2,%xmm0,%xmm5
|
||||
movq %r12,120+8(%rsp)
|
||||
leaq 96(%rsi),%rsi
|
||||
vmovdqu 0-128(%rcx),%xmm15
|
||||
vaesenclast %xmm6,%xmm12,%xmm12
|
||||
vpaddb %xmm2,%xmm5,%xmm6
|
||||
vaesenclast %xmm7,%xmm13,%xmm13
|
||||
vpaddb %xmm2,%xmm6,%xmm7
|
||||
vaesenclast %xmm3,%xmm14,%xmm14
|
||||
vpaddb %xmm2,%xmm7,%xmm3
|
||||
|
||||
addq $0x60,%r10
|
||||
subq $0x6,%rdx
|
||||
jc .L6x_done
|
||||
|
||||
vmovups %xmm9,-96(%rsi)
|
||||
vpxor %xmm15,%xmm1,%xmm9
|
||||
vmovups %xmm10,-80(%rsi)
|
||||
vmovdqa %xmm0,%xmm10
|
||||
vmovups %xmm11,-64(%rsi)
|
||||
vmovdqa %xmm5,%xmm11
|
||||
vmovups %xmm12,-48(%rsi)
|
||||
vmovdqa %xmm6,%xmm12
|
||||
vmovups %xmm13,-32(%rsi)
|
||||
vmovdqa %xmm7,%xmm13
|
||||
vmovups %xmm14,-16(%rsi)
|
||||
vmovdqa %xmm3,%xmm14
|
||||
vmovdqu 32+8(%rsp),%xmm7
|
||||
jmp .Loop6x
|
||||
|
||||
.L6x_done:
|
||||
vpxor 16+8(%rsp),%xmm8,%xmm8
|
||||
vpxor %xmm4,%xmm8,%xmm8
|
||||
|
||||
.byte 0xf3,0xc3
|
||||
.size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
|
||||
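/* aesni_gcm_decrypt: bulk GCM decryption entry point; requires at least
   0x60 (96) bytes of input and returns the number of bytes processed
   in %rax. */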
.globl aesni_gcm_decrypt
|
||||
.hidden aesni_gcm_decrypt
|
||||
.type aesni_gcm_decrypt,@function
|
||||
.align 32
|
||||
aesni_gcm_decrypt:
|
||||
xorq %r10,%r10
|
||||
|
||||
|
||||
|
||||
cmpq $0x60,%rdx
|
||||
jb .Lgcm_dec_abort
|
||||
|
||||
leaq (%rsp),%rax
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r12
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
vzeroupper
|
||||
|
||||
vmovdqu (%r8),%xmm1
|
||||
addq $-128,%rsp
|
||||
movl 12(%r8),%ebx
|
||||
leaq .Lbswap_mask(%rip),%r11
|
||||
leaq -128(%rcx),%r14
|
||||
movq $0xf80,%r15
|
||||
vmovdqu (%r9),%xmm8
|
||||
andq $-128,%rsp
|
||||
vmovdqu (%r11),%xmm0
|
||||
leaq 128(%rcx),%rcx
|
||||
leaq 32+32(%r9),%r9
|
||||
movl 240-128(%rcx),%ebp
|
||||
vpshufb %xmm0,%xmm8,%xmm8
|
||||
|
||||
andq %r15,%r14
|
||||
andq %rsp,%r15
|
||||
subq %r14,%r15
|
||||
jc .Ldec_no_key_aliasing
|
||||
cmpq $768,%r15
|
||||
jnc .Ldec_no_key_aliasing
|
||||
subq %r15,%rsp
|
||||
.Ldec_no_key_aliasing:
|
||||
|
||||
vmovdqu 80(%rdi),%xmm7
|
||||
leaq (%rdi),%r14
|
||||
vmovdqu 64(%rdi),%xmm4
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
leaq -192(%rdi,%rdx,1),%r15
|
||||
|
||||
vmovdqu 48(%rdi),%xmm5
|
||||
shrq $4,%rdx
|
||||
xorq %r10,%r10
|
||||
vmovdqu 32(%rdi),%xmm6
|
||||
vpshufb %xmm0,%xmm7,%xmm7
|
||||
vmovdqu 16(%rdi),%xmm2
|
||||
vpshufb %xmm0,%xmm4,%xmm4
|
||||
vmovdqu (%rdi),%xmm3
|
||||
vpshufb %xmm0,%xmm5,%xmm5
|
||||
vmovdqu %xmm4,48(%rsp)
|
||||
vpshufb %xmm0,%xmm6,%xmm6
|
||||
vmovdqu %xmm5,64(%rsp)
|
||||
vpshufb %xmm0,%xmm2,%xmm2
|
||||
vmovdqu %xmm6,80(%rsp)
|
||||
vpshufb %xmm0,%xmm3,%xmm3
|
||||
vmovdqu %xmm2,96(%rsp)
|
||||
vmovdqu %xmm3,112(%rsp)
|
||||
|
||||
call _aesni_ctr32_ghash_6x
|
||||
|
||||
vmovups %xmm9,-96(%rsi)
|
||||
vmovups %xmm10,-80(%rsi)
|
||||
vmovups %xmm11,-64(%rsi)
|
||||
vmovups %xmm12,-48(%rsi)
|
||||
vmovups %xmm13,-32(%rsi)
|
||||
vmovups %xmm14,-16(%rsi)
|
||||
|
||||
vpshufb (%r11),%xmm8,%xmm8
|
||||
vmovdqu %xmm8,-64(%r9)
|
||||
|
||||
vzeroupper
|
||||
movq -48(%rax),%r15
|
||||
movq -40(%rax),%r14
|
||||
movq -32(%rax),%r13
|
||||
movq -24(%rax),%r12
|
||||
movq -16(%rax),%rbp
|
||||
movq -8(%rax),%rbx
|
||||
leaq (%rax),%rsp
|
||||
.Lgcm_dec_abort:
|
||||
movq %r10,%rax
|
||||
.byte 0xf3,0xc3
|
||||
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
|
||||
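/* _aesni_ctr32_6x: encrypt six counter blocks and XOR them with the input;
   used to prime the pipeline before the stitched encrypt loop. */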
.type _aesni_ctr32_6x,@function
|
||||
.align 32
|
||||
_aesni_ctr32_6x:
|
||||
vmovdqu 0-128(%rcx),%xmm4
|
||||
vmovdqu 32(%r11),%xmm2
|
||||
leaq -1(%rbp),%r13
|
||||
vmovups 16-128(%rcx),%xmm15
|
||||
leaq 32-128(%rcx),%r12
|
||||
vpxor %xmm4,%xmm1,%xmm9
|
||||
addl $100663296,%ebx
|
||||
jc .Lhandle_ctr32_2
|
||||
vpaddb %xmm2,%xmm1,%xmm10
|
||||
vpaddb %xmm2,%xmm10,%xmm11
|
||||
vpxor %xmm4,%xmm10,%xmm10
|
||||
vpaddb %xmm2,%xmm11,%xmm12
|
||||
vpxor %xmm4,%xmm11,%xmm11
|
||||
vpaddb %xmm2,%xmm12,%xmm13
|
||||
vpxor %xmm4,%xmm12,%xmm12
|
||||
vpaddb %xmm2,%xmm13,%xmm14
|
||||
vpxor %xmm4,%xmm13,%xmm13
|
||||
vpaddb %xmm2,%xmm14,%xmm1
|
||||
vpxor %xmm4,%xmm14,%xmm14
|
||||
jmp .Loop_ctr32
|
||||
|
||||
.align 16
|
||||
.Loop_ctr32:
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vmovups (%r12),%xmm15
|
||||
leaq 16(%r12),%r12
|
||||
decl %r13d
|
||||
jnz .Loop_ctr32
|
||||
|
||||
vmovdqu (%r12),%xmm3
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor 0(%rdi),%xmm3,%xmm4
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpxor 16(%rdi),%xmm3,%xmm5
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vpxor 32(%rdi),%xmm3,%xmm6
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vpxor 48(%rdi),%xmm3,%xmm8
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vpxor 64(%rdi),%xmm3,%xmm2
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vpxor 80(%rdi),%xmm3,%xmm3
|
||||
leaq 96(%rdi),%rdi
|
||||
|
||||
vaesenclast %xmm4,%xmm9,%xmm9
|
||||
vaesenclast %xmm5,%xmm10,%xmm10
|
||||
vaesenclast %xmm6,%xmm11,%xmm11
|
||||
vaesenclast %xmm8,%xmm12,%xmm12
|
||||
vaesenclast %xmm2,%xmm13,%xmm13
|
||||
vaesenclast %xmm3,%xmm14,%xmm14
|
||||
vmovups %xmm9,0(%rsi)
|
||||
vmovups %xmm10,16(%rsi)
|
||||
vmovups %xmm11,32(%rsi)
|
||||
vmovups %xmm12,48(%rsi)
|
||||
vmovups %xmm13,64(%rsi)
|
||||
vmovups %xmm14,80(%rsi)
|
||||
leaq 96(%rsi),%rsi
|
||||
|
||||
.byte 0xf3,0xc3
|
||||
.align 32
|
||||
.Lhandle_ctr32_2:
|
||||
vpshufb %xmm0,%xmm1,%xmm6
|
||||
vmovdqu 48(%r11),%xmm5
|
||||
vpaddd 64(%r11),%xmm6,%xmm10
|
||||
vpaddd %xmm5,%xmm6,%xmm11
|
||||
vpaddd %xmm5,%xmm10,%xmm12
|
||||
vpshufb %xmm0,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm11,%xmm13
|
||||
vpshufb %xmm0,%xmm11,%xmm11
|
||||
vpxor %xmm4,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm12,%xmm14
|
||||
vpshufb %xmm0,%xmm12,%xmm12
|
||||
vpxor %xmm4,%xmm11,%xmm11
|
||||
vpaddd %xmm5,%xmm13,%xmm1
|
||||
vpshufb %xmm0,%xmm13,%xmm13
|
||||
vpxor %xmm4,%xmm12,%xmm12
|
||||
vpshufb %xmm0,%xmm14,%xmm14
|
||||
vpxor %xmm4,%xmm13,%xmm13
|
||||
vpshufb %xmm0,%xmm1,%xmm1
|
||||
vpxor %xmm4,%xmm14,%xmm14
|
||||
jmp .Loop_ctr32
|
||||
.size _aesni_ctr32_6x,.-_aesni_ctr32_6x
|
||||
|
||||
.globl aesni_gcm_encrypt
|
||||
.hidden aesni_gcm_encrypt
|
||||
.type aesni_gcm_encrypt,@function
|
||||
.align 32
|
||||
aesni_gcm_encrypt:
|
||||
xorq %r10,%r10
|
||||
|
||||
|
||||
|
||||
|
||||
cmpq $288,%rdx
|
||||
jb .Lgcm_enc_abort
|
||||
|
||||
leaq (%rsp),%rax
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r12
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
vzeroupper
|
||||
|
||||
vmovdqu (%r8),%xmm1
|
||||
addq $-128,%rsp
|
||||
movl 12(%r8),%ebx
|
||||
leaq .Lbswap_mask(%rip),%r11
|
||||
leaq -128(%rcx),%r14
|
||||
movq $0xf80,%r15
|
||||
leaq 128(%rcx),%rcx
|
||||
vmovdqu (%r11),%xmm0
|
||||
andq $-128,%rsp
|
||||
movl 240-128(%rcx),%ebp
|
||||
|
||||
andq %r15,%r14
|
||||
andq %rsp,%r15
|
||||
subq %r14,%r15
|
||||
jc .Lenc_no_key_aliasing
|
||||
cmpq $768,%r15
|
||||
jnc .Lenc_no_key_aliasing
|
||||
subq %r15,%rsp
|
||||
.Lenc_no_key_aliasing:
|
||||
|
||||
leaq (%rsi),%r14
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
leaq -192(%rsi,%rdx,1),%r15
|
||||
|
||||
shrq $4,%rdx
|
||||
|
||||
call _aesni_ctr32_6x
|
||||
vpshufb %xmm0,%xmm9,%xmm8
|
||||
vpshufb %xmm0,%xmm10,%xmm2
|
||||
vmovdqu %xmm8,112(%rsp)
|
||||
vpshufb %xmm0,%xmm11,%xmm4
|
||||
vmovdqu %xmm2,96(%rsp)
|
||||
vpshufb %xmm0,%xmm12,%xmm5
|
||||
vmovdqu %xmm4,80(%rsp)
|
||||
vpshufb %xmm0,%xmm13,%xmm6
|
||||
vmovdqu %xmm5,64(%rsp)
|
||||
vpshufb %xmm0,%xmm14,%xmm7
|
||||
vmovdqu %xmm6,48(%rsp)
|
||||
|
||||
call _aesni_ctr32_6x
|
||||
|
||||
vmovdqu (%r9),%xmm8
|
||||
leaq 32+32(%r9),%r9
|
||||
subq $12,%rdx
|
||||
movq $192,%r10
|
||||
vpshufb %xmm0,%xmm8,%xmm8
|
||||
|
||||
call _aesni_ctr32_ghash_6x
|
||||
vmovdqu 32(%rsp),%xmm7
|
||||
vmovdqu (%r11),%xmm0
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpunpckhqdq %xmm7,%xmm7,%xmm1
|
||||
vmovdqu 32-32(%r9),%xmm15
|
||||
vmovups %xmm9,-96(%rsi)
|
||||
vpshufb %xmm0,%xmm9,%xmm9
|
||||
vpxor %xmm7,%xmm1,%xmm1
|
||||
vmovups %xmm10,-80(%rsi)
|
||||
vpshufb %xmm0,%xmm10,%xmm10
|
||||
vmovups %xmm11,-64(%rsi)
|
||||
vpshufb %xmm0,%xmm11,%xmm11
|
||||
vmovups %xmm12,-48(%rsi)
|
||||
vpshufb %xmm0,%xmm12,%xmm12
|
||||
vmovups %xmm13,-32(%rsi)
|
||||
vpshufb %xmm0,%xmm13,%xmm13
|
||||
vmovups %xmm14,-16(%rsi)
|
||||
vpshufb %xmm0,%xmm14,%xmm14
|
||||
vmovdqu %xmm9,16(%rsp)
|
||||
vmovdqu 48(%rsp),%xmm6
|
||||
vmovdqu 16-32(%r9),%xmm0
|
||||
vpunpckhqdq %xmm6,%xmm6,%xmm2
|
||||
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5
|
||||
vpxor %xmm6,%xmm2,%xmm2
|
||||
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
|
||||
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
|
||||
|
||||
vmovdqu 64(%rsp),%xmm9
|
||||
vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4
|
||||
vmovdqu 48-32(%r9),%xmm3
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
vpunpckhqdq %xmm9,%xmm9,%xmm5
|
||||
vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6
|
||||
vpxor %xmm9,%xmm5,%xmm5
|
||||
vpxor %xmm7,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
|
||||
vmovdqu 80-32(%r9),%xmm15
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
|
||||
vmovdqu 80(%rsp),%xmm1
|
||||
vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7
|
||||
vmovdqu 64-32(%r9),%xmm0
|
||||
vpxor %xmm4,%xmm7,%xmm7
|
||||
vpunpckhqdq %xmm1,%xmm1,%xmm4
|
||||
vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9
|
||||
vpxor %xmm1,%xmm4,%xmm4
|
||||
vpxor %xmm6,%xmm9,%xmm9
|
||||
vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5
|
||||
vpxor %xmm2,%xmm5,%xmm5
|
||||
|
||||
vmovdqu 96(%rsp),%xmm2
|
||||
vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6
|
||||
vmovdqu 96-32(%r9),%xmm3
|
||||
vpxor %xmm7,%xmm6,%xmm6
|
||||
vpunpckhqdq %xmm2,%xmm2,%xmm7
|
||||
vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1
|
||||
vpxor %xmm2,%xmm7,%xmm7
|
||||
vpxor %xmm9,%xmm1,%xmm1
|
||||
vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4
|
||||
vmovdqu 128-32(%r9),%xmm15
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
|
||||
vpxor 112(%rsp),%xmm8,%xmm8
|
||||
vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5
|
||||
vmovdqu 112-32(%r9),%xmm0
|
||||
vpunpckhqdq %xmm8,%xmm8,%xmm9
|
||||
vpxor %xmm6,%xmm5,%xmm5
|
||||
vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2
|
||||
vpxor %xmm8,%xmm9,%xmm9
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7
|
||||
vpxor %xmm4,%xmm7,%xmm4
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpunpckhqdq %xmm14,%xmm14,%xmm1
|
||||
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8
|
||||
vpxor %xmm14,%xmm1,%xmm1
|
||||
vpxor %xmm5,%xmm6,%xmm5
|
||||
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9
|
||||
vmovdqu 32-32(%r9),%xmm15
|
||||
vpxor %xmm2,%xmm8,%xmm7
|
||||
vpxor %xmm4,%xmm9,%xmm6
|
||||
|
||||
vmovdqu 16-32(%r9),%xmm0
|
||||
vpxor %xmm5,%xmm7,%xmm9
|
||||
vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4
|
||||
vpxor %xmm9,%xmm6,%xmm6
|
||||
vpunpckhqdq %xmm13,%xmm13,%xmm2
|
||||
vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14
|
||||
vpxor %xmm13,%xmm2,%xmm2
|
||||
vpslldq $8,%xmm6,%xmm9
|
||||
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
|
||||
vpxor %xmm9,%xmm5,%xmm8
|
||||
vpsrldq $8,%xmm6,%xmm6
|
||||
vpxor %xmm6,%xmm7,%xmm7
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5
|
||||
vmovdqu 48-32(%r9),%xmm3
|
||||
vpxor %xmm4,%xmm5,%xmm5
|
||||
vpunpckhqdq %xmm12,%xmm12,%xmm9
|
||||
vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13
|
||||
vpxor %xmm12,%xmm9,%xmm9
|
||||
vpxor %xmm14,%xmm13,%xmm13
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm14
|
||||
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
|
||||
vmovdqu 80-32(%r9),%xmm15
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
|
||||
vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4
|
||||
vmovdqu 64-32(%r9),%xmm0
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
vpunpckhqdq %xmm11,%xmm11,%xmm1
|
||||
vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12
|
||||
vpxor %xmm11,%xmm1,%xmm1
|
||||
vpxor %xmm13,%xmm12,%xmm12
|
||||
vxorps 16(%rsp),%xmm7,%xmm7
|
||||
vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm2,%xmm9,%xmm9
|
||||
|
||||
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
|
||||
vxorps %xmm14,%xmm8,%xmm8
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5
|
||||
vmovdqu 96-32(%r9),%xmm3
|
||||
vpxor %xmm4,%xmm5,%xmm5
|
||||
vpunpckhqdq %xmm10,%xmm10,%xmm2
|
||||
vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11
|
||||
vpxor %xmm10,%xmm2,%xmm2
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm14
|
||||
vpxor %xmm12,%xmm11,%xmm11
|
||||
vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1
|
||||
vmovdqu 128-32(%r9),%xmm15
|
||||
vpxor %xmm9,%xmm1,%xmm1
|
||||
|
||||
vxorps %xmm7,%xmm14,%xmm14
|
||||
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
|
||||
vxorps %xmm14,%xmm8,%xmm8
|
||||
|
||||
vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4
|
||||
vmovdqu 112-32(%r9),%xmm0
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
vpunpckhqdq %xmm8,%xmm8,%xmm9
|
||||
vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10
|
||||
vpxor %xmm8,%xmm9,%xmm9
|
||||
vpxor %xmm11,%xmm10,%xmm10
|
||||
vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5
|
||||
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7
|
||||
vpxor %xmm4,%xmm5,%xmm5
|
||||
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6
|
||||
vpxor %xmm10,%xmm7,%xmm7
|
||||
vpxor %xmm2,%xmm6,%xmm6
|
||||
|
||||
vpxor %xmm5,%xmm7,%xmm4
|
||||
vpxor %xmm4,%xmm6,%xmm6
|
||||
vpslldq $8,%xmm6,%xmm1
|
||||
vmovdqu 16(%r11),%xmm3
|
||||
vpsrldq $8,%xmm6,%xmm6
|
||||
vpxor %xmm1,%xmm5,%xmm8
|
||||
vpxor %xmm6,%xmm7,%xmm7
|
||||
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm2
|
||||
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
|
||||
vpxor %xmm2,%xmm8,%xmm8
|
||||
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm2
|
||||
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
|
||||
vpxor %xmm7,%xmm2,%xmm2
|
||||
vpxor %xmm2,%xmm8,%xmm8
|
||||
vpshufb (%r11),%xmm8,%xmm8
|
||||
vmovdqu %xmm8,-64(%r9)
|
||||
|
||||
vzeroupper
|
||||
movq -48(%rax),%r15
|
||||
movq -40(%rax),%r14
|
||||
movq -32(%rax),%r13
|
||||
movq -24(%rax),%r12
|
||||
movq -16(%rax),%rbp
|
||||
movq -8(%rax),%rbx
|
||||
leaq (%rax),%rsp
|
||||
.Lgcm_enc_abort:
|
||||
movq %r10,%rax
|
||||
.byte 0xf3,0xc3
|
||||
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt
|
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.Lone_msb:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
#endif
|
@ -0,0 +1,671 @@
|
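/* x86_64 MD5 compression function: md5_block_asm_data_order takes the state
   in %rdi, the input in %rsi and the number of 64-byte blocks in %rdx, and
   runs the four fully unrolled MD5 rounds over each block. */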
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
.text
.align 16

.globl md5_block_asm_data_order
.hidden md5_block_asm_data_order
.type md5_block_asm_data_order,@function
md5_block_asm_data_order:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r14
pushq %r15
.Lprologue:

movq %rdi,%rbp
shlq $6,%rdx
leaq (%rsi,%rdx,1),%rdi
movl 0(%rbp),%eax
movl 4(%rbp),%ebx
movl 8(%rbp),%ecx
movl 12(%rbp),%edx

cmpq %rdi,%rsi
je .Lend

.Lloop:
|
||||
movl %eax,%r8d
|
||||
movl %ebx,%r9d
|
||||
movl %ecx,%r14d
|
||||
movl %edx,%r15d
|
||||
movl 0(%rsi),%r10d
|
||||
movl %edx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
leal -680876936(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 4(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal -389564586(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 8(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal 606105819(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 12(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal -1044525330(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 16(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
xorl %ecx,%r11d
|
||||
leal -176418897(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 20(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal 1200080426(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 24(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal -1473231341(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 28(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal -45705983(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 32(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
xorl %ecx,%r11d
|
||||
leal 1770035416(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 36(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal -1958414417(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 40(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal -42063(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 44(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal -1990404162(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 48(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
xorl %ecx,%r11d
|
||||
leal 1804603682(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 52(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal -40341101(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 56(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal -1502002290(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 60(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal 1236535329(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 0(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
movl 4(%rsi),%r10d
|
||||
movl %edx,%r11d
|
||||
movl %edx,%r12d
|
||||
notl %r11d
|
||||
leal -165796510(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 24(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal -1069501632(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 44(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal 643717713(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 0(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal -373897302(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 20(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
notl %r11d
|
||||
leal -701558691(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 40(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal 38016083(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 60(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal -660478335(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 16(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal -405537848(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 36(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
notl %r11d
|
||||
leal 568446438(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 56(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal -1019803690(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 12(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal -187363961(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 32(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal 1163531501(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 52(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
notl %r11d
|
||||
leal -1444681467(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 8(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal -51403784(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 28(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal 1735328473(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 48(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal -1926607734(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 0(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
movl 20(%rsi),%r10d
|
||||
movl %ecx,%r11d
|
||||
leal -378558(%rax,%r10,1),%eax
|
||||
movl 32(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -2022574463(%rdx,%r10,1),%edx
|
||||
movl 44(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal 1839030562(%rcx,%r10,1),%ecx
|
||||
movl 56(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -35309556(%rbx,%r10,1),%ebx
|
||||
movl 4(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal -1530992060(%rax,%r10,1),%eax
|
||||
movl 16(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal 1272893353(%rdx,%r10,1),%edx
|
||||
movl 28(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal -155497632(%rcx,%r10,1),%ecx
|
||||
movl 40(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -1094730640(%rbx,%r10,1),%ebx
|
||||
movl 52(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal 681279174(%rax,%r10,1),%eax
|
||||
movl 0(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -358537222(%rdx,%r10,1),%edx
|
||||
movl 12(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal -722521979(%rcx,%r10,1),%ecx
|
||||
movl 24(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal 76029189(%rbx,%r10,1),%ebx
|
||||
movl 36(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal -640364487(%rax,%r10,1),%eax
|
||||
movl 48(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -421815835(%rdx,%r10,1),%edx
|
||||
movl 60(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal 530742520(%rcx,%r10,1),%ecx
|
||||
movl 8(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -995338651(%rbx,%r10,1),%ebx
|
||||
movl 0(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
movl 0(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
xorl %edx,%r11d
|
||||
leal -198630844(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 28(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal 1126891415(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 56(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal -1416354905(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 20(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -57434055(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 48(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal 1700485571(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 12(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -1894986606(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 40(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal -1051523(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 4(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -2054922799(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 32(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal 1873313359(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 60(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -30611744(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 24(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal -1560198380(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 52(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal 1309151649(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 16(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal -145523070(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 44(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -1120210379(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 8(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal 718787259(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 36(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -343485551(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 0(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
|
||||
addl %r8d,%eax
|
||||
addl %r9d,%ebx
|
||||
addl %r14d,%ecx
|
||||
addl %r15d,%edx
|
||||
|
||||
|
||||
addq $64,%rsi
|
||||
cmpq %rdi,%rsi
|
||||
jb .Lloop
|
||||
|
||||
|
||||
.Lend:
|
||||
movl %eax,0(%rbp)
|
||||
movl %ebx,4(%rbp)
|
||||
movl %ecx,8(%rbp)
|
||||
movl %edx,12(%rbp)
|
||||
|
||||
movq (%rsp),%r15
|
||||
movq 8(%rsp),%r14
|
||||
movq 16(%rsp),%r12
|
||||
movq 24(%rsp),%rbx
|
||||
movq 32(%rsp),%rbp
|
||||
addq $40,%rsp
|
||||
.Lepilogue:
|
||||
.byte 0xf3,0xc3
|
||||
.size md5_block_asm_data_order,.-md5_block_asm_data_order
|
||||
#endif
|
File diff suppressed because it is too large
|
@ -0,0 +1,48 @@
|
|||
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
|
||||
.text
|
||||
|
||||
|
||||
|
||||
|
||||
.globl CRYPTO_rdrand
|
||||
.hidden CRYPTO_rdrand
|
||||
.type CRYPTO_rdrand,@function
|
||||
.align 16
|
||||
CRYPTO_rdrand:
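# writes one 64-bit RDRAND value to (%rdi); %rax returns 1 if the instruction succeeded (CF set), 0 otherwise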
|
||||
xorq %rax,%rax
|
||||
|
||||
|
||||
.byte 0x48, 0x0f, 0xc7, 0xf1
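# rdrand %rcx (hard-coded encoding)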
|
||||
|
||||
adcq %rax,%rax
|
||||
movq %rcx,0(%rdi)
|
||||
.byte 0xf3,0xc3
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.globl CRYPTO_rdrand_multiple8_buf
|
||||
.hidden CRYPTO_rdrand_multiple8_buf
|
||||
.type CRYPTO_rdrand_multiple8_buf,@function
|
||||
.align 16
|
||||
CRYPTO_rdrand_multiple8_buf:
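# fills the %rsi-byte buffer at %rdi (length expected to be a multiple of 8) with RDRAND output,
# 8 bytes per iteration; returns 1 in %rax on success, 0 if any RDRAND fails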
|
||||
testq %rsi,%rsi
|
||||
jz .Lout
|
||||
movq $8,%rdx
|
||||
.Lloop:
|
||||
|
||||
|
||||
.byte 0x48, 0x0f, 0xc7, 0xf1
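# rdrand %rcx (hard-coded encoding)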
|
||||
jnc .Lerr
|
||||
movq %rcx,0(%rdi)
|
||||
addq %rdx,%rdi
|
||||
subq %rdx,%rsi
|
||||
jnz .Lloop
|
||||
.Lout:
|
||||
movq $1,%rax
|
||||
.byte 0xf3,0xc3
|
||||
.Lerr:
|
||||
xorq %rax,%rax
|
||||
.byte 0xf3,0xc3
|
||||
#endif
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,834 @@
|
|||
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
|
||||
.text
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_encrypt_core,@function
|
||||
.align 16
|
||||
_vpaes_encrypt_core:
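# encrypts the block in %xmm0 with the schedule at %rdx and leaves the result in %xmm0;
# %xmm9-%xmm15 are expected to hold the constants loaded by _vpaes_preheat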
|
||||
movq %rdx,%r9
|
||||
movq $16,%r11
|
||||
movl 240(%rdx),%eax
|
||||
movdqa %xmm9,%xmm1
|
||||
movdqa .Lk_ipt(%rip),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
movdqu (%r9),%xmm5
|
||||
psrld $4,%xmm1
|
||||
pand %xmm9,%xmm0
|
||||
.byte 102,15,56,0,208
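# pshufb %xmm0,%xmm2; the .byte 102,15,56,0,... sequences in this file are hand-encoded pshufb instructions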
|
||||
movdqa .Lk_ipt+16(%rip),%xmm0
|
||||
.byte 102,15,56,0,193
|
||||
pxor %xmm5,%xmm2
|
||||
addq $16,%r9
|
||||
pxor %xmm2,%xmm0
|
||||
leaq .Lk_mc_backward(%rip),%r10
|
||||
jmp .Lenc_entry
|
||||
|
||||
.align 16
|
||||
.Lenc_loop:
|
||||
|
||||
movdqa %xmm13,%xmm4
|
||||
movdqa %xmm12,%xmm0
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm5,%xmm4
|
||||
movdqa %xmm15,%xmm5
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa -64(%r11,%r10,1),%xmm1
|
||||
.byte 102,15,56,0,234
|
||||
movdqa (%r11,%r10,1),%xmm4
|
||||
movdqa %xmm14,%xmm2
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm0,%xmm3
|
||||
pxor %xmm5,%xmm2
|
||||
.byte 102,15,56,0,193
|
||||
addq $16,%r9
|
||||
pxor %xmm2,%xmm0
|
||||
.byte 102,15,56,0,220
|
||||
addq $16,%r11
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,193
|
||||
andq $0x30,%r11
|
||||
subq $1,%rax
|
||||
pxor %xmm3,%xmm0
|
||||
|
||||
.Lenc_entry:
|
||||
|
||||
movdqa %xmm9,%xmm1
|
||||
movdqa %xmm11,%xmm5
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm9,%xmm0
|
||||
.byte 102,15,56,0,232
|
||||
movdqa %xmm10,%xmm3
|
||||
pxor %xmm1,%xmm0
|
||||
.byte 102,15,56,0,217
|
||||
movdqa %xmm10,%xmm4
|
||||
pxor %xmm5,%xmm3
|
||||
.byte 102,15,56,0,224
|
||||
movdqa %xmm10,%xmm2
|
||||
pxor %xmm5,%xmm4
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm10,%xmm3
|
||||
pxor %xmm0,%xmm2
|
||||
.byte 102,15,56,0,220
|
||||
movdqu (%r9),%xmm5
|
||||
pxor %xmm1,%xmm3
|
||||
jnz .Lenc_loop
|
||||
|
||||
|
||||
movdqa -96(%r10),%xmm4
|
||||
movdqa -80(%r10),%xmm0
|
||||
.byte 102,15,56,0,226
|
||||
pxor %xmm5,%xmm4
|
||||
.byte 102,15,56,0,195
|
||||
movdqa 64(%r11,%r10,1),%xmm1
|
||||
pxor %xmm4,%xmm0
|
||||
.byte 102,15,56,0,193
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_decrypt_core,@function
|
||||
.align 16
|
||||
_vpaes_decrypt_core:
|
||||
movq %rdx,%r9
|
||||
movl 240(%rdx),%eax
|
||||
movdqa %xmm9,%xmm1
|
||||
movdqa .Lk_dipt(%rip),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
movq %rax,%r11
|
||||
psrld $4,%xmm1
|
||||
movdqu (%r9),%xmm5
|
||||
shlq $4,%r11
|
||||
pand %xmm9,%xmm0
|
||||
.byte 102,15,56,0,208
|
||||
movdqa .Lk_dipt+16(%rip),%xmm0
|
||||
xorq $0x30,%r11
|
||||
leaq .Lk_dsbd(%rip),%r10
|
||||
.byte 102,15,56,0,193
|
||||
andq $0x30,%r11
|
||||
pxor %xmm5,%xmm2
|
||||
movdqa .Lk_mc_forward+48(%rip),%xmm5
|
||||
pxor %xmm2,%xmm0
|
||||
addq $16,%r9
|
||||
addq %r10,%r11
|
||||
jmp .Ldec_entry
|
||||
|
||||
.align 16
|
||||
.Ldec_loop:
|
||||
|
||||
|
||||
|
||||
movdqa -32(%r10),%xmm4
|
||||
movdqa -16(%r10),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa 0(%r10),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 16(%r10),%xmm1
|
||||
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa 32(%r10),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 48(%r10),%xmm1
|
||||
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa 64(%r10),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 80(%r10),%xmm1
|
||||
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
addq $16,%r9
|
||||
.byte 102,15,58,15,237,12
|
||||
pxor %xmm1,%xmm0
|
||||
subq $1,%rax
|
||||
|
||||
.Ldec_entry:
|
||||
|
||||
movdqa %xmm9,%xmm1
|
||||
pandn %xmm0,%xmm1
|
||||
movdqa %xmm11,%xmm2
|
||||
psrld $4,%xmm1
|
||||
pand %xmm9,%xmm0
|
||||
.byte 102,15,56,0,208
|
||||
movdqa %xmm10,%xmm3
|
||||
pxor %xmm1,%xmm0
|
||||
.byte 102,15,56,0,217
|
||||
movdqa %xmm10,%xmm4
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,224
|
||||
pxor %xmm2,%xmm4
|
||||
movdqa %xmm10,%xmm2
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm10,%xmm3
|
||||
pxor %xmm0,%xmm2
|
||||
.byte 102,15,56,0,220
|
||||
movdqu (%r9),%xmm0
|
||||
pxor %xmm1,%xmm3
|
||||
jnz .Ldec_loop
|
||||
|
||||
|
||||
movdqa 96(%r10),%xmm4
|
||||
.byte 102,15,56,0,226
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa 112(%r10),%xmm0
|
||||
movdqa -352(%r11),%xmm2
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm4,%xmm0
|
||||
.byte 102,15,56,0,194
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_schedule_core,@function
|
||||
.align 16
|
||||
_vpaes_schedule_core:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
call _vpaes_preheat
|
||||
movdqa .Lk_rcon(%rip),%xmm8
|
||||
movdqu (%rdi),%xmm0
|
||||
|
||||
|
||||
movdqa %xmm0,%xmm3
|
||||
leaq .Lk_ipt(%rip),%r11
|
||||
call _vpaes_schedule_transform
|
||||
movdqa %xmm0,%xmm7
|
||||
|
||||
leaq .Lk_sr(%rip),%r10
|
||||
testq %rcx,%rcx
|
||||
jnz .Lschedule_am_decrypting
|
||||
|
||||
|
||||
movdqu %xmm0,(%rdx)
|
||||
jmp .Lschedule_go
|
||||
|
||||
.Lschedule_am_decrypting:
|
||||
|
||||
movdqa (%r8,%r10,1),%xmm1
|
||||
.byte 102,15,56,0,217
|
||||
movdqu %xmm3,(%rdx)
|
||||
xorq $0x30,%r8
|
||||
|
||||
.Lschedule_go:
|
||||
cmpl $192,%esi
|
||||
ja .Lschedule_256
|
||||
je .Lschedule_192
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.Lschedule_128:
|
||||
movl $10,%esi
|
||||
|
||||
.Loop_schedule_128:
|
||||
call _vpaes_schedule_round
|
||||
decq %rsi
|
||||
jz .Lschedule_mangle_last
|
||||
call _vpaes_schedule_mangle
|
||||
jmp .Loop_schedule_128
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.align 16
|
||||
.Lschedule_192:
|
||||
movdqu 8(%rdi),%xmm0
|
||||
call _vpaes_schedule_transform
|
||||
movdqa %xmm0,%xmm6
|
||||
pxor %xmm4,%xmm4
|
||||
movhlps %xmm4,%xmm6
|
||||
movl $4,%esi
|
||||
|
||||
.Loop_schedule_192:
|
||||
call _vpaes_schedule_round
|
||||
.byte 102,15,58,15,198,8
|
||||
call _vpaes_schedule_mangle
|
||||
call _vpaes_schedule_192_smear
|
||||
call _vpaes_schedule_mangle
|
||||
call _vpaes_schedule_round
|
||||
decq %rsi
|
||||
jz .Lschedule_mangle_last
|
||||
call _vpaes_schedule_mangle
|
||||
call _vpaes_schedule_192_smear
|
||||
jmp .Loop_schedule_192
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.align 16
|
||||
.Lschedule_256:
|
||||
movdqu 16(%rdi),%xmm0
|
||||
call _vpaes_schedule_transform
|
||||
movl $7,%esi
|
||||
|
||||
.Loop_schedule_256:
|
||||
call _vpaes_schedule_mangle
|
||||
movdqa %xmm0,%xmm6
|
||||
|
||||
|
||||
call _vpaes_schedule_round
|
||||
decq %rsi
|
||||
jz .Lschedule_mangle_last
|
||||
call _vpaes_schedule_mangle
|
||||
|
||||
|
||||
pshufd $0xFF,%xmm0,%xmm0
|
||||
movdqa %xmm7,%xmm5
|
||||
movdqa %xmm6,%xmm7
|
||||
call _vpaes_schedule_low_round
|
||||
movdqa %xmm5,%xmm7
|
||||
|
||||
jmp .Loop_schedule_256
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.align 16
|
||||
.Lschedule_mangle_last:
|
||||
|
||||
leaq .Lk_deskew(%rip),%r11
|
||||
testq %rcx,%rcx
|
||||
jnz .Lschedule_mangle_last_dec
|
||||
|
||||
|
||||
movdqa (%r8,%r10,1),%xmm1
|
||||
.byte 102,15,56,0,193
|
||||
leaq .Lk_opt(%rip),%r11
|
||||
addq $32,%rdx
|
||||
|
||||
.Lschedule_mangle_last_dec:
|
||||
addq $-16,%rdx
|
||||
pxor .Lk_s63(%rip),%xmm0
|
||||
call _vpaes_schedule_transform
|
||||
movdqu %xmm0,(%rdx)
|
||||
|
||||
|
||||
pxor %xmm0,%xmm0
|
||||
pxor %xmm1,%xmm1
|
||||
pxor %xmm2,%xmm2
|
||||
pxor %xmm3,%xmm3
|
||||
pxor %xmm4,%xmm4
|
||||
pxor %xmm5,%xmm5
|
||||
pxor %xmm6,%xmm6
|
||||
pxor %xmm7,%xmm7
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_schedule_core,.-_vpaes_schedule_core
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_schedule_192_smear,@function
|
||||
.align 16
|
||||
_vpaes_schedule_192_smear:
|
||||
pshufd $0x80,%xmm6,%xmm1
|
||||
pshufd $0xFE,%xmm7,%xmm0
|
||||
pxor %xmm1,%xmm6
|
||||
pxor %xmm1,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm6,%xmm0
|
||||
movhlps %xmm1,%xmm6
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_schedule_round,@function
|
||||
.align 16
|
||||
_vpaes_schedule_round:
|
||||
|
||||
pxor %xmm1,%xmm1
|
||||
.byte 102,65,15,58,15,200,15
|
||||
.byte 102,69,15,58,15,192,15
|
||||
pxor %xmm1,%xmm7
|
||||
|
||||
|
||||
pshufd $0xFF,%xmm0,%xmm0
|
||||
.byte 102,15,58,15,192,1
|
||||
|
||||
|
||||
|
||||
|
||||
_vpaes_schedule_low_round:
|
||||
|
||||
movdqa %xmm7,%xmm1
|
||||
pslldq $4,%xmm7
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm7,%xmm1
|
||||
pslldq $8,%xmm7
|
||||
pxor %xmm1,%xmm7
|
||||
pxor .Lk_s63(%rip),%xmm7
|
||||
|
||||
|
||||
movdqa %xmm9,%xmm1
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm9,%xmm0
|
||||
movdqa %xmm11,%xmm2
|
||||
.byte 102,15,56,0,208
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa %xmm10,%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
movdqa %xmm10,%xmm4
|
||||
.byte 102,15,56,0,224
|
||||
pxor %xmm2,%xmm4
|
||||
movdqa %xmm10,%xmm2
|
||||
.byte 102,15,56,0,211
|
||||
pxor %xmm0,%xmm2
|
||||
movdqa %xmm10,%xmm3
|
||||
.byte 102,15,56,0,220
|
||||
pxor %xmm1,%xmm3
|
||||
movdqa %xmm13,%xmm4
|
||||
.byte 102,15,56,0,226
|
||||
movdqa %xmm12,%xmm0
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm4,%xmm0
|
||||
|
||||
|
||||
pxor %xmm7,%xmm0
|
||||
movdqa %xmm0,%xmm7
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_schedule_round,.-_vpaes_schedule_round
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_schedule_transform,@function
|
||||
.align 16
|
||||
_vpaes_schedule_transform:
|
||||
movdqa %xmm9,%xmm1
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm9,%xmm0
|
||||
movdqa (%r11),%xmm2
|
||||
.byte 102,15,56,0,208
|
||||
movdqa 16(%r11),%xmm0
|
||||
.byte 102,15,56,0,193
|
||||
pxor %xmm2,%xmm0
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_schedule_mangle,@function
|
||||
.align 16
|
||||
_vpaes_schedule_mangle:
|
||||
movdqa %xmm0,%xmm4
|
||||
movdqa .Lk_mc_forward(%rip),%xmm5
|
||||
testq %rcx,%rcx
|
||||
jnz .Lschedule_mangle_dec
|
||||
|
||||
|
||||
addq $16,%rdx
|
||||
pxor .Lk_s63(%rip),%xmm4
|
||||
.byte 102,15,56,0,229
|
||||
movdqa %xmm4,%xmm3
|
||||
.byte 102,15,56,0,229
|
||||
pxor %xmm4,%xmm3
|
||||
.byte 102,15,56,0,229
|
||||
pxor %xmm4,%xmm3
|
||||
|
||||
jmp .Lschedule_mangle_both
|
||||
.align 16
|
||||
.Lschedule_mangle_dec:
|
||||
|
||||
leaq .Lk_dksd(%rip),%r11
|
||||
movdqa %xmm9,%xmm1
|
||||
pandn %xmm4,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm9,%xmm4
|
||||
|
||||
movdqa 0(%r11),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
movdqa 16(%r11),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
|
||||
movdqa 32(%r11),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 48(%r11),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
|
||||
movdqa 64(%r11),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 80(%r11),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
|
||||
movdqa 96(%r11),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 112(%r11),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
|
||||
addq $-16,%rdx
|
||||
|
||||
.Lschedule_mangle_both:
|
||||
movdqa (%r8,%r10,1),%xmm1
|
||||
.byte 102,15,56,0,217
|
||||
addq $-16,%r8
|
||||
andq $0x30,%r8
|
||||
movdqu %xmm3,(%rdx)
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
|
||||
|
||||
|
||||
|
||||
|
||||
.globl vpaes_set_encrypt_key
|
||||
.hidden vpaes_set_encrypt_key
|
||||
.type vpaes_set_encrypt_key,@function
|
||||
.align 16
|
||||
vpaes_set_encrypt_key:
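# stores bits/32 + 5 at key+240 (the rounds slot in OpenSSL's AES_KEY layout) before expanding the encryption schedule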
|
||||
movl %esi,%eax
|
||||
shrl $5,%eax
|
||||
addl $5,%eax
|
||||
movl %eax,240(%rdx)
|
||||
|
||||
movl $0,%ecx
|
||||
movl $0x30,%r8d
|
||||
call _vpaes_schedule_core
|
||||
xorl %eax,%eax
|
||||
.byte 0xf3,0xc3
|
||||
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
|
||||
|
||||
.globl vpaes_set_decrypt_key
|
||||
.hidden vpaes_set_decrypt_key
|
||||
.type vpaes_set_decrypt_key,@function
|
||||
.align 16
|
||||
vpaes_set_decrypt_key:
|
||||
movl %esi,%eax
|
||||
shrl $5,%eax
|
||||
addl $5,%eax
|
||||
movl %eax,240(%rdx)
|
||||
shll $4,%eax
|
||||
leaq 16(%rdx,%rax,1),%rdx
|
||||
|
||||
movl $1,%ecx
|
||||
movl %esi,%r8d
|
||||
shrl $1,%r8d
|
||||
andl $32,%r8d
|
||||
xorl $32,%r8d
|
||||
call _vpaes_schedule_core
|
||||
xorl %eax,%eax
|
||||
.byte 0xf3,0xc3
|
||||
.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
|
||||
|
||||
.globl vpaes_encrypt
|
||||
.hidden vpaes_encrypt
|
||||
.type vpaes_encrypt,@function
|
||||
.align 16
|
||||
vpaes_encrypt:
|
||||
movdqu (%rdi),%xmm0
|
||||
call _vpaes_preheat
|
||||
call _vpaes_encrypt_core
|
||||
movdqu %xmm0,(%rsi)
|
||||
.byte 0xf3,0xc3
|
||||
.size vpaes_encrypt,.-vpaes_encrypt
|
||||
|
||||
.globl vpaes_decrypt
|
||||
.hidden vpaes_decrypt
|
||||
.type vpaes_decrypt,@function
|
||||
.align 16
|
||||
vpaes_decrypt:
|
||||
movdqu (%rdi),%xmm0
|
||||
call _vpaes_preheat
|
||||
call _vpaes_decrypt_core
|
||||
movdqu %xmm0,(%rsi)
|
||||
.byte 0xf3,0xc3
|
||||
.size vpaes_decrypt,.-vpaes_decrypt
|
||||
.globl vpaes_cbc_encrypt
|
||||
.hidden vpaes_cbc_encrypt
|
||||
.type vpaes_cbc_encrypt,@function
|
||||
.align 16
|
||||
vpaes_cbc_encrypt:
|
||||
xchgq %rcx,%rdx
|
||||
subq $16,%rcx
|
||||
jc .Lcbc_abort
|
||||
movdqu (%r8),%xmm6
|
||||
subq %rdi,%rsi
|
||||
call _vpaes_preheat
|
||||
cmpl $0,%r9d
|
||||
je .Lcbc_dec_loop
|
||||
jmp .Lcbc_enc_loop
|
||||
.align 16
|
||||
.Lcbc_enc_loop:
|
||||
movdqu (%rdi),%xmm0
|
||||
pxor %xmm6,%xmm0
|
||||
call _vpaes_encrypt_core
|
||||
movdqa %xmm0,%xmm6
|
||||
movdqu %xmm0,(%rsi,%rdi,1)
|
||||
leaq 16(%rdi),%rdi
|
||||
subq $16,%rcx
|
||||
jnc .Lcbc_enc_loop
|
||||
jmp .Lcbc_done
|
||||
.align 16
|
||||
.Lcbc_dec_loop:
|
||||
movdqu (%rdi),%xmm0
|
||||
movdqa %xmm0,%xmm7
|
||||
call _vpaes_decrypt_core
|
||||
pxor %xmm6,%xmm0
|
||||
movdqa %xmm7,%xmm6
|
||||
movdqu %xmm0,(%rsi,%rdi,1)
|
||||
leaq 16(%rdi),%rdi
|
||||
subq $16,%rcx
|
||||
jnc .Lcbc_dec_loop
|
||||
.Lcbc_done:
|
||||
movdqu %xmm6,(%r8)
|
||||
.Lcbc_abort:
|
||||
.byte 0xf3,0xc3
|
||||
.size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_preheat,@function
|
||||
.align 16
|
||||
_vpaes_preheat:
|
||||
leaq .Lk_s0F(%rip),%r10
|
||||
movdqa -32(%r10),%xmm10
|
||||
movdqa -16(%r10),%xmm11
|
||||
movdqa 0(%r10),%xmm9
|
||||
movdqa 48(%r10),%xmm13
|
||||
movdqa 64(%r10),%xmm12
|
||||
movdqa 80(%r10),%xmm15
|
||||
movdqa 96(%r10),%xmm14
|
||||
.byte 0xf3,0xc3
|
||||
.size _vpaes_preheat,.-_vpaes_preheat
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.type _vpaes_consts,@object
|
||||
.align 64
|
||||
_vpaes_consts:
|
||||
.Lk_inv:
|
||||
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
|
||||
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
|
||||
|
||||
.Lk_s0F:
|
||||
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
|
||||
|
||||
.Lk_ipt:
|
||||
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
|
||||
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
|
||||
|
||||
.Lk_sb1:
|
||||
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
|
||||
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
|
||||
.Lk_sb2:
|
||||
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
|
||||
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
|
||||
.Lk_sbo:
|
||||
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
|
||||
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
|
||||
|
||||
.Lk_mc_forward:
|
||||
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
|
||||
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
|
||||
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
|
||||
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
|
||||
|
||||
.Lk_mc_backward:
|
||||
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
|
||||
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
|
||||
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
|
||||
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
|
||||
|
||||
.Lk_sr:
|
||||
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
|
||||
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
|
||||
.quad 0x0F060D040B020900, 0x070E050C030A0108
|
||||
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
|
||||
|
||||
.Lk_rcon:
|
||||
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
|
||||
|
||||
.Lk_s63:
|
||||
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
|
||||
|
||||
.Lk_opt:
|
||||
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
|
||||
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
|
||||
|
||||
.Lk_deskew:
|
||||
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
|
||||
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.Lk_dksd:
|
||||
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
|
||||
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
|
||||
.Lk_dksb:
|
||||
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
|
||||
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
|
||||
.Lk_dkse:
|
||||
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
|
||||
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
|
||||
.Lk_dks9:
|
||||
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
|
||||
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.Lk_dipt:
|
||||
.quad 0x0F505B040B545F00, 0x154A411E114E451A
|
||||
.quad 0x86E383E660056500, 0x12771772F491F194
|
||||
|
||||
.Lk_dsb9:
|
||||
.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
|
||||
.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
|
||||
.Lk_dsbd:
|
||||
.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
|
||||
.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
|
||||
.Lk_dsbb:
|
||||
.quad 0xD022649296B44200, 0x602646F6B0F2D404
|
||||
.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
|
||||
.Lk_dsbe:
|
||||
.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
|
||||
.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
|
||||
.Lk_dsbo:
|
||||
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
|
||||
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
|
||||
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
|
||||
.align 64
|
||||
.size _vpaes_consts,.-_vpaes_consts
|
||||
#endif
|
|
@ -0,0 +1,866 @@
|
|||
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
|
||||
.text
|
||||
|
||||
.extern OPENSSL_ia32cap_P
|
||||
.hidden OPENSSL_ia32cap_P
|
||||
|
||||
.globl bn_mul_mont
|
||||
.hidden bn_mul_mont
|
||||
.type bn_mul_mont,@function
|
||||
.align 16
|
||||
bn_mul_mont:
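# Montgomery multiplication, OpenSSL bn_mul_mont convention: %rdi = result, %rsi = a, %rdx = b,
# %rcx = modulus n, %r8 = &n0, %r9d = length in 64-bit words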
|
||||
.cfi_startproc
|
||||
movl %r9d,%r9d
|
||||
movq %rsp,%rax
|
||||
.cfi_def_cfa_register %rax
|
||||
testl $3,%r9d
|
||||
jnz .Lmul_enter
|
||||
cmpl $8,%r9d
|
||||
jb .Lmul_enter
|
||||
cmpq %rsi,%rdx
|
||||
jne .Lmul4x_enter
|
||||
testl $7,%r9d
|
||||
jz .Lsqr8x_enter
|
||||
jmp .Lmul4x_enter
|
||||
|
||||
.align 16
|
||||
.Lmul_enter:
|
||||
pushq %rbx
|
||||
.cfi_offset %rbx,-16
|
||||
pushq %rbp
|
||||
.cfi_offset %rbp,-24
|
||||
pushq %r12
|
||||
.cfi_offset %r12,-32
|
||||
pushq %r13
|
||||
.cfi_offset %r13,-40
|
||||
pushq %r14
|
||||
.cfi_offset %r14,-48
|
||||
pushq %r15
|
||||
.cfi_offset %r15,-56
|
||||
|
||||
negq %r9
|
||||
movq %rsp,%r11
|
||||
leaq -16(%rsp,%r9,8),%r10
|
||||
negq %r9
|
||||
andq $-1024,%r10
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
subq %r10,%r11
|
||||
andq $-4096,%r11
|
||||
leaq (%r10,%r11,1),%rsp
|
||||
movq (%rsp),%r11
|
||||
cmpq %r10,%rsp
|
||||
ja .Lmul_page_walk
|
||||
jmp .Lmul_page_walk_done
|
||||
|
||||
.align 16
|
||||
.Lmul_page_walk:
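# touch the newly reserved stack one 4K page at a time so the guard page is never skipped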
|
||||
leaq -4096(%rsp),%rsp
|
||||
movq (%rsp),%r11
|
||||
cmpq %r10,%rsp
|
||||
ja .Lmul_page_walk
|
||||
.Lmul_page_walk_done:
|
||||
|
||||
movq %rax,8(%rsp,%r9,8)
|
||||
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
|
||||
.Lmul_body:
|
||||
movq %rdx,%r12
|
||||
movq (%r8),%r8
|
||||
movq (%r12),%rbx
|
||||
movq (%rsi),%rax
|
||||
|
||||
xorq %r14,%r14
|
||||
xorq %r15,%r15
|
||||
|
||||
movq %r8,%rbp
|
||||
mulq %rbx
|
||||
movq %rax,%r10
|
||||
movq (%rcx),%rax
|
||||
|
||||
imulq %r10,%rbp
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r10
|
||||
movq 8(%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r13
|
||||
|
||||
leaq 1(%r15),%r15
|
||||
jmp .L1st_enter
|
||||
|
||||
.align 16
|
||||
.L1st:
|
||||
addq %rax,%r13
|
||||
movq (%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%r13
|
||||
movq %r10,%r11
|
||||
adcq $0,%rdx
|
||||
movq %r13,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
|
||||
.L1st_enter:
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq (%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
leaq 1(%r15),%r15
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
cmpq %r9,%r15
|
||||
jne .L1st
|
||||
|
||||
addq %rax,%r13
|
||||
movq (%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
movq %r10,%r11
|
||||
|
||||
xorq %rdx,%rdx
|
||||
addq %r11,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-8(%rsp,%r9,8)
|
||||
movq %rdx,(%rsp,%r9,8)
|
||||
|
||||
leaq 1(%r14),%r14
|
||||
jmp .Louter
|
||||
.align 16
|
||||
.Louter:
|
||||
movq (%r12,%r14,8),%rbx
|
||||
xorq %r15,%r15
|
||||
movq %r8,%rbp
|
||||
movq (%rsp),%r10
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq (%rcx),%rax
|
||||
adcq $0,%rdx
|
||||
|
||||
imulq %r10,%rbp
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r10
|
||||
movq 8(%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
movq 8(%rsp),%r10
|
||||
movq %rdx,%r13
|
||||
|
||||
leaq 1(%r15),%r15
|
||||
jmp .Linner_enter
|
||||
|
||||
.align 16
|
||||
.Linner:
|
||||
addq %rax,%r13
|
||||
movq (%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
movq (%rsp,%r15,8),%r10
|
||||
adcq $0,%rdx
|
||||
movq %r13,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
|
||||
.Linner_enter:
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq (%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%r10
|
||||
movq %rdx,%r11
|
||||
adcq $0,%r11
|
||||
leaq 1(%r15),%r15
|
||||
|
||||
mulq %rbp
|
||||
cmpq %r9,%r15
|
||||
jne .Linner
|
||||
|
||||
addq %rax,%r13
|
||||
movq (%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
movq (%rsp,%r15,8),%r10
|
||||
adcq $0,%rdx
|
||||
movq %r13,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
|
||||
xorq %rdx,%rdx
|
||||
addq %r11,%r13
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-8(%rsp,%r9,8)
|
||||
movq %rdx,(%rsp,%r9,8)
|
||||
|
||||
leaq 1(%r14),%r14
|
||||
cmpq %r9,%r14
|
||||
jb .Louter
|
||||
|
||||
xorq %r14,%r14
|
||||
movq (%rsp),%rax
|
||||
leaq (%rsp),%rsi
|
||||
movq %r9,%r15
|
||||
jmp .Lsub
|
||||
.align 16
|
||||
.Lsub:
|
||||
sbbq (%rcx,%r14,8),%rax
|
||||
movq %rax,(%rdi,%r14,8)
|
||||
movq 8(%rsi,%r14,8),%rax
|
||||
leaq 1(%r14),%r14
|
||||
decq %r15
|
||||
jnz .Lsub
|
||||
|
||||
sbbq $0,%rax
|
||||
xorq %r14,%r14
|
||||
andq %rax,%rsi
|
||||
notq %rax
|
||||
movq %rdi,%rcx
|
||||
andq %rax,%rcx
|
||||
movq %r9,%r15
|
||||
orq %rcx,%rsi
|
||||
.align 16
|
||||
.Lcopy:
|
||||
movq (%rsi,%r14,8),%rax
|
||||
movq %r14,(%rsp,%r14,8)
|
||||
movq %rax,(%rdi,%r14,8)
|
||||
leaq 1(%r14),%r14
|
||||
subq $1,%r15
|
||||
jnz .Lcopy
|
||||
|
||||
movq 8(%rsp,%r9,8),%rsi
|
||||
.cfi_def_cfa %rsi,8
|
||||
movq $1,%rax
|
||||
movq -48(%rsi),%r15
|
||||
.cfi_restore %r15
|
||||
movq -40(%rsi),%r14
|
||||
.cfi_restore %r14
|
||||
movq -32(%rsi),%r13
|
||||
.cfi_restore %r13
|
||||
movq -24(%rsi),%r12
|
||||
.cfi_restore %r12
|
||||
movq -16(%rsi),%rbp
|
||||
.cfi_restore %rbp
|
||||
movq -8(%rsi),%rbx
|
||||
.cfi_restore %rbx
|
||||
leaq (%rsi),%rsp
|
||||
.cfi_def_cfa_register %rsp
|
||||
.Lmul_epilogue:
|
||||
.byte 0xf3,0xc3
|
||||
.cfi_endproc
|
||||
.size bn_mul_mont,.-bn_mul_mont
|
||||
.type bn_mul4x_mont,@function
|
||||
.align 16
|
||||
bn_mul4x_mont:
|
||||
.cfi_startproc
|
||||
movl %r9d,%r9d
|
||||
movq %rsp,%rax
|
||||
.cfi_def_cfa_register %rax
|
||||
.Lmul4x_enter:
|
||||
pushq %rbx
|
||||
.cfi_offset %rbx,-16
|
||||
pushq %rbp
|
||||
.cfi_offset %rbp,-24
|
||||
pushq %r12
|
||||
.cfi_offset %r12,-32
|
||||
pushq %r13
|
||||
.cfi_offset %r13,-40
|
||||
pushq %r14
|
||||
.cfi_offset %r14,-48
|
||||
pushq %r15
|
||||
.cfi_offset %r15,-56
|
||||
|
||||
negq %r9
|
||||
movq %rsp,%r11
|
||||
leaq -32(%rsp,%r9,8),%r10
|
||||
negq %r9
|
||||
andq $-1024,%r10
|
||||
|
||||
subq %r10,%r11
|
||||
andq $-4096,%r11
|
||||
leaq (%r10,%r11,1),%rsp
|
||||
movq (%rsp),%r11
|
||||
cmpq %r10,%rsp
|
||||
ja .Lmul4x_page_walk
|
||||
jmp .Lmul4x_page_walk_done
|
||||
|
||||
.Lmul4x_page_walk:
|
||||
leaq -4096(%rsp),%rsp
|
||||
movq (%rsp),%r11
|
||||
cmpq %r10,%rsp
|
||||
ja .Lmul4x_page_walk
|
||||
.Lmul4x_page_walk_done:
|
||||
|
||||
movq %rax,8(%rsp,%r9,8)
|
||||
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
|
||||
.Lmul4x_body:
|
||||
movq %rdi,16(%rsp,%r9,8)
|
||||
movq %rdx,%r12
|
||||
movq (%r8),%r8
|
||||
movq (%r12),%rbx
|
||||
movq (%rsi),%rax
|
||||
|
||||
xorq %r14,%r14
|
||||
xorq %r15,%r15
|
||||
|
||||
movq %r8,%rbp
|
||||
mulq %rbx
|
||||
movq %rax,%r10
|
||||
movq (%rcx),%rax
|
||||
|
||||
imulq %r10,%rbp
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r10
|
||||
movq 8(%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq 8(%rcx),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq 16(%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
leaq 4(%r15),%r15
|
||||
adcq $0,%rdx
|
||||
movq %rdi,(%rsp)
|
||||
movq %rdx,%r13
|
||||
jmp .L1st4x
|
||||
.align 16
|
||||
.L1st4x:
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq -16(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r13
|
||||
movq -8(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-24(%rsp,%r15,8)
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq -8(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq (%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
adcq $0,%rdx
|
||||
movq %rdi,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq (%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r13
|
||||
movq 8(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-8(%rsp,%r15,8)
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq 8(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
leaq 4(%r15),%r15
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq -16(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
adcq $0,%rdx
|
||||
movq %rdi,-32(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
cmpq %r9,%r15
|
||||
jb .L1st4x
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq -16(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r13
|
||||
movq -8(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-24(%rsp,%r15,8)
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq -8(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq (%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
adcq $0,%rdx
|
||||
movq %rdi,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
|
||||
xorq %rdi,%rdi
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdi
|
||||
movq %r13,-8(%rsp,%r15,8)
|
||||
movq %rdi,(%rsp,%r15,8)
|
||||
|
||||
leaq 1(%r14),%r14
|
||||
.align 4
|
||||
.Louter4x:
|
||||
movq (%r12,%r14,8),%rbx
|
||||
xorq %r15,%r15
|
||||
movq (%rsp),%r10
|
||||
movq %r8,%rbp
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq (%rcx),%rax
|
||||
adcq $0,%rdx
|
||||
|
||||
imulq %r10,%rbp
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r10
|
||||
movq 8(%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq 8(%rcx),%rax
|
||||
adcq $0,%rdx
|
||||
addq 8(%rsp),%r11
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq 16(%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
leaq 4(%r15),%r15
|
||||
adcq $0,%rdx
|
||||
movq %rdi,(%rsp)
|
||||
movq %rdx,%r13
|
||||
jmp .Linner4x
|
||||
.align 16
|
||||
.Linner4x:
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq -16(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq -16(%rsp,%r15,8),%r10
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r13
|
||||
movq -8(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-24(%rsp,%r15,8)
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq -8(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq -8(%rsp,%r15,8),%r11
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq (%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
adcq $0,%rdx
|
||||
movq %rdi,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq (%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq (%rsp,%r15,8),%r10
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r13
|
||||
movq 8(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-8(%rsp,%r15,8)
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq 8(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq 8(%rsp,%r15,8),%r11
|
||||
adcq $0,%rdx
|
||||
leaq 4(%r15),%r15
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq -16(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
adcq $0,%rdx
|
||||
movq %rdi,-32(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
cmpq %r9,%r15
|
||||
jb .Linner4x
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r10
|
||||
movq -16(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq -16(%rsp,%r15,8),%r10
|
||||
adcq $0,%rdx
|
||||
movq %rdx,%r11
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%r13
|
||||
movq -8(%rsi,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdx
|
||||
movq %r13,-24(%rsp,%r15,8)
|
||||
movq %rdx,%rdi
|
||||
|
||||
mulq %rbx
|
||||
addq %rax,%r11
|
||||
movq -8(%rcx,%r15,8),%rax
|
||||
adcq $0,%rdx
|
||||
addq -8(%rsp,%r15,8),%r11
|
||||
adcq $0,%rdx
|
||||
leaq 1(%r14),%r14
|
||||
movq %rdx,%r10
|
||||
|
||||
mulq %rbp
|
||||
addq %rax,%rdi
|
||||
movq (%rsi),%rax
|
||||
adcq $0,%rdx
|
||||
addq %r11,%rdi
|
||||
adcq $0,%rdx
|
||||
movq %rdi,-16(%rsp,%r15,8)
|
||||
movq %rdx,%r13
|
||||
|
||||
xorq %rdi,%rdi
|
||||
addq %r10,%r13
|
||||
adcq $0,%rdi
|
||||
addq (%rsp,%r9,8),%r13
|
||||
adcq $0,%rdi
|
||||
movq %r13,-8(%rsp,%r15,8)
|
||||
movq %rdi,(%rsp,%r15,8)
|
||||
|
||||
cmpq %r9,%r14
|
||||
jb .Louter4x
|
||||
movq 16(%rsp,%r9,8),%rdi
|
||||
leaq -4(%r9),%r15
|
||||
movq 0(%rsp),%rax
|
||||
pxor %xmm0,%xmm0
|
||||
movq 8(%rsp),%rdx
|
||||
shrq $2,%r15
|
||||
leaq (%rsp),%rsi
|
||||
xorq %r14,%r14
|
||||
|
||||
subq 0(%rcx),%rax
|
||||
movq 16(%rsi),%rbx
|
||||
movq 24(%rsi),%rbp
|
||||
sbbq 8(%rcx),%rdx
|
||||
jmp .Lsub4x
|
||||
.align 16
|
||||
.Lsub4x:
|
||||
movq %rax,0(%rdi,%r14,8)
|
||||
movq %rdx,8(%rdi,%r14,8)
|
||||
sbbq 16(%rcx,%r14,8),%rbx
|
||||
movq 32(%rsi,%r14,8),%rax
|
||||
movq 40(%rsi,%r14,8),%rdx
|
||||
sbbq 24(%rcx,%r14,8),%rbp
|
||||
movq %rbx,16(%rdi,%r14,8)
|
||||
movq %rbp,24(%rdi,%r14,8)
|
||||
sbbq 32(%rcx,%r14,8),%rax
|
||||
movq 48(%rsi,%r14,8),%rbx
|
||||
movq 56(%rsi,%r14,8),%rbp
|
||||
sbbq 40(%rcx,%r14,8),%rdx
|
||||
leaq 4(%r14),%r14
|
||||
decq %r15
|
||||
jnz .Lsub4x
|
||||
|
||||
movq %rax,0(%rdi,%r14,8)
|
||||
movq 32(%rsi,%r14,8),%rax
|
||||
sbbq 16(%rcx,%r14,8),%rbx
|
||||
movq %rdx,8(%rdi,%r14,8)
|
||||
sbbq 24(%rcx,%r14,8),%rbp
|
||||
movq %rbx,16(%rdi,%r14,8)
|
||||
|
||||
sbbq $0,%rax
|
||||
movq %rbp,24(%rdi,%r14,8)
|
||||
xorq %r14,%r14
|
||||
andq %rax,%rsi
|
||||
notq %rax
|
||||
movq %rdi,%rcx
|
||||
andq %rax,%rcx
|
||||
leaq -4(%r9),%r15
|
||||
orq %rcx,%rsi
|
||||
shrq $2,%r15
|
||||
|
||||
movdqu (%rsi),%xmm1
|
||||
movdqa %xmm0,(%rsp)
|
||||
movdqu %xmm1,(%rdi)
|
||||
jmp .Lcopy4x
|
||||
.align 16
|
||||
.Lcopy4x:
|
||||
movdqu 16(%rsi,%r14,1),%xmm2
|
||||
movdqu 32(%rsi,%r14,1),%xmm1
|
||||
movdqa %xmm0,16(%rsp,%r14,1)
|
||||
movdqu %xmm2,16(%rdi,%r14,1)
|
||||
movdqa %xmm0,32(%rsp,%r14,1)
|
||||
movdqu %xmm1,32(%rdi,%r14,1)
|
||||
leaq 32(%r14),%r14
|
||||
decq %r15
|
||||
jnz .Lcopy4x
|
||||
|
||||
movdqu 16(%rsi,%r14,1),%xmm2
|
||||
movdqa %xmm0,16(%rsp,%r14,1)
|
||||
movdqu %xmm2,16(%rdi,%r14,1)
|
||||
movq 8(%rsp,%r9,8),%rsi
|
||||
.cfi_def_cfa %rsi, 8
|
||||
movq $1,%rax
|
||||
movq -48(%rsi),%r15
|
||||
.cfi_restore %r15
|
||||
movq -40(%rsi),%r14
|
||||
.cfi_restore %r14
|
||||
movq -32(%rsi),%r13
|
||||
.cfi_restore %r13
|
||||
movq -24(%rsi),%r12
|
||||
.cfi_restore %r12
|
||||
movq -16(%rsi),%rbp
|
||||
.cfi_restore %rbp
|
||||
movq -8(%rsi),%rbx
|
||||
.cfi_restore %rbx
|
||||
leaq (%rsi),%rsp
|
||||
.cfi_def_cfa_register %rsp
|
||||
.Lmul4x_epilogue:
|
||||
.byte 0xf3,0xc3
|
||||
.cfi_endproc
|
||||
.size bn_mul4x_mont,.-bn_mul4x_mont
|
||||
.extern bn_sqr8x_internal
|
||||
.hidden bn_sqr8x_internal
|
||||
|
||||
.type bn_sqr8x_mont,@function
|
||||
.align 32
|
||||
bn_sqr8x_mont:
|
||||
.cfi_startproc
|
||||
movq %rsp,%rax
|
||||
.cfi_def_cfa_register %rax
|
||||
.Lsqr8x_enter:
|
||||
pushq %rbx
|
||||
.cfi_offset %rbx,-16
|
||||
pushq %rbp
|
||||
.cfi_offset %rbp,-24
|
||||
pushq %r12
|
||||
.cfi_offset %r12,-32
|
||||
pushq %r13
|
||||
.cfi_offset %r13,-40
|
||||
pushq %r14
|
||||
.cfi_offset %r14,-48
|
||||
pushq %r15
|
||||
.cfi_offset %r15,-56
|
||||
.Lsqr8x_prologue:
|
||||
|
||||
movl %r9d,%r10d
|
||||
shll $3,%r9d
|
||||
shlq $3+2,%r10
|
||||
negq %r9
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
leaq -64(%rsp,%r9,2),%r11
|
||||
movq %rsp,%rbp
|
||||
movq (%r8),%r8
|
||||
subq %rsi,%r11
|
||||
andq $4095,%r11
|
||||
cmpq %r11,%r10
|
||||
jb .Lsqr8x_sp_alt
|
||||
subq %r11,%rbp
|
||||
leaq -64(%rbp,%r9,2),%rbp
|
||||
jmp .Lsqr8x_sp_done
|
||||
|
||||
.align 32
|
||||
.Lsqr8x_sp_alt:
|
||||
leaq 4096-64(,%r9,2),%r10
|
||||
leaq -64(%rbp,%r9,2),%rbp
|
||||
subq %r10,%r11
|
||||
movq $0,%r10
|
||||
cmovcq %r10,%r11
|
||||
subq %r11,%rbp
|
||||
.Lsqr8x_sp_done:
|
||||
andq $-64,%rbp
|
||||
movq %rsp,%r11
|
||||
subq %rbp,%r11
|
||||
andq $-4096,%r11
|
||||
leaq (%r11,%rbp,1),%rsp
|
||||
movq (%rsp),%r10
|
||||
cmpq %rbp,%rsp
|
||||
ja .Lsqr8x_page_walk
|
||||
jmp .Lsqr8x_page_walk_done
|
||||
|
||||
.align 16
|
||||
.Lsqr8x_page_walk:
|
||||
leaq -4096(%rsp),%rsp
|
||||
movq (%rsp),%r10
|
||||
cmpq %rbp,%rsp
|
||||
ja .Lsqr8x_page_walk
|
||||
.Lsqr8x_page_walk_done:
|
||||
|
||||
movq %r9,%r10
|
||||
negq %r9
|
||||
|
||||
movq %r8,32(%rsp)
|
||||
movq %rax,40(%rsp)
|
||||
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
|
||||
.Lsqr8x_body:
|
||||
|
||||
.byte 102,72,15,110,209
|
||||
pxor %xmm0,%xmm0
|
||||
.byte 102,72,15,110,207
|
||||
.byte 102,73,15,110,218
|
||||
call bn_sqr8x_internal
|
||||
|
||||
|
||||
|
||||
|
||||
leaq (%rdi,%r9,1),%rbx
|
||||
movq %r9,%rcx
|
||||
movq %r9,%rdx
|
||||
.byte 102,72,15,126,207
|
||||
sarq $3+2,%rcx
|
||||
jmp .Lsqr8x_sub
|
||||
|
||||
.align 32
|
||||
.Lsqr8x_sub:
|
||||
movq 0(%rbx),%r12
|
||||
movq 8(%rbx),%r13
|
||||
movq 16(%rbx),%r14
|
||||
movq 24(%rbx),%r15
|
||||
leaq 32(%rbx),%rbx
|
||||
sbbq 0(%rbp),%r12
|
||||
sbbq 8(%rbp),%r13
|
||||
sbbq 16(%rbp),%r14
|
||||
sbbq 24(%rbp),%r15
|
||||
leaq 32(%rbp),%rbp
|
||||
movq %r12,0(%rdi)
|
||||
movq %r13,8(%rdi)
|
||||
movq %r14,16(%rdi)
|
||||
movq %r15,24(%rdi)
|
||||
leaq 32(%rdi),%rdi
|
||||
incq %rcx
|
||||
jnz .Lsqr8x_sub
|
||||
|
||||
sbbq $0,%rax
|
||||
leaq (%rbx,%r9,1),%rbx
|
||||
leaq (%rdi,%r9,1),%rdi
|
||||
|
||||
.byte 102,72,15,110,200
|
||||
pxor %xmm0,%xmm0
|
||||
pshufd $0,%xmm1,%xmm1
|
||||
movq 40(%rsp),%rsi
|
||||
.cfi_def_cfa %rsi,8
|
||||
jmp .Lsqr8x_cond_copy
|
||||
|
||||
.align 32
|
||||
.Lsqr8x_cond_copy:
|
||||
movdqa 0(%rbx),%xmm2
|
||||
movdqa 16(%rbx),%xmm3
|
||||
leaq 32(%rbx),%rbx
|
||||
movdqu 0(%rdi),%xmm4
|
||||
movdqu 16(%rdi),%xmm5
|
||||
leaq 32(%rdi),%rdi
|
||||
movdqa %xmm0,-32(%rbx)
|
||||
movdqa %xmm0,-16(%rbx)
|
||||
movdqa %xmm0,-32(%rbx,%rdx,1)
|
||||
movdqa %xmm0,-16(%rbx,%rdx,1)
|
||||
pcmpeqd %xmm1,%xmm0
|
||||
pand %xmm1,%xmm2
|
||||
pand %xmm1,%xmm3
|
||||
pand %xmm0,%xmm4
|
||||
pand %xmm0,%xmm5
|
||||
pxor %xmm0,%xmm0
|
||||
por %xmm2,%xmm4
|
||||
por %xmm3,%xmm5
|
||||
movdqu %xmm4,-32(%rdi)
|
||||
movdqu %xmm5,-16(%rdi)
|
||||
addq $32,%r9
|
||||
jnz .Lsqr8x_cond_copy
|
||||
|
||||
movq $1,%rax
|
||||
movq -48(%rsi),%r15
|
||||
.cfi_restore %r15
|
||||
movq -40(%rsi),%r14
|
||||
.cfi_restore %r14
|
||||
movq -32(%rsi),%r13
|
||||
.cfi_restore %r13
|
||||
movq -24(%rsi),%r12
|
||||
.cfi_restore %r12
|
||||
movq -16(%rsi),%rbp
|
||||
.cfi_restore %rbp
|
||||
movq -8(%rsi),%rbx
|
||||
.cfi_restore %rbx
|
||||
leaq (%rsi),%rsp
|
||||
.cfi_def_cfa_register %rsp
|
||||
.Lsqr8x_epilogue:
|
||||
.byte 0xf3,0xc3
|
||||
.cfi_endproc
|
||||
.size bn_sqr8x_mont,.-bn_sqr8x_mont
|
||||
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.align 16
|
||||
#endif
|
File diff suppressed because it is too large
|
@ -0,0 +1,968 @@
|
|||
#if defined(__i386__)
|
||||
.text
|
||||
.globl _ChaCha20_ctr32
|
||||
.private_extern _ChaCha20_ctr32
|
||||
.align 4
|
||||
_ChaCha20_ctr32:
|
||||
L_ChaCha20_ctr32_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
xorl %eax,%eax
|
||||
cmpl 28(%esp),%eax
|
||||
je L000no_data
|
||||
call Lpic_point
|
||||
Lpic_point:
|
||||
popl %eax
|
||||
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-Lpic_point(%eax),%ebp
|
||||
testl $16777216,(%ebp)
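# capability gate for the SSSE3 fast path: bit 24 of OPENSSL_ia32cap_P[0] here and, below, bit 9 of word 1
# (assumed to be the FXSR and SSSE3 feature bits)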
|
||||
jz L001x86
|
||||
testl $512,4(%ebp)
|
||||
jz L001x86
|
||||
jmp Lssse3_shortcut
|
||||
L001x86:
|
||||
movl 32(%esp),%esi
|
||||
movl 36(%esp),%edi
|
||||
subl $132,%esp
|
||||
movl (%esi),%eax
|
||||
movl 4(%esi),%ebx
|
||||
movl 8(%esi),%ecx
|
||||
movl 12(%esi),%edx
|
||||
movl %eax,80(%esp)
|
||||
movl %ebx,84(%esp)
|
||||
movl %ecx,88(%esp)
|
||||
movl %edx,92(%esp)
|
||||
movl 16(%esi),%eax
|
||||
movl 20(%esi),%ebx
|
||||
movl 24(%esi),%ecx
|
||||
movl 28(%esi),%edx
|
||||
movl %eax,96(%esp)
|
||||
movl %ebx,100(%esp)
|
||||
movl %ecx,104(%esp)
|
||||
movl %edx,108(%esp)
|
||||
movl (%edi),%eax
|
||||
movl 4(%edi),%ebx
|
||||
movl 8(%edi),%ecx
|
||||
movl 12(%edi),%edx
|
||||
subl $1,%eax
|
||||
movl %eax,112(%esp)
|
||||
movl %ebx,116(%esp)
|
||||
movl %ecx,120(%esp)
|
||||
movl %edx,124(%esp)
|
||||
jmp L002entry
|
||||
.align 4,0x90
|
||||
L003outer_loop:
|
||||
movl %ebx,156(%esp)
|
||||
movl %eax,152(%esp)
|
||||
movl %ecx,160(%esp)
|
||||
L002entry:
|
||||
movl $1634760805,%eax
|
||||
movl $857760878,4(%esp)
|
||||
movl $2036477234,8(%esp)
|
||||
movl $1797285236,12(%esp)
|
||||
movl 84(%esp),%ebx
|
||||
movl 88(%esp),%ebp
|
||||
movl 104(%esp),%ecx
|
||||
movl 108(%esp),%esi
|
||||
movl 116(%esp),%edx
|
||||
movl 120(%esp),%edi
|
||||
movl %ebx,20(%esp)
|
||||
movl %ebp,24(%esp)
|
||||
movl %ecx,40(%esp)
|
||||
movl %esi,44(%esp)
|
||||
movl %edx,52(%esp)
|
||||
movl %edi,56(%esp)
|
||||
movl 92(%esp),%ebx
|
||||
movl 124(%esp),%edi
|
||||
movl 112(%esp),%edx
|
||||
movl 80(%esp),%ebp
|
||||
movl 96(%esp),%ecx
|
||||
movl 100(%esp),%esi
|
||||
addl $1,%edx
|
||||
movl %ebx,28(%esp)
|
||||
movl %edi,60(%esp)
|
||||
movl %edx,112(%esp)
|
||||
movl $10,%ebx
|
||||
jmp L004loop
|
||||
.align 4,0x90
|
||||
L004loop:
|
||||
addl %ebp,%eax
|
||||
movl %ebx,128(%esp)
|
||||
movl %ebp,%ebx
|
||||
xorl %eax,%edx
|
||||
roll $16,%edx
|
||||
addl %edx,%ecx
|
||||
xorl %ecx,%ebx
|
||||
movl 52(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 20(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,(%esp)
|
||||
roll $8,%edx
|
||||
movl 4(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,48(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
movl %ecx,32(%esp)
|
||||
roll $16,%edi
|
||||
movl %ebx,16(%esp)
|
||||
addl %edi,%esi
|
||||
movl 40(%esp),%ecx
|
||||
xorl %esi,%ebp
|
||||
movl 56(%esp),%edx
|
||||
roll $12,%ebp
|
||||
movl 24(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,4(%esp)
|
||||
roll $8,%edi
|
||||
movl 8(%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,52(%esp)
|
||||
xorl %esi,%ebp
|
||||
addl %ebx,%eax
|
||||
roll $7,%ebp
|
||||
xorl %eax,%edx
|
||||
movl %esi,36(%esp)
|
||||
roll $16,%edx
|
||||
movl %ebp,20(%esp)
|
||||
addl %edx,%ecx
|
||||
movl 44(%esp),%esi
|
||||
xorl %ecx,%ebx
|
||||
movl 60(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 28(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,8(%esp)
|
||||
roll $8,%edx
|
||||
movl 12(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,56(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
roll $16,%edi
|
||||
movl %ebx,24(%esp)
|
||||
addl %edi,%esi
|
||||
xorl %esi,%ebp
|
||||
roll $12,%ebp
|
||||
movl 20(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,12(%esp)
|
||||
roll $8,%edi
|
||||
movl (%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,%edx
|
||||
xorl %esi,%ebp
|
||||
addl %ebx,%eax
|
||||
roll $7,%ebp
|
||||
xorl %eax,%edx
|
||||
roll $16,%edx
|
||||
movl %ebp,28(%esp)
|
||||
addl %edx,%ecx
|
||||
xorl %ecx,%ebx
|
||||
movl 48(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 24(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,(%esp)
|
||||
roll $8,%edx
|
||||
movl 4(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,60(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
movl %ecx,40(%esp)
|
||||
roll $16,%edi
|
||||
movl %ebx,20(%esp)
|
||||
addl %edi,%esi
|
||||
movl 32(%esp),%ecx
|
||||
xorl %esi,%ebp
|
||||
movl 52(%esp),%edx
|
||||
roll $12,%ebp
|
||||
movl 28(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,4(%esp)
|
||||
roll $8,%edi
|
||||
movl 8(%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,48(%esp)
|
||||
xorl %esi,%ebp
|
||||
addl %ebx,%eax
|
||||
roll $7,%ebp
|
||||
xorl %eax,%edx
|
||||
movl %esi,44(%esp)
|
||||
roll $16,%edx
|
||||
movl %ebp,24(%esp)
|
||||
addl %edx,%ecx
|
||||
movl 36(%esp),%esi
|
||||
xorl %ecx,%ebx
|
||||
movl 56(%esp),%edi
|
||||
roll $12,%ebx
|
||||
movl 16(%esp),%ebp
|
||||
addl %ebx,%eax
|
||||
xorl %eax,%edx
|
||||
movl %eax,8(%esp)
|
||||
roll $8,%edx
|
||||
movl 12(%esp),%eax
|
||||
addl %edx,%ecx
|
||||
movl %edx,52(%esp)
|
||||
xorl %ecx,%ebx
|
||||
addl %ebp,%eax
|
||||
roll $7,%ebx
|
||||
xorl %eax,%edi
|
||||
roll $16,%edi
|
||||
movl %ebx,28(%esp)
|
||||
addl %edi,%esi
|
||||
xorl %esi,%ebp
|
||||
movl 48(%esp),%edx
|
||||
roll $12,%ebp
|
||||
movl 128(%esp),%ebx
|
||||
addl %ebp,%eax
|
||||
xorl %eax,%edi
|
||||
movl %eax,12(%esp)
|
||||
roll $8,%edi
|
||||
movl (%esp),%eax
|
||||
addl %edi,%esi
|
||||
movl %edi,56(%esp)
|
||||
xorl %esi,%ebp
|
||||
roll $7,%ebp
|
||||
decl %ebx
|
||||
jnz L004loop
|
||||
movl 160(%esp),%ebx
|
||||
addl $1634760805,%eax
|
||||
addl 80(%esp),%ebp
|
||||
addl 96(%esp),%ecx
|
||||
addl 100(%esp),%esi
|
||||
cmpl $64,%ebx
|
||||
jb L005tail
|
||||
movl 156(%esp),%ebx
|
||||
addl 112(%esp),%edx
|
||||
addl 120(%esp),%edi
|
||||
xorl (%ebx),%eax
|
||||
xorl 16(%ebx),%ebp
|
||||
movl %eax,(%esp)
|
||||
movl 152(%esp),%eax
|
||||
xorl 32(%ebx),%ecx
|
||||
xorl 36(%ebx),%esi
|
||||
xorl 48(%ebx),%edx
|
||||
xorl 56(%ebx),%edi
|
||||
movl %ebp,16(%eax)
|
||||
movl %ecx,32(%eax)
|
||||
movl %esi,36(%eax)
|
||||
movl %edx,48(%eax)
|
||||
movl %edi,56(%eax)
|
||||
movl 4(%esp),%ebp
|
||||
movl 8(%esp),%ecx
|
||||
movl 12(%esp),%esi
|
||||
movl 20(%esp),%edx
|
||||
movl 24(%esp),%edi
|
||||
addl $857760878,%ebp
|
||||
addl $2036477234,%ecx
|
||||
addl $1797285236,%esi
|
||||
addl 84(%esp),%edx
|
||||
addl 88(%esp),%edi
|
||||
xorl 4(%ebx),%ebp
|
||||
xorl 8(%ebx),%ecx
|
||||
xorl 12(%ebx),%esi
|
||||
xorl 20(%ebx),%edx
|
||||
xorl 24(%ebx),%edi
|
||||
movl %ebp,4(%eax)
|
||||
movl %ecx,8(%eax)
|
||||
movl %esi,12(%eax)
|
||||
movl %edx,20(%eax)
|
||||
movl %edi,24(%eax)
|
||||
movl 28(%esp),%ebp
|
||||
movl 40(%esp),%ecx
|
||||
movl 44(%esp),%esi
|
||||
movl 52(%esp),%edx
|
||||
movl 60(%esp),%edi
|
||||
addl 92(%esp),%ebp
|
||||
addl 104(%esp),%ecx
|
||||
addl 108(%esp),%esi
|
||||
addl 116(%esp),%edx
|
||||
addl 124(%esp),%edi
|
||||
xorl 28(%ebx),%ebp
|
||||
xorl 40(%ebx),%ecx
|
||||
xorl 44(%ebx),%esi
|
||||
xorl 52(%ebx),%edx
|
||||
xorl 60(%ebx),%edi
|
||||
leal 64(%ebx),%ebx
|
||||
movl %ebp,28(%eax)
|
||||
movl (%esp),%ebp
|
||||
movl %ecx,40(%eax)
|
||||
movl 160(%esp),%ecx
|
||||
movl %esi,44(%eax)
|
||||
movl %edx,52(%eax)
|
||||
movl %edi,60(%eax)
|
||||
movl %ebp,(%eax)
|
||||
leal 64(%eax),%eax
|
||||
subl $64,%ecx
|
||||
jnz L003outer_loop
|
||||
jmp L006done
|
||||
L005tail:
|
||||
addl 112(%esp),%edx
|
||||
addl 120(%esp),%edi
|
||||
movl %eax,(%esp)
|
||||
movl %ebp,16(%esp)
|
||||
movl %ecx,32(%esp)
|
||||
movl %esi,36(%esp)
|
||||
movl %edx,48(%esp)
|
||||
movl %edi,56(%esp)
|
||||
movl 4(%esp),%ebp
|
||||
movl 8(%esp),%ecx
|
||||
movl 12(%esp),%esi
|
||||
movl 20(%esp),%edx
|
||||
movl 24(%esp),%edi
|
||||
addl $857760878,%ebp
|
||||
addl $2036477234,%ecx
|
||||
addl $1797285236,%esi
|
||||
addl 84(%esp),%edx
|
||||
addl 88(%esp),%edi
|
||||
movl %ebp,4(%esp)
|
||||
movl %ecx,8(%esp)
|
||||
movl %esi,12(%esp)
|
||||
movl %edx,20(%esp)
|
||||
movl %edi,24(%esp)
|
||||
movl 28(%esp),%ebp
|
||||
movl 40(%esp),%ecx
|
||||
movl 44(%esp),%esi
|
||||
movl 52(%esp),%edx
|
||||
movl 60(%esp),%edi
|
||||
addl 92(%esp),%ebp
|
||||
addl 104(%esp),%ecx
|
||||
addl 108(%esp),%esi
|
||||
addl 116(%esp),%edx
|
||||
addl 124(%esp),%edi
|
||||
movl %ebp,28(%esp)
|
||||
movl 156(%esp),%ebp
|
||||
movl %ecx,40(%esp)
|
||||
movl 152(%esp),%ecx
|
||||
movl %esi,44(%esp)
|
||||
xorl %esi,%esi
|
||||
movl %edx,52(%esp)
|
||||
movl %edi,60(%esp)
|
||||
xorl %eax,%eax
|
||||
xorl %edx,%edx
|
||||
L007tail_loop:
|
||||
movb (%esi,%ebp,1),%al
|
||||
movb (%esp,%esi,1),%dl
|
||||
leal 1(%esi),%esi
|
||||
xorb %dl,%al
|
||||
movb %al,-1(%ecx,%esi,1)
|
||||
decl %ebx
|
||||
jnz L007tail_loop
|
||||
L006done:
|
||||
addl $132,%esp
|
||||
L000no_data:
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.globl _ChaCha20_ssse3
|
||||
.private_extern _ChaCha20_ssse3
|
||||
.align 4
|
||||
_ChaCha20_ssse3:
|
||||
L_ChaCha20_ssse3_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
Lssse3_shortcut:
|
||||
movl 20(%esp),%edi
|
||||
movl 24(%esp),%esi
|
||||
movl 28(%esp),%ecx
|
||||
movl 32(%esp),%edx
|
||||
movl 36(%esp),%ebx
|
||||
movl %esp,%ebp
|
||||
subl $524,%esp
|
||||
andl $-64,%esp
|
||||
movl %ebp,512(%esp)
|
||||
leal Lssse3_data-Lpic_point(%eax),%eax
|
||||
movdqu (%ebx),%xmm3
|
||||
cmpl $256,%ecx
|
||||
jb L0081x
|
||||
movl %edx,516(%esp)
|
||||
movl %ebx,520(%esp)
|
||||
subl $256,%ecx
|
||||
leal 384(%esp),%ebp
|
||||
movdqu (%edx),%xmm7
|
||||
pshufd $0,%xmm3,%xmm0
|
||||
pshufd $85,%xmm3,%xmm1
|
||||
pshufd $170,%xmm3,%xmm2
|
||||
pshufd $255,%xmm3,%xmm3
|
||||
paddd 48(%eax),%xmm0
|
||||
pshufd $0,%xmm7,%xmm4
|
||||
pshufd $85,%xmm7,%xmm5
|
||||
psubd 64(%eax),%xmm0
|
||||
pshufd $170,%xmm7,%xmm6
|
||||
pshufd $255,%xmm7,%xmm7
|
||||
movdqa %xmm0,64(%ebp)
|
||||
movdqa %xmm1,80(%ebp)
|
||||
movdqa %xmm2,96(%ebp)
|
||||
movdqa %xmm3,112(%ebp)
|
||||
movdqu 16(%edx),%xmm3
|
||||
movdqa %xmm4,-64(%ebp)
|
||||
movdqa %xmm5,-48(%ebp)
|
||||
movdqa %xmm6,-32(%ebp)
|
||||
movdqa %xmm7,-16(%ebp)
|
||||
movdqa 32(%eax),%xmm7
|
||||
leal 128(%esp),%ebx
|
||||
pshufd $0,%xmm3,%xmm0
|
||||
pshufd $85,%xmm3,%xmm1
|
||||
pshufd $170,%xmm3,%xmm2
|
||||
pshufd $255,%xmm3,%xmm3
|
||||
pshufd $0,%xmm7,%xmm4
|
||||
pshufd $85,%xmm7,%xmm5
|
||||
pshufd $170,%xmm7,%xmm6
|
||||
pshufd $255,%xmm7,%xmm7
|
||||
movdqa %xmm0,(%ebp)
|
||||
movdqa %xmm1,16(%ebp)
|
||||
movdqa %xmm2,32(%ebp)
|
||||
movdqa %xmm3,48(%ebp)
|
||||
movdqa %xmm4,-128(%ebp)
|
||||
movdqa %xmm5,-112(%ebp)
|
||||
movdqa %xmm6,-96(%ebp)
|
||||
movdqa %xmm7,-80(%ebp)
|
||||
leal 128(%esi),%esi
|
||||
leal 128(%edi),%edi
|
||||
jmp L009outer_loop
|
||||
.align 4,0x90
|
||||
L009outer_loop:
|
||||
movdqa -112(%ebp),%xmm1
|
||||
movdqa -96(%ebp),%xmm2
|
||||
movdqa -80(%ebp),%xmm3
|
||||
movdqa -48(%ebp),%xmm5
|
||||
movdqa -32(%ebp),%xmm6
|
||||
movdqa -16(%ebp),%xmm7
|
||||
movdqa %xmm1,-112(%ebx)
|
||||
movdqa %xmm2,-96(%ebx)
|
||||
movdqa %xmm3,-80(%ebx)
|
||||
movdqa %xmm5,-48(%ebx)
|
||||
movdqa %xmm6,-32(%ebx)
|
||||
movdqa %xmm7,-16(%ebx)
|
||||
movdqa 32(%ebp),%xmm2
|
||||
movdqa 48(%ebp),%xmm3
|
||||
movdqa 64(%ebp),%xmm4
|
||||
movdqa 80(%ebp),%xmm5
|
||||
movdqa 96(%ebp),%xmm6
|
||||
movdqa 112(%ebp),%xmm7
|
||||
paddd 64(%eax),%xmm4
|
||||
movdqa %xmm2,32(%ebx)
|
||||
movdqa %xmm3,48(%ebx)
|
||||
movdqa %xmm4,64(%ebx)
|
||||
movdqa %xmm5,80(%ebx)
|
||||
movdqa %xmm6,96(%ebx)
|
||||
movdqa %xmm7,112(%ebx)
|
||||
movdqa %xmm4,64(%ebp)
|
||||
movdqa -128(%ebp),%xmm0
|
||||
movdqa %xmm4,%xmm6
|
||||
movdqa -64(%ebp),%xmm3
|
||||
movdqa (%ebp),%xmm4
|
||||
movdqa 16(%ebp),%xmm5
|
||||
movl $10,%edx
|
||||
nop
|
||||
.align 4,0x90
|
||||
L010loop:
|
||||
paddd %xmm3,%xmm0
|
||||
movdqa %xmm3,%xmm2
|
||||
pxor %xmm0,%xmm6
|
||||
pshufb (%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -48(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -112(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 80(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-128(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,64(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
movdqa %xmm4,(%ebx)
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-64(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa 32(%ebx),%xmm4
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa -32(%ebx),%xmm2
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -96(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa 96(%ebx),%xmm6
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-112(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,80(%ebx)
|
||||
pxor %xmm5,%xmm3
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
por %xmm1,%xmm3
|
||||
movdqa %xmm5,16(%ebx)
|
||||
pshufb (%eax),%xmm6
|
||||
movdqa %xmm3,-48(%ebx)
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa 48(%ebx),%xmm5
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -16(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -80(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 112(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-96(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,96(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-32(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa -48(%ebx),%xmm2
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -128(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-80(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,%xmm6
|
||||
pxor %xmm5,%xmm3
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
por %xmm1,%xmm3
|
||||
pshufb (%eax),%xmm6
|
||||
movdqa %xmm3,-16(%ebx)
|
||||
paddd %xmm6,%xmm4
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -32(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -112(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 64(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-128(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,112(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
movdqa %xmm4,32(%ebx)
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-48(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa (%ebx),%xmm4
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa -16(%ebx),%xmm2
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -96(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa 80(%ebx),%xmm6
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-112(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,64(%ebx)
|
||||
pxor %xmm5,%xmm3
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
por %xmm1,%xmm3
|
||||
movdqa %xmm5,48(%ebx)
|
||||
pshufb (%eax),%xmm6
|
||||
movdqa %xmm3,-32(%ebx)
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa 16(%ebx),%xmm5
|
||||
pxor %xmm4,%xmm2
|
||||
movdqa -64(%ebx),%xmm3
|
||||
movdqa %xmm2,%xmm1
|
||||
pslld $12,%xmm2
|
||||
psrld $20,%xmm1
|
||||
por %xmm1,%xmm2
|
||||
movdqa -80(%ebx),%xmm1
|
||||
paddd %xmm2,%xmm0
|
||||
movdqa 96(%ebx),%xmm7
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm0,-96(%ebx)
|
||||
pshufb 16(%eax),%xmm6
|
||||
paddd %xmm6,%xmm4
|
||||
movdqa %xmm6,80(%ebx)
|
||||
pxor %xmm4,%xmm2
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa %xmm2,%xmm0
|
||||
pslld $7,%xmm2
|
||||
psrld $25,%xmm0
|
||||
pxor %xmm1,%xmm7
|
||||
por %xmm0,%xmm2
|
||||
pshufb (%eax),%xmm7
|
||||
movdqa %xmm2,-16(%ebx)
|
||||
paddd %xmm7,%xmm5
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa %xmm3,%xmm0
|
||||
pslld $12,%xmm3
|
||||
psrld $20,%xmm0
|
||||
por %xmm0,%xmm3
|
||||
movdqa -128(%ebx),%xmm0
|
||||
paddd %xmm3,%xmm1
|
||||
movdqa 64(%ebx),%xmm6
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm1,-80(%ebx)
|
||||
pshufb 16(%eax),%xmm7
|
||||
paddd %xmm7,%xmm5
|
||||
movdqa %xmm7,96(%ebx)
|
||||
pxor %xmm5,%xmm3
|
||||
movdqa %xmm3,%xmm1
|
||||
pslld $7,%xmm3
|
||||
psrld $25,%xmm1
|
||||
por %xmm1,%xmm3
|
||||
decl %edx
|
||||
jnz L010loop
|
||||
movdqa %xmm3,-64(%ebx)
|
||||
movdqa %xmm4,(%ebx)
|
||||
movdqa %xmm5,16(%ebx)
|
||||
movdqa %xmm6,64(%ebx)
|
||||
movdqa %xmm7,96(%ebx)
|
||||
movdqa -112(%ebx),%xmm1
|
||||
movdqa -96(%ebx),%xmm2
|
||||
movdqa -80(%ebx),%xmm3
|
||||
paddd -128(%ebp),%xmm0
|
||||
paddd -112(%ebp),%xmm1
|
||||
paddd -96(%ebp),%xmm2
|
||||
paddd -80(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 16(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa -64(%ebx),%xmm0
|
||||
pxor %xmm1,%xmm5
|
||||
movdqa -48(%ebx),%xmm1
|
||||
pxor %xmm2,%xmm6
|
||||
movdqa -32(%ebx),%xmm2
|
||||
pxor %xmm3,%xmm7
|
||||
movdqa -16(%ebx),%xmm3
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 16(%edi),%edi
|
||||
paddd -64(%ebp),%xmm0
|
||||
paddd -48(%ebp),%xmm1
|
||||
paddd -32(%ebp),%xmm2
|
||||
paddd -16(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 16(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa (%ebx),%xmm0
|
||||
pxor %xmm1,%xmm5
|
||||
movdqa 16(%ebx),%xmm1
|
||||
pxor %xmm2,%xmm6
|
||||
movdqa 32(%ebx),%xmm2
|
||||
pxor %xmm3,%xmm7
|
||||
movdqa 48(%ebx),%xmm3
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 16(%edi),%edi
|
||||
paddd (%ebp),%xmm0
|
||||
paddd 16(%ebp),%xmm1
|
||||
paddd 32(%ebp),%xmm2
|
||||
paddd 48(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 16(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa 64(%ebx),%xmm0
|
||||
pxor %xmm1,%xmm5
|
||||
movdqa 80(%ebx),%xmm1
|
||||
pxor %xmm2,%xmm6
|
||||
movdqa 96(%ebx),%xmm2
|
||||
pxor %xmm3,%xmm7
|
||||
movdqa 112(%ebx),%xmm3
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 16(%edi),%edi
|
||||
paddd 64(%ebp),%xmm0
|
||||
paddd 80(%ebp),%xmm1
|
||||
paddd 96(%ebp),%xmm2
|
||||
paddd 112(%ebp),%xmm3
|
||||
movdqa %xmm0,%xmm6
|
||||
punpckldq %xmm1,%xmm0
|
||||
movdqa %xmm2,%xmm7
|
||||
punpckldq %xmm3,%xmm2
|
||||
punpckhdq %xmm1,%xmm6
|
||||
punpckhdq %xmm3,%xmm7
|
||||
movdqa %xmm0,%xmm1
|
||||
punpcklqdq %xmm2,%xmm0
|
||||
movdqa %xmm6,%xmm3
|
||||
punpcklqdq %xmm7,%xmm6
|
||||
punpckhqdq %xmm2,%xmm1
|
||||
punpckhqdq %xmm7,%xmm3
|
||||
movdqu -128(%esi),%xmm4
|
||||
movdqu -64(%esi),%xmm5
|
||||
movdqu (%esi),%xmm2
|
||||
movdqu 64(%esi),%xmm7
|
||||
leal 208(%esi),%esi
|
||||
pxor %xmm0,%xmm4
|
||||
pxor %xmm1,%xmm5
|
||||
pxor %xmm2,%xmm6
|
||||
pxor %xmm3,%xmm7
|
||||
movdqu %xmm4,-128(%edi)
|
||||
movdqu %xmm5,-64(%edi)
|
||||
movdqu %xmm6,(%edi)
|
||||
movdqu %xmm7,64(%edi)
|
||||
leal 208(%edi),%edi
|
||||
subl $256,%ecx
|
||||
jnc L009outer_loop
|
||||
addl $256,%ecx
|
||||
jz L011done
|
||||
movl 520(%esp),%ebx
|
||||
leal -128(%esi),%esi
|
||||
movl 516(%esp),%edx
|
||||
leal -128(%edi),%edi
|
||||
movd 64(%ebp),%xmm2
|
||||
movdqu (%ebx),%xmm3
|
||||
paddd 96(%eax),%xmm2
|
||||
pand 112(%eax),%xmm3
|
||||
por %xmm2,%xmm3
|
||||
L0081x:
|
||||
movdqa 32(%eax),%xmm0
|
||||
movdqu (%edx),%xmm1
|
||||
movdqu 16(%edx),%xmm2
|
||||
movdqa (%eax),%xmm6
|
||||
movdqa 16(%eax),%xmm7
|
||||
movl %ebp,48(%esp)
|
||||
movdqa %xmm0,(%esp)
|
||||
movdqa %xmm1,16(%esp)
|
||||
movdqa %xmm2,32(%esp)
|
||||
movdqa %xmm3,48(%esp)
|
||||
movl $10,%edx
|
||||
jmp L012loop1x
|
||||
.align 4,0x90
|
||||
L013outer1x:
|
||||
movdqa 80(%eax),%xmm3
|
||||
movdqa (%esp),%xmm0
|
||||
movdqa 16(%esp),%xmm1
|
||||
movdqa 32(%esp),%xmm2
|
||||
paddd 48(%esp),%xmm3
|
||||
movl $10,%edx
|
||||
movdqa %xmm3,48(%esp)
|
||||
jmp L012loop1x
|
||||
.align 4,0x90
|
||||
L012loop1x:
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,222
|
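||||
# hand-encoded pshufb %xmm6,%xmm3 (emitted as raw bytes, presumably so older assemblers without SSSE3 support can still build this file)
|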
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $20,%xmm1
|
||||
pslld $12,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,223
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $25,%xmm1
|
||||
pslld $7,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
pshufd $78,%xmm2,%xmm2
|
||||
pshufd $57,%xmm1,%xmm1
|
||||
pshufd $147,%xmm3,%xmm3
|
||||
nop
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,222
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $20,%xmm1
|
||||
pslld $12,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,223
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm4
|
||||
psrld $25,%xmm1
|
||||
pslld $7,%xmm4
|
||||
por %xmm4,%xmm1
|
||||
pshufd $78,%xmm2,%xmm2
|
||||
pshufd $147,%xmm1,%xmm1
|
||||
pshufd $57,%xmm3,%xmm3
|
||||
decl %edx
|
||||
jnz L012loop1x
|
||||
paddd (%esp),%xmm0
|
||||
paddd 16(%esp),%xmm1
|
||||
paddd 32(%esp),%xmm2
|
||||
paddd 48(%esp),%xmm3
|
||||
cmpl $64,%ecx
|
||||
jb L014tail
|
||||
movdqu (%esi),%xmm4
|
||||
movdqu 16(%esi),%xmm5
|
||||
pxor %xmm4,%xmm0
|
||||
movdqu 32(%esi),%xmm4
|
||||
pxor %xmm5,%xmm1
|
||||
movdqu 48(%esi),%xmm5
|
||||
pxor %xmm4,%xmm2
|
||||
pxor %xmm5,%xmm3
|
||||
leal 64(%esi),%esi
|
||||
movdqu %xmm0,(%edi)
|
||||
movdqu %xmm1,16(%edi)
|
||||
movdqu %xmm2,32(%edi)
|
||||
movdqu %xmm3,48(%edi)
|
||||
leal 64(%edi),%edi
|
||||
subl $64,%ecx
|
||||
jnz L013outer1x
|
||||
jmp L011done
|
||||
L014tail:
|
||||
movdqa %xmm0,(%esp)
|
||||
movdqa %xmm1,16(%esp)
|
||||
movdqa %xmm2,32(%esp)
|
||||
movdqa %xmm3,48(%esp)
|
||||
xorl %eax,%eax
|
||||
xorl %edx,%edx
|
||||
xorl %ebp,%ebp
|
||||
L015tail_loop:
|
||||
movb (%esp,%ebp,1),%al
|
||||
movb (%esi,%ebp,1),%dl
|
||||
leal 1(%ebp),%ebp
|
||||
xorb %dl,%al
|
||||
movb %al,-1(%edi,%ebp,1)
|
||||
decl %ecx
|
||||
jnz L015tail_loop
|
||||
L011done:
|
||||
movl 512(%esp),%esp
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.align 6,0x90
|
||||
Lssse3_data:
|
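||||
# byte-shuffle masks for the 16- and 8-bit left rotates, followed by the ChaCha20 constants, counter lane offsets and increments, and a store mask
|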
||||
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
|
||||
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
|
||||
.long 1634760805,857760878,2036477234,1797285236
|
||||
.long 0,1,2,3
|
||||
.long 4,4,4,4
|
||||
.long 1,0,0,0
|
||||
.long 4,0,0,0
|
||||
.long 0,-1,-1,-1
|
||||
.align 6,0x90
|
||||
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
|
||||
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
|
||||
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
|
||||
.byte 114,103,62,0
|
||||
.section __IMPORT,__pointers,non_lazy_symbol_pointers
|
||||
L_OPENSSL_ia32cap_P$non_lazy_ptr:
|
||||
.indirect_symbol _OPENSSL_ia32cap_P
|
||||
.long 0
|
||||
#endif
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,679 @@
|
|||
#if defined(__i386__)
|
||||
.text
|
||||
.globl _md5_block_asm_data_order
|
||||
.private_extern _md5_block_asm_data_order
|
||||
.align 4
|
||||
_md5_block_asm_data_order:
|
||||
L_md5_block_asm_data_order_begin:
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 12(%esp),%edi
|
||||
movl 16(%esp),%esi
|
||||
movl 20(%esp),%ecx
|
||||
pushl %ebp
|
||||
shll $6,%ecx
|
||||
pushl %ebx
|
||||
addl %esi,%ecx
|
||||
subl $64,%ecx
|
||||
movl (%edi),%eax
|
||||
pushl %ecx
|
||||
movl 4(%edi),%ebx
|
||||
movl 8(%edi),%ecx
|
||||
movl 12(%edi),%edx
|
||||
L000start:
|
||||
|
||||
# R0 section
|
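||||
# round-1 function F(b,c,d) = (b & c) | (~b & d), computed below as ((c ^ d) & b) ^ d
|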
||||
movl %ecx,%edi
|
||||
movl (%esi),%ebp
|
||||
# R0 0
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 3614090360(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 4(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
# R0 1
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 3905402710(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 8(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
# R0 2
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 606105819(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 12(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
# R0 3
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 3250441966(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 16(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
# R0 4
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 4118548399(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 20(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
# R0 5
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 1200080426(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 24(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
# R0 6
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 2821735955(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 28(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
# R0 7
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 4249261313(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 32(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
# R0 8
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 1770035416(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 36(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
# R0 9
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 2336552879(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 40(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
# R0 10
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 4294925233(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 44(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
# R0 11
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 2304563134(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 48(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
# R0 12
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
leal 1804603682(%eax,%ebp,1),%eax
|
||||
xorl %edx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $7,%eax
|
||||
movl 52(%esi),%ebp
|
||||
addl %ebx,%eax
|
||||
# R0 13
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
leal 4254626195(%edx,%ebp,1),%edx
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $12,%edx
|
||||
movl 56(%esi),%ebp
|
||||
addl %eax,%edx
|
||||
# R0 14
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
leal 2792965006(%ecx,%ebp,1),%ecx
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $17,%ecx
|
||||
movl 60(%esi),%ebp
|
||||
addl %edx,%ecx
|
||||
# R0 15
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
leal 1236535329(%ebx,%ebp,1),%ebx
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $22,%ebx
|
||||
movl 4(%esi),%ebp
|
||||
addl %ecx,%ebx
|
||||
|
||||
# R1 section
|
||||
# R1 16
|
||||
leal 4129170786(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 24(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
# R1 17
|
||||
leal 3225465664(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 44(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
# R1 18
|
||||
leal 643717713(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl (%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
# R1 19
|
||||
leal 3921069994(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 20(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
# R1 20
|
||||
leal 3593408605(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 40(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
# R1 21
|
||||
leal 38016083(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 60(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
# R1 22
|
||||
leal 3634488961(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl 16(%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
# R1 23
|
||||
leal 3889429448(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 36(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
# R1 24
|
||||
leal 568446438(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 56(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
# R1 25
|
||||
leal 3275163606(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 12(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
# R1 26
|
||||
leal 4107603335(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl 32(%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
# R1 27
|
||||
leal 1163531501(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 52(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
# R1 28
|
||||
leal 2850285829(%eax,%ebp,1),%eax
|
||||
xorl %ebx,%edi
|
||||
andl %edx,%edi
|
||||
movl 8(%esi),%ebp
|
||||
xorl %ecx,%edi
|
||||
addl %edi,%eax
|
||||
movl %ebx,%edi
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
# R1 29
|
||||
leal 4243563512(%edx,%ebp,1),%edx
|
||||
xorl %eax,%edi
|
||||
andl %ecx,%edi
|
||||
movl 28(%esi),%ebp
|
||||
xorl %ebx,%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
# R1 30
|
||||
leal 1735328473(%ecx,%ebp,1),%ecx
|
||||
xorl %edx,%edi
|
||||
andl %ebx,%edi
|
||||
movl 48(%esi),%ebp
|
||||
xorl %eax,%edi
|
||||
addl %edi,%ecx
|
||||
movl %edx,%edi
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
# R1 31
|
||||
leal 2368359562(%ebx,%ebp,1),%ebx
|
||||
xorl %ecx,%edi
|
||||
andl %eax,%edi
|
||||
movl 20(%esi),%ebp
|
||||
xorl %edx,%edi
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
# R2 section
|
||||
# R2 32
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 4294588738(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl 32(%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
# R2 33
|
||||
leal 2272392833(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 44(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
# R2 34
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 1839030562(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 56(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
# R2 35
|
||||
leal 4259657740(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl 4(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
# R2 36
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 2763975236(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl 16(%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
# R2 37
|
||||
leal 1272893353(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 28(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
# R2 38
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 4139469664(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 40(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
# R2 39
|
||||
leal 3200236656(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl 52(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
# R2 40
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 681279174(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl (%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
# R2 41
|
||||
leal 3936430074(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 12(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
# R2 42
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 3572445317(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 24(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
# R2 43
|
||||
leal 76029189(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl 36(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl %ecx,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
# R2 44
|
||||
xorl %edx,%edi
|
||||
xorl %ebx,%edi
|
||||
leal 3654602809(%eax,%ebp,1),%eax
|
||||
addl %edi,%eax
|
||||
roll $4,%eax
|
||||
movl 48(%esi),%ebp
|
||||
movl %ebx,%edi
|
||||
# R2 45
|
||||
leal 3873151461(%edx,%ebp,1),%edx
|
||||
addl %ebx,%eax
|
||||
xorl %ecx,%edi
|
||||
xorl %eax,%edi
|
||||
movl 60(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl %eax,%edi
|
||||
roll $11,%edx
|
||||
addl %eax,%edx
|
||||
# R2 46
|
||||
xorl %ebx,%edi
|
||||
xorl %edx,%edi
|
||||
leal 530742520(%ecx,%ebp,1),%ecx
|
||||
addl %edi,%ecx
|
||||
roll $16,%ecx
|
||||
movl 8(%esi),%ebp
|
||||
movl %edx,%edi
|
||||
# R2 47
|
||||
leal 3299628645(%ebx,%ebp,1),%ebx
|
||||
addl %edx,%ecx
|
||||
xorl %eax,%edi
|
||||
xorl %ecx,%edi
|
||||
movl (%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $23,%ebx
|
||||
addl %ecx,%ebx
|
||||
|
||||
# R3 section
|
||||
# R3 48
|
||||
xorl %edx,%edi
|
||||
orl %ebx,%edi
|
||||
leal 4096336452(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 28(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
# R3 49
|
||||
orl %eax,%edi
|
||||
leal 1126891415(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 56(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
# R3 50
|
||||
orl %edx,%edi
|
||||
leal 2878612391(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 20(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
# R3 51
|
||||
orl %ecx,%edi
|
||||
leal 4237533241(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 48(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $21,%ebx
|
||||
xorl %edx,%edi
|
||||
addl %ecx,%ebx
|
||||
# R3 52
|
||||
orl %ebx,%edi
|
||||
leal 1700485571(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 12(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
# R3 53
|
||||
orl %eax,%edi
|
||||
leal 2399980690(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 40(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
# R3 54
|
||||
orl %edx,%edi
|
||||
leal 4293915773(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 4(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
# R3 55
|
||||
orl %ecx,%edi
|
||||
leal 2240044497(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 32(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $21,%ebx
|
||||
xorl %edx,%edi
|
||||
addl %ecx,%ebx
|
||||
# R3 56
|
||||
orl %ebx,%edi
|
||||
leal 1873313359(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 60(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
# R3 57
|
||||
orl %eax,%edi
|
||||
leal 4264355552(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 24(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
# R3 58
|
||||
orl %edx,%edi
|
||||
leal 2734768916(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 52(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
# R3 59
|
||||
orl %ecx,%edi
|
||||
leal 1309151649(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 16(%esi),%ebp
|
||||
addl %edi,%ebx
|
||||
movl $-1,%edi
|
||||
roll $21,%ebx
|
||||
xorl %edx,%edi
|
||||
addl %ecx,%ebx
|
||||
# R3 60
|
||||
orl %ebx,%edi
|
||||
leal 4149444226(%eax,%ebp,1),%eax
|
||||
xorl %ecx,%edi
|
||||
movl 44(%esi),%ebp
|
||||
addl %edi,%eax
|
||||
movl $-1,%edi
|
||||
roll $6,%eax
|
||||
xorl %ecx,%edi
|
||||
addl %ebx,%eax
|
||||
# R3 61
|
||||
orl %eax,%edi
|
||||
leal 3174756917(%edx,%ebp,1),%edx
|
||||
xorl %ebx,%edi
|
||||
movl 8(%esi),%ebp
|
||||
addl %edi,%edx
|
||||
movl $-1,%edi
|
||||
roll $10,%edx
|
||||
xorl %ebx,%edi
|
||||
addl %eax,%edx
|
||||
# R3 62
|
||||
orl %edx,%edi
|
||||
leal 718787259(%ecx,%ebp,1),%ecx
|
||||
xorl %eax,%edi
|
||||
movl 36(%esi),%ebp
|
||||
addl %edi,%ecx
|
||||
movl $-1,%edi
|
||||
roll $15,%ecx
|
||||
xorl %eax,%edi
|
||||
addl %edx,%ecx
|
||||
# R3 63
|
||||
orl %ecx,%edi
|
||||
leal 3951481745(%ebx,%ebp,1),%ebx
|
||||
xorl %edx,%edi
|
||||
movl 24(%esp),%ebp
|
||||
addl %edi,%ebx
|
||||
addl $64,%esi
|
||||
roll $21,%ebx
|
||||
movl (%ebp),%edi
|
||||
addl %ecx,%ebx
|
||||
addl %edi,%eax
|
||||
movl 4(%ebp),%edi
|
||||
addl %edi,%ebx
|
||||
movl 8(%ebp),%edi
|
||||
addl %edi,%ecx
|
||||
movl 12(%ebp),%edi
|
||||
addl %edi,%edx
|
||||
movl %eax,(%ebp)
|
||||
movl %ebx,4(%ebp)
|
||||
movl (%esp),%edi
|
||||
movl %ecx,8(%ebp)
|
||||
movl %edx,12(%ebp)
|
||||
cmpl %esi,%edi
|
||||
jae L000start
|
||||
popl %eax
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
popl %edi
|
||||
popl %esi
|
||||
ret
|
||||
#endif
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,649 @@
|
|||
#if defined(__i386__)
|
||||
.text
|
||||
.align 6,0x90
|
||||
L_vpaes_consts:
|
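||||
# constant tables (shuffle masks, S-box and key-schedule transforms) for the SSSE3 vector-permutation AES credited in the trailing byte string
|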
||||
.long 218628480,235210255,168496130,67568393
|
||||
.long 252381056,17041926,33884169,51187212
|
||||
.long 252645135,252645135,252645135,252645135
|
||||
.long 1512730624,3266504856,1377990664,3401244816
|
||||
.long 830229760,1275146365,2969422977,3447763452
|
||||
.long 3411033600,2979783055,338359620,2782886510
|
||||
.long 4209124096,907596821,221174255,1006095553
|
||||
.long 191964160,3799684038,3164090317,1589111125
|
||||
.long 182528256,1777043520,2877432650,3265356744
|
||||
.long 1874708224,3503451415,3305285752,363511674
|
||||
.long 1606117888,3487855781,1093350906,2384367825
|
||||
.long 197121,67569157,134941193,202313229
|
||||
.long 67569157,134941193,202313229,197121
|
||||
.long 134941193,202313229,197121,67569157
|
||||
.long 202313229,197121,67569157,134941193
|
||||
.long 33619971,100992007,168364043,235736079
|
||||
.long 235736079,33619971,100992007,168364043
|
||||
.long 168364043,235736079,33619971,100992007
|
||||
.long 100992007,168364043,235736079,33619971
|
||||
.long 50462976,117835012,185207048,252579084
|
||||
.long 252314880,51251460,117574920,184942860
|
||||
.long 184682752,252054788,50987272,118359308
|
||||
.long 118099200,185467140,251790600,50727180
|
||||
.long 2946363062,528716217,1300004225,1881839624
|
||||
.long 1532713819,1532713819,1532713819,1532713819
|
||||
.long 3602276352,4288629033,3737020424,4153884961
|
||||
.long 1354558464,32357713,2958822624,3775749553
|
||||
.long 1201988352,132424512,1572796698,503232858
|
||||
.long 2213177600,1597421020,4103937655,675398315
|
||||
.long 2749646592,4273543773,1511898873,121693092
|
||||
.long 3040248576,1103263732,2871565598,1608280554
|
||||
.long 2236667136,2588920351,482954393,64377734
|
||||
.long 3069987328,291237287,2117370568,3650299247
|
||||
.long 533321216,3573750986,2572112006,1401264716
|
||||
.long 1339849704,2721158661,548607111,3445553514
|
||||
.long 2128193280,3054596040,2183486460,1257083700
|
||||
.long 655635200,1165381986,3923443150,2344132524
|
||||
.long 190078720,256924420,290342170,357187870
|
||||
.long 1610966272,2263057382,4103205268,309794674
|
||||
.long 2592527872,2233205587,1335446729,3402964816
|
||||
.long 3973531904,3225098121,3002836325,1918774430
|
||||
.long 3870401024,2102906079,2284471353,4117666579
|
||||
.long 617007872,1021508343,366931923,691083277
|
||||
.long 2528395776,3491914898,2968704004,1613121270
|
||||
.long 3445188352,3247741094,844474987,4093578302
|
||||
.long 651481088,1190302358,1689581232,574775300
|
||||
.long 4289380608,206939853,2555985458,2489840491
|
||||
.long 2130264064,327674451,3566485037,3349835193
|
||||
.long 2470714624,316102159,3636825756,3393945945
|
||||
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
|
||||
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
|
||||
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
|
||||
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
|
||||
.byte 118,101,114,115,105,116,121,41,0
|
||||
.align 6,0x90
|
||||
.private_extern __vpaes_preheat
|
||||
.align 4
|
||||
__vpaes_preheat:
|
||||
addl (%esp),%ebp
|
||||
movdqa -48(%ebp),%xmm7
|
||||
movdqa -16(%ebp),%xmm6
|
||||
ret
|
||||
.private_extern __vpaes_encrypt_core
|
||||
.align 4
|
||||
__vpaes_encrypt_core:
|
||||
movl $16,%ecx
|
||||
movl 240(%edx),%eax
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa (%ebp),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
pand %xmm6,%xmm0
|
||||
movdqu (%edx),%xmm5
|
||||
.byte 102,15,56,0,208
|
||||
movdqa 16(%ebp),%xmm0
|
||||
pxor %xmm5,%xmm2
|
||||
psrld $4,%xmm1
|
||||
addl $16,%edx
|
||||
.byte 102,15,56,0,193
|
||||
leal 192(%ebp),%ebx
|
||||
pxor %xmm2,%xmm0
|
||||
jmp L000enc_entry
|
||||
.align 4,0x90
|
||||
L001enc_loop:
|
||||
movdqa 32(%ebp),%xmm4
|
||||
movdqa 48(%ebp),%xmm0
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm5,%xmm4
|
||||
movdqa 64(%ebp),%xmm5
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa -64(%ebx,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,234
|
||||
movdqa 80(%ebp),%xmm2
|
||||
movdqa (%ebx,%ecx,1),%xmm4
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm0,%xmm3
|
||||
pxor %xmm5,%xmm2
|
||||
.byte 102,15,56,0,193
|
||||
addl $16,%edx
|
||||
pxor %xmm2,%xmm0
|
||||
.byte 102,15,56,0,220
|
||||
addl $16,%ecx
|
||||
pxor %xmm0,%xmm3
|
||||
.byte 102,15,56,0,193
|
||||
andl $48,%ecx
|
||||
subl $1,%eax
|
||||
pxor %xmm3,%xmm0
|
||||
L000enc_entry:
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa -32(%ebp),%xmm5
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm6,%xmm0
|
||||
.byte 102,15,56,0,232
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm1,%xmm0
|
||||
.byte 102,15,56,0,217
|
||||
movdqa %xmm7,%xmm4
|
||||
pxor %xmm5,%xmm3
|
||||
.byte 102,15,56,0,224
|
||||
movdqa %xmm7,%xmm2
|
||||
pxor %xmm5,%xmm4
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm0,%xmm2
|
||||
.byte 102,15,56,0,220
|
||||
movdqu (%edx),%xmm5
|
||||
pxor %xmm1,%xmm3
|
||||
jnz L001enc_loop
|
||||
movdqa 96(%ebp),%xmm4
|
||||
movdqa 112(%ebp),%xmm0
|
||||
.byte 102,15,56,0,226
|
||||
pxor %xmm5,%xmm4
|
||||
.byte 102,15,56,0,195
|
||||
movdqa 64(%ebx,%ecx,1),%xmm1
|
||||
pxor %xmm4,%xmm0
|
||||
.byte 102,15,56,0,193
|
||||
ret
|
||||
.private_extern __vpaes_decrypt_core
|
||||
.align 4
|
||||
__vpaes_decrypt_core:
|
||||
leal 608(%ebp),%ebx
|
||||
movl 240(%edx),%eax
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa -64(%ebx),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
movl %eax,%ecx
|
||||
psrld $4,%xmm1
|
||||
movdqu (%edx),%xmm5
|
||||
shll $4,%ecx
|
||||
pand %xmm6,%xmm0
|
||||
.byte 102,15,56,0,208
|
||||
movdqa -48(%ebx),%xmm0
|
||||
xorl $48,%ecx
|
||||
.byte 102,15,56,0,193
|
||||
andl $48,%ecx
|
||||
pxor %xmm5,%xmm2
|
||||
movdqa 176(%ebp),%xmm5
|
||||
pxor %xmm2,%xmm0
|
||||
addl $16,%edx
|
||||
leal -352(%ebx,%ecx,1),%ecx
|
||||
jmp L002dec_entry
|
||||
.align 4,0x90
|
||||
L003dec_loop:
|
||||
movdqa -32(%ebx),%xmm4
|
||||
movdqa -16(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa (%ebx),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 16(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa 32(%ebx),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 48(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
movdqa 64(%ebx),%xmm4
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa 80(%ebx),%xmm1
|
||||
.byte 102,15,56,0,226
|
||||
.byte 102,15,56,0,197
|
||||
.byte 102,15,56,0,203
|
||||
pxor %xmm4,%xmm0
|
||||
addl $16,%edx
|
||||
.byte 102,15,58,15,237,12
|
||||
pxor %xmm1,%xmm0
|
||||
subl $1,%eax
|
||||
L002dec_entry:
|
||||
movdqa %xmm6,%xmm1
|
||||
movdqa -32(%ebp),%xmm2
|
||||
pandn %xmm0,%xmm1
|
||||
pand %xmm6,%xmm0
|
||||
psrld $4,%xmm1
|
||||
.byte 102,15,56,0,208
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm1,%xmm0
|
||||
.byte 102,15,56,0,217
|
||||
movdqa %xmm7,%xmm4
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,224
|
||||
pxor %xmm2,%xmm4
|
||||
movdqa %xmm7,%xmm2
|
||||
.byte 102,15,56,0,211
|
||||
movdqa %xmm7,%xmm3
|
||||
pxor %xmm0,%xmm2
|
||||
.byte 102,15,56,0,220
|
||||
movdqu (%edx),%xmm0
|
||||
pxor %xmm1,%xmm3
|
||||
jnz L003dec_loop
|
||||
movdqa 96(%ebx),%xmm4
|
||||
.byte 102,15,56,0,226
|
||||
pxor %xmm0,%xmm4
|
||||
movdqa 112(%ebx),%xmm0
|
||||
movdqa (%ecx),%xmm2
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm4,%xmm0
|
||||
.byte 102,15,56,0,194
|
||||
ret
|
||||
.private_extern __vpaes_schedule_core
|
||||
.align 4
|
||||
__vpaes_schedule_core:
|
||||
addl (%esp),%ebp
|
||||
movdqu (%esi),%xmm0
|
||||
movdqa 320(%ebp),%xmm2
|
||||
movdqa %xmm0,%xmm3
|
||||
leal (%ebp),%ebx
|
||||
movdqa %xmm2,4(%esp)
|
||||
call __vpaes_schedule_transform
|
||||
movdqa %xmm0,%xmm7
|
||||
testl %edi,%edi
|
||||
jnz L004schedule_am_decrypting
|
||||
movdqu %xmm0,(%edx)
|
||||
jmp L005schedule_go
|
||||
L004schedule_am_decrypting:
|
||||
movdqa 256(%ebp,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,217
|
||||
movdqu %xmm3,(%edx)
|
||||
xorl $48,%ecx
|
||||
L005schedule_go:
|
||||
cmpl $192,%eax
|
||||
ja L006schedule_256
|
||||
je L007schedule_192
|
||||
L008schedule_128:
|
||||
movl $10,%eax
|
||||
L009loop_schedule_128:
|
||||
call __vpaes_schedule_round
|
||||
decl %eax
|
||||
jz L010schedule_mangle_last
|
||||
call __vpaes_schedule_mangle
|
||||
jmp L009loop_schedule_128
|
||||
.align 4,0x90
|
||||
L007schedule_192:
|
||||
movdqu 8(%esi),%xmm0
|
||||
call __vpaes_schedule_transform
|
||||
movdqa %xmm0,%xmm6
|
||||
pxor %xmm4,%xmm4
|
||||
movhlps %xmm4,%xmm6
|
||||
movl $4,%eax
|
||||
L011loop_schedule_192:
|
||||
call __vpaes_schedule_round
|
||||
.byte 102,15,58,15,198,8
|
||||
call __vpaes_schedule_mangle
|
||||
call __vpaes_schedule_192_smear
|
||||
call __vpaes_schedule_mangle
|
||||
call __vpaes_schedule_round
|
||||
decl %eax
|
||||
jz L010schedule_mangle_last
|
||||
call __vpaes_schedule_mangle
|
||||
call __vpaes_schedule_192_smear
|
||||
jmp L011loop_schedule_192
|
||||
.align 4,0x90
|
||||
L006schedule_256:
|
||||
movdqu 16(%esi),%xmm0
|
||||
call __vpaes_schedule_transform
|
||||
movl $7,%eax
|
||||
L012loop_schedule_256:
|
||||
call __vpaes_schedule_mangle
|
||||
movdqa %xmm0,%xmm6
|
||||
call __vpaes_schedule_round
|
||||
decl %eax
|
||||
jz L010schedule_mangle_last
|
||||
call __vpaes_schedule_mangle
|
||||
pshufd $255,%xmm0,%xmm0
|
||||
movdqa %xmm7,20(%esp)
|
||||
movdqa %xmm6,%xmm7
|
||||
call L_vpaes_schedule_low_round
|
||||
movdqa 20(%esp),%xmm7
|
||||
jmp L012loop_schedule_256
|
||||
.align 4,0x90
|
||||
L010schedule_mangle_last:
|
||||
leal 384(%ebp),%ebx
|
||||
testl %edi,%edi
|
||||
jnz L013schedule_mangle_last_dec
|
||||
movdqa 256(%ebp,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,193
|
||||
leal 352(%ebp),%ebx
|
||||
addl $32,%edx
|
||||
L013schedule_mangle_last_dec:
|
||||
addl $-16,%edx
|
||||
pxor 336(%ebp),%xmm0
|
||||
call __vpaes_schedule_transform
|
||||
movdqu %xmm0,(%edx)
|
||||
pxor %xmm0,%xmm0
|
||||
pxor %xmm1,%xmm1
|
||||
pxor %xmm2,%xmm2
|
||||
pxor %xmm3,%xmm3
|
||||
pxor %xmm4,%xmm4
|
||||
pxor %xmm5,%xmm5
|
||||
pxor %xmm6,%xmm6
|
||||
pxor %xmm7,%xmm7
|
||||
ret
|
||||
.private_extern __vpaes_schedule_192_smear
|
||||
.align 4
|
||||
__vpaes_schedule_192_smear:
|
||||
pshufd $128,%xmm6,%xmm1
|
||||
pshufd $254,%xmm7,%xmm0
|
||||
pxor %xmm1,%xmm6
|
||||
pxor %xmm1,%xmm1
|
||||
pxor %xmm0,%xmm6
|
||||
movdqa %xmm6,%xmm0
|
||||
movhlps %xmm1,%xmm6
|
||||
ret
|
||||
.private_extern __vpaes_schedule_round
|
||||
.align 4
|
||||
__vpaes_schedule_round:
|
||||
movdqa 8(%esp),%xmm2
|
||||
pxor %xmm1,%xmm1
|
||||
.byte 102,15,58,15,202,15
|
||||
.byte 102,15,58,15,210,15
|
||||
pxor %xmm1,%xmm7
|
||||
pshufd $255,%xmm0,%xmm0
|
||||
.byte 102,15,58,15,192,1
|
||||
movdqa %xmm2,8(%esp)
|
||||
L_vpaes_schedule_low_round:
|
||||
movdqa %xmm7,%xmm1
|
||||
pslldq $4,%xmm7
|
||||
pxor %xmm1,%xmm7
|
||||
movdqa %xmm7,%xmm1
|
||||
pslldq $8,%xmm7
|
||||
pxor %xmm1,%xmm7
|
||||
pxor 336(%ebp),%xmm7
|
||||
movdqa -16(%ebp),%xmm4
|
||||
movdqa -48(%ebp),%xmm5
|
||||
movdqa %xmm4,%xmm1
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm4,%xmm0
|
||||
movdqa -32(%ebp),%xmm2
|
||||
.byte 102,15,56,0,208
|
||||
pxor %xmm1,%xmm0
|
||||
movdqa %xmm5,%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
movdqa %xmm5,%xmm4
|
||||
.byte 102,15,56,0,224
|
||||
pxor %xmm2,%xmm4
|
||||
movdqa %xmm5,%xmm2
|
||||
.byte 102,15,56,0,211
|
||||
pxor %xmm0,%xmm2
|
||||
movdqa %xmm5,%xmm3
|
||||
.byte 102,15,56,0,220
|
||||
pxor %xmm1,%xmm3
|
||||
movdqa 32(%ebp),%xmm4
|
||||
.byte 102,15,56,0,226
|
||||
movdqa 48(%ebp),%xmm0
|
||||
.byte 102,15,56,0,195
|
||||
pxor %xmm4,%xmm0
|
||||
pxor %xmm7,%xmm0
|
||||
movdqa %xmm0,%xmm7
|
||||
ret
|
||||
.private_extern __vpaes_schedule_transform
|
||||
.align 4
|
||||
__vpaes_schedule_transform:
|
||||
movdqa -16(%ebp),%xmm2
|
||||
movdqa %xmm2,%xmm1
|
||||
pandn %xmm0,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm2,%xmm0
|
||||
movdqa (%ebx),%xmm2
|
||||
.byte 102,15,56,0,208
|
||||
movdqa 16(%ebx),%xmm0
|
||||
.byte 102,15,56,0,193
|
||||
pxor %xmm2,%xmm0
|
||||
ret
|
||||
.private_extern __vpaes_schedule_mangle
|
||||
.align 4
|
||||
__vpaes_schedule_mangle:
|
||||
movdqa %xmm0,%xmm4
|
||||
movdqa 128(%ebp),%xmm5
|
||||
testl %edi,%edi
|
||||
jnz L014schedule_mangle_dec
|
||||
addl $16,%edx
|
||||
pxor 336(%ebp),%xmm4
|
||||
.byte 102,15,56,0,229
|
||||
movdqa %xmm4,%xmm3
|
||||
.byte 102,15,56,0,229
|
||||
pxor %xmm4,%xmm3
|
||||
.byte 102,15,56,0,229
|
||||
pxor %xmm4,%xmm3
|
||||
jmp L015schedule_mangle_both
|
||||
.align 4,0x90
|
||||
L014schedule_mangle_dec:
|
||||
movdqa -16(%ebp),%xmm2
|
||||
leal 416(%ebp),%esi
|
||||
movdqa %xmm2,%xmm1
|
||||
pandn %xmm4,%xmm1
|
||||
psrld $4,%xmm1
|
||||
pand %xmm2,%xmm4
|
||||
movdqa (%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
movdqa 16(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
movdqa 32(%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 48(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
movdqa 64(%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 80(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
.byte 102,15,56,0,221
|
||||
movdqa 96(%esi),%xmm2
|
||||
.byte 102,15,56,0,212
|
||||
pxor %xmm3,%xmm2
|
||||
movdqa 112(%esi),%xmm3
|
||||
.byte 102,15,56,0,217
|
||||
pxor %xmm2,%xmm3
|
||||
addl $-16,%edx
|
||||
L015schedule_mangle_both:
|
||||
movdqa 256(%ebp,%ecx,1),%xmm1
|
||||
.byte 102,15,56,0,217
|
||||
addl $-16,%ecx
|
||||
andl $48,%ecx
|
||||
movdqu %xmm3,(%edx)
|
||||
ret
|
||||
.globl _vpaes_set_encrypt_key
|
||||
.private_extern _vpaes_set_encrypt_key
|
||||
.align 4
|
||||
_vpaes_set_encrypt_key:
|
||||
L_vpaes_set_encrypt_key_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 20(%esp),%esi
|
||||
leal -56(%esp),%ebx
|
||||
movl 24(%esp),%eax
|
||||
andl $-16,%ebx
|
||||
movl 28(%esp),%edx
|
||||
xchgl %esp,%ebx
|
||||
movl %ebx,48(%esp)
|
||||
movl %eax,%ebx
|
||||
shrl $5,%ebx
|
||||
addl $5,%ebx
|
||||
movl %ebx,240(%edx)
|
||||
movl $48,%ecx
|
||||
movl $0,%edi
|
||||
leal L_vpaes_consts+0x30-L016pic_point,%ebp
|
||||
call __vpaes_schedule_core
|
||||
L016pic_point:
|
||||
movl 48(%esp),%esp
|
||||
xorl %eax,%eax
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.globl _vpaes_set_decrypt_key
|
||||
.private_extern _vpaes_set_decrypt_key
|
||||
.align 4
|
||||
_vpaes_set_decrypt_key:
|
||||
L_vpaes_set_decrypt_key_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 20(%esp),%esi
|
||||
leal -56(%esp),%ebx
|
||||
movl 24(%esp),%eax
|
||||
andl $-16,%ebx
|
||||
movl 28(%esp),%edx
|
||||
xchgl %esp,%ebx
|
||||
movl %ebx,48(%esp)
|
||||
movl %eax,%ebx
|
||||
shrl $5,%ebx
|
||||
addl $5,%ebx
|
||||
movl %ebx,240(%edx)
|
||||
shll $4,%ebx
|
||||
leal 16(%edx,%ebx,1),%edx
|
||||
movl $1,%edi
|
||||
movl %eax,%ecx
|
||||
shrl $1,%ecx
|
||||
andl $32,%ecx
|
||||
xorl $32,%ecx
|
||||
leal L_vpaes_consts+0x30-L017pic_point,%ebp
|
||||
call __vpaes_schedule_core
|
||||
L017pic_point:
|
||||
movl 48(%esp),%esp
|
||||
xorl %eax,%eax
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.globl _vpaes_encrypt
|
||||
.private_extern _vpaes_encrypt
|
||||
.align 4
|
||||
_vpaes_encrypt:
|
||||
L_vpaes_encrypt_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
leal L_vpaes_consts+0x30-L018pic_point,%ebp
|
||||
call __vpaes_preheat
|
||||
L018pic_point:
|
||||
movl 20(%esp),%esi
|
||||
leal -56(%esp),%ebx
|
||||
movl 24(%esp),%edi
|
||||
andl $-16,%ebx
|
||||
movl 28(%esp),%edx
|
||||
xchgl %esp,%ebx
|
||||
movl %ebx,48(%esp)
|
||||
movdqu (%esi),%xmm0
|
||||
call __vpaes_encrypt_core
|
||||
movdqu %xmm0,(%edi)
|
||||
movl 48(%esp),%esp
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.globl _vpaes_decrypt
|
||||
.private_extern _vpaes_decrypt
|
||||
.align 4
|
||||
_vpaes_decrypt:
|
||||
L_vpaes_decrypt_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
leal L_vpaes_consts+0x30-L019pic_point,%ebp
|
||||
call __vpaes_preheat
|
||||
L019pic_point:
|
||||
movl 20(%esp),%esi
|
||||
leal -56(%esp),%ebx
|
||||
movl 24(%esp),%edi
|
||||
andl $-16,%ebx
|
||||
movl 28(%esp),%edx
|
||||
xchgl %esp,%ebx
|
||||
movl %ebx,48(%esp)
|
||||
movdqu (%esi),%xmm0
|
||||
call __vpaes_decrypt_core
|
||||
movdqu %xmm0,(%edi)
|
||||
movl 48(%esp),%esp
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.globl _vpaes_cbc_encrypt
|
||||
.private_extern _vpaes_cbc_encrypt
|
||||
.align 4
|
||||
_vpaes_cbc_encrypt:
|
||||
L_vpaes_cbc_encrypt_begin:
|
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
movl 20(%esp),%esi
|
||||
movl 24(%esp),%edi
|
||||
movl 28(%esp),%eax
|
||||
movl 32(%esp),%edx
|
||||
subl $16,%eax
|
||||
jc L020cbc_abort
|
||||
leal -56(%esp),%ebx
|
||||
movl 36(%esp),%ebp
|
||||
andl $-16,%ebx
|
||||
movl 40(%esp),%ecx
|
||||
xchgl %esp,%ebx
|
||||
movdqu (%ebp),%xmm1
|
||||
subl %esi,%edi
|
||||
movl %ebx,48(%esp)
|
||||
movl %edi,(%esp)
|
||||
movl %edx,4(%esp)
|
||||
movl %ebp,8(%esp)
|
||||
movl %eax,%edi
|
||||
leal L_vpaes_consts+0x30-L021pic_point,%ebp
|
||||
call __vpaes_preheat
|
||||
L021pic_point:
|
||||
cmpl $0,%ecx
|
||||
je L022cbc_dec_loop
|
||||
jmp L023cbc_enc_loop
|
||||
.align 4,0x90
|
||||
L023cbc_enc_loop:
|
||||
movdqu (%esi),%xmm0
|
||||
pxor %xmm1,%xmm0
|
||||
call __vpaes_encrypt_core
|
||||
movl (%esp),%ebx
|
||||
movl 4(%esp),%edx
|
||||
movdqa %xmm0,%xmm1
|
||||
movdqu %xmm0,(%ebx,%esi,1)
|
||||
leal 16(%esi),%esi
|
||||
subl $16,%edi
|
||||
jnc L023cbc_enc_loop
|
||||
jmp L024cbc_done
|
||||
.align 4,0x90
|
||||
L022cbc_dec_loop:
|
||||
movdqu (%esi),%xmm0
|
||||
movdqa %xmm1,16(%esp)
|
||||
movdqa %xmm0,32(%esp)
|
||||
call __vpaes_decrypt_core
|
||||
movl (%esp),%ebx
|
||||
movl 4(%esp),%edx
|
||||
pxor 16(%esp),%xmm0
|
||||
movdqa 32(%esp),%xmm1
|
||||
movdqu %xmm0,(%ebx,%esi,1)
|
||||
leal 16(%esi),%esi
|
||||
subl $16,%edi
|
||||
jnc L022cbc_dec_loop
|
||||
L024cbc_done:
|
||||
movl 8(%esp),%ebx
|
||||
movl 48(%esp),%esp
|
||||
movdqu %xmm1,(%ebx)
|
||||
L020cbc_abort:
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
#endif
|
|
@ -0,0 +1,477 @@
|
|||
#if defined(__i386__)
|
||||
.text
|
||||
.globl _bn_mul_mont
|
||||
.private_extern _bn_mul_mont
|
||||
.align 4
|
||||
_bn_mul_mont:
|
||||
L_bn_mul_mont_begin:
|
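||||
# Montgomery multiplication: returns 0 via L000just_leave when the word-count argument is below 4, and takes the SSE2 path when bit 26 of OPENSSL_ia32cap_P is set
|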
||||
pushl %ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
xorl %eax,%eax
|
||||
movl 40(%esp),%edi
|
||||
cmpl $4,%edi
|
||||
jl L000just_leave
|
||||
leal 20(%esp),%esi
|
||||
leal 24(%esp),%edx
|
||||
addl $2,%edi
|
||||
negl %edi
|
||||
leal -32(%esp,%edi,4),%ebp
|
||||
negl %edi
|
||||
movl %ebp,%eax
|
||||
subl %edx,%eax
|
||||
andl $2047,%eax
|
||||
subl %eax,%ebp
|
||||
xorl %ebp,%edx
|
||||
andl $2048,%edx
|
||||
xorl $2048,%edx
|
||||
subl %edx,%ebp
|
||||
andl $-64,%ebp
|
||||
movl %esp,%eax
|
||||
subl %ebp,%eax
|
||||
andl $-4096,%eax
|
||||
movl %esp,%edx
|
||||
leal (%ebp,%eax,1),%esp
|
||||
movl (%esp),%eax
|
||||
cmpl %ebp,%esp
|
||||
ja L001page_walk
|
||||
jmp L002page_walk_done
|
||||
.align 4,0x90
|
||||
L001page_walk:
|
||||
leal -4096(%esp),%esp
|
||||
movl (%esp),%eax
|
||||
cmpl %ebp,%esp
|
||||
ja L001page_walk
|
||||
L002page_walk_done:
|
||||
movl (%esi),%eax
|
||||
movl 4(%esi),%ebx
|
||||
movl 8(%esi),%ecx
|
||||
movl 12(%esi),%ebp
|
||||
movl 16(%esi),%esi
|
||||
movl (%esi),%esi
|
||||
movl %eax,4(%esp)
|
||||
movl %ebx,8(%esp)
|
||||
movl %ecx,12(%esp)
|
||||
movl %ebp,16(%esp)
|
||||
movl %esi,20(%esp)
|
||||
leal -3(%edi),%ebx
|
||||
movl %edx,24(%esp)
|
||||
call L003PIC_me_up
|
||||
L003PIC_me_up:
|
||||
popl %eax
|
||||
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L003PIC_me_up(%eax),%eax
|
||||
btl $26,(%eax)
|
||||
jnc L004non_sse2
|
||||
movl $-1,%eax
|
||||
movd %eax,%mm7
|
||||
movl 8(%esp),%esi
|
||||
movl 12(%esp),%edi
|
||||
movl 16(%esp),%ebp
|
||||
xorl %edx,%edx
|
||||
xorl %ecx,%ecx
|
||||
movd (%edi),%mm4
|
||||
movd (%esi),%mm5
|
||||
movd (%ebp),%mm3
|
||||
pmuludq %mm4,%mm5
|
||||
movq %mm5,%mm2
|
||||
movq %mm5,%mm0
|
||||
pand %mm7,%mm0
|
||||
pmuludq 20(%esp),%mm5
|
||||
pmuludq %mm5,%mm3
|
||||
paddq %mm0,%mm3
|
||||
movd 4(%ebp),%mm1
|
||||
movd 4(%esi),%mm0
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
incl %ecx
|
||||
.align 4,0x90
|
||||
L0051st:
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
pand %mm7,%mm0
|
||||
movd 4(%ebp,%ecx,4),%mm1
|
||||
paddq %mm0,%mm3
|
||||
movd 4(%esi,%ecx,4),%mm0
|
||||
psrlq $32,%mm2
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm3
|
||||
leal 1(%ecx),%ecx
|
||||
cmpl %ebx,%ecx
|
||||
jl L0051st
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
pand %mm7,%mm0
|
||||
paddq %mm0,%mm3
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
paddq %mm2,%mm3
|
||||
movq %mm3,32(%esp,%ebx,4)
|
||||
incl %edx
|
||||
L006outer:
|
||||
xorl %ecx,%ecx
|
||||
movd (%edi,%edx,4),%mm4
|
||||
movd (%esi),%mm5
|
||||
movd 32(%esp),%mm6
|
||||
movd (%ebp),%mm3
|
||||
pmuludq %mm4,%mm5
|
||||
paddq %mm6,%mm5
|
||||
movq %mm5,%mm0
|
||||
movq %mm5,%mm2
|
||||
pand %mm7,%mm0
|
||||
pmuludq 20(%esp),%mm5
|
||||
pmuludq %mm5,%mm3
|
||||
paddq %mm0,%mm3
|
||||
movd 36(%esp),%mm6
|
||||
movd 4(%ebp),%mm1
|
||||
movd 4(%esi),%mm0
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
paddq %mm6,%mm2
|
||||
incl %ecx
|
||||
decl %ebx
|
||||
L007inner:
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
movd 36(%esp,%ecx,4),%mm6
|
||||
pand %mm7,%mm0
|
||||
movd 4(%ebp,%ecx,4),%mm1
|
||||
paddq %mm0,%mm3
|
||||
movd 4(%esi,%ecx,4),%mm0
|
||||
psrlq $32,%mm2
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm3
|
||||
paddq %mm6,%mm2
|
||||
decl %ebx
|
||||
leal 1(%ecx),%ecx
|
||||
jnz L007inner
|
||||
movl %ecx,%ebx
|
||||
pmuludq %mm4,%mm0
|
||||
pmuludq %mm5,%mm1
|
||||
paddq %mm0,%mm2
|
||||
paddq %mm1,%mm3
|
||||
movq %mm2,%mm0
|
||||
pand %mm7,%mm0
|
||||
paddq %mm0,%mm3
|
||||
movd %mm3,28(%esp,%ecx,4)
|
||||
psrlq $32,%mm2
|
||||
psrlq $32,%mm3
|
||||
movd 36(%esp,%ebx,4),%mm6
|
||||
paddq %mm2,%mm3
|
||||
paddq %mm6,%mm3
|
||||
movq %mm3,32(%esp,%ebx,4)
|
||||
leal 1(%edx),%edx
|
||||
cmpl %ebx,%edx
|
||||
jle L006outer
|
||||
emms
|
||||
jmp L008common_tail
|
||||
.align 4,0x90
|
||||
L004non_sse2:
|
||||
movl 8(%esp),%esi
|
||||
leal 1(%ebx),%ebp
|
||||
movl 12(%esp),%edi
|
||||
xorl %ecx,%ecx
|
||||
movl %esi,%edx
|
||||
andl $1,%ebp
|
||||
subl %edi,%edx
|
||||
leal 4(%edi,%ebx,4),%eax
|
||||
orl %edx,%ebp
|
||||
movl (%edi),%edi
|
||||
jz L009bn_sqr_mont
|
||||
movl %eax,28(%esp)
|
||||
movl (%esi),%eax
|
||||
xorl %edx,%edx
|
||||
.align 4,0x90
|
||||
L010mull:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %eax,%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
movl (%esi,%ecx,4),%eax
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
jl L010mull
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
movl 20(%esp),%edi
|
||||
addl %ebp,%eax
|
||||
movl 16(%esp),%esi
|
||||
adcl $0,%edx
|
||||
imull 32(%esp),%edi
|
||||
movl %eax,32(%esp,%ebx,4)
|
||||
xorl %ecx,%ecx
|
||||
movl %edx,36(%esp,%ebx,4)
|
||||
movl %ecx,40(%esp,%ebx,4)
|
||||
movl (%esi),%eax
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
movl 4(%esi),%eax
|
||||
adcl $0,%edx
|
||||
incl %ecx
|
||||
jmp L0112ndmadd
|
||||
.align 4,0x90
|
||||
L0121stmadd:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl (%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
jl L0121stmadd
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ebx,4),%eax
|
||||
movl 20(%esp),%edi
|
||||
adcl $0,%edx
|
||||
movl 16(%esp),%esi
|
||||
addl %eax,%ebp
|
||||
adcl $0,%edx
|
||||
imull 32(%esp),%edi
|
||||
xorl %ecx,%ecx
|
||||
addl 36(%esp,%ebx,4),%edx
|
||||
movl %ebp,32(%esp,%ebx,4)
|
||||
adcl $0,%ecx
|
||||
movl (%esi),%eax
|
||||
movl %edx,36(%esp,%ebx,4)
|
||||
movl %ecx,40(%esp,%ebx,4)
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
movl 4(%esi),%eax
|
||||
adcl $0,%edx
|
||||
movl $1,%ecx
|
||||
.align 4,0x90
|
||||
L0112ndmadd:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl (%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,24(%esp,%ecx,4)
|
||||
jl L0112ndmadd
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ebx,4),%ebp
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
adcl $0,%edx
|
||||
movl %ebp,28(%esp,%ebx,4)
|
||||
xorl %eax,%eax
|
||||
movl 12(%esp),%ecx
|
||||
addl 36(%esp,%ebx,4),%edx
|
||||
adcl 40(%esp,%ebx,4),%eax
|
||||
leal 4(%ecx),%ecx
|
||||
movl %edx,32(%esp,%ebx,4)
|
||||
cmpl 28(%esp),%ecx
|
||||
movl %eax,36(%esp,%ebx,4)
|
||||
je L008common_tail
|
||||
movl (%ecx),%edi
|
||||
movl 8(%esp),%esi
|
||||
movl %ecx,12(%esp)
|
||||
xorl %ecx,%ecx
|
||||
xorl %edx,%edx
|
||||
movl (%esi),%eax
|
||||
jmp L0121stmadd
|
||||
.align 4,0x90
|
||||
L009bn_sqr_mont:
|
||||
movl %ebx,(%esp)
|
||||
movl %ecx,12(%esp)
|
||||
movl %edi,%eax
|
||||
mull %edi
|
||||
movl %eax,32(%esp)
|
||||
movl %edx,%ebx
|
||||
shrl $1,%edx
|
||||
andl $1,%ebx
|
||||
incl %ecx
|
||||
.align 4,0x90
|
||||
L013sqr:
|
||||
movl (%esi,%ecx,4),%eax
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %ebp,%eax
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
leal (%ebx,%eax,2),%ebp
|
||||
shrl $31,%eax
|
||||
cmpl (%esp),%ecx
|
||||
movl %eax,%ebx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
jl L013sqr
|
||||
movl (%esi,%ecx,4),%eax
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %ebp,%eax
|
||||
movl 20(%esp),%edi
|
||||
adcl $0,%edx
|
||||
movl 16(%esp),%esi
|
||||
leal (%ebx,%eax,2),%ebp
|
||||
imull 32(%esp),%edi
|
||||
shrl $31,%eax
|
||||
movl %ebp,32(%esp,%ecx,4)
|
||||
leal (%eax,%edx,2),%ebp
|
||||
movl (%esi),%eax
|
||||
shrl $31,%edx
|
||||
movl %ebp,36(%esp,%ecx,4)
|
||||
movl %edx,40(%esp,%ecx,4)
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
movl %ecx,%ebx
|
||||
adcl $0,%edx
|
||||
movl 4(%esi),%eax
|
||||
movl $1,%ecx
|
||||
.align 4,0x90
|
||||
L0143rdmadd:
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl 4(%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 36(%esp,%ecx,4),%ebp
|
||||
leal 2(%ecx),%ecx
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
movl (%esi,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
cmpl %ebx,%ecx
|
||||
movl %ebp,24(%esp,%ecx,4)
|
||||
jl L0143rdmadd
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl 32(%esp,%ebx,4),%ebp
|
||||
adcl $0,%edx
|
||||
addl %eax,%ebp
|
||||
adcl $0,%edx
|
||||
movl %ebp,28(%esp,%ebx,4)
|
||||
movl 12(%esp),%ecx
|
||||
xorl %eax,%eax
|
||||
movl 8(%esp),%esi
|
||||
addl 36(%esp,%ebx,4),%edx
|
||||
adcl 40(%esp,%ebx,4),%eax
|
||||
movl %edx,32(%esp,%ebx,4)
|
||||
cmpl %ebx,%ecx
|
||||
movl %eax,36(%esp,%ebx,4)
|
||||
je L008common_tail
|
||||
movl 4(%esi,%ecx,4),%edi
|
||||
leal 1(%ecx),%ecx
|
||||
movl %edi,%eax
|
||||
movl %ecx,12(%esp)
|
||||
mull %edi
|
||||
addl 32(%esp,%ecx,4),%eax
|
||||
adcl $0,%edx
|
||||
movl %eax,32(%esp,%ecx,4)
|
||||
xorl %ebp,%ebp
|
||||
cmpl %ebx,%ecx
|
||||
leal 1(%ecx),%ecx
|
||||
je L015sqrlast
|
||||
movl %edx,%ebx
|
||||
shrl $1,%edx
|
||||
andl $1,%ebx
|
||||
.align 4,0x90
|
||||
L016sqradd:
|
||||
movl (%esi,%ecx,4),%eax
|
||||
movl %edx,%ebp
|
||||
mull %edi
|
||||
addl %ebp,%eax
|
||||
leal (%eax,%eax,1),%ebp
|
||||
adcl $0,%edx
|
||||
shrl $31,%eax
|
||||
addl 32(%esp,%ecx,4),%ebp
|
||||
leal 1(%ecx),%ecx
|
||||
adcl $0,%eax
|
||||
addl %ebx,%ebp
|
||||
adcl $0,%eax
|
||||
cmpl (%esp),%ecx
|
||||
movl %ebp,28(%esp,%ecx,4)
|
||||
movl %eax,%ebx
|
||||
jle L016sqradd
|
||||
movl %edx,%ebp
|
||||
addl %edx,%edx
|
||||
shrl $31,%ebp
|
||||
addl %ebx,%edx
|
||||
adcl $0,%ebp
|
||||
L015sqrlast:
|
||||
movl 20(%esp),%edi
|
||||
movl 16(%esp),%esi
|
||||
imull 32(%esp),%edi
|
||||
addl 32(%esp,%ecx,4),%edx
|
||||
movl (%esi),%eax
|
||||
adcl $0,%ebp
|
||||
movl %edx,32(%esp,%ecx,4)
|
||||
movl %ebp,36(%esp,%ecx,4)
|
||||
mull %edi
|
||||
addl 32(%esp),%eax
|
||||
leal -1(%ecx),%ebx
|
||||
adcl $0,%edx
|
||||
movl $1,%ecx
|
||||
movl 4(%esi),%eax
|
||||
jmp L0143rdmadd
|
||||
.align 4,0x90
|
||||
L008common_tail:
|
||||
movl 16(%esp),%ebp
|
||||
movl 4(%esp),%edi
|
||||
leal 32(%esp),%esi
|
||||
movl (%esi),%eax
|
||||
movl %ebx,%ecx
|
||||
xorl %edx,%edx
|
||||
.align 4,0x90
|
||||
L017sub:
|
||||
sbbl (%ebp,%edx,4),%eax
|
||||
movl %eax,(%edi,%edx,4)
|
||||
decl %ecx
|
||||
movl 4(%esi,%edx,4),%eax
|
||||
leal 1(%edx),%edx
|
||||
jge L017sub
|
||||
sbbl $0,%eax
|
||||
andl %eax,%esi
|
||||
notl %eax
|
||||
movl %edi,%ebp
|
||||
andl %eax,%ebp
|
||||
orl %ebp,%esi
|
||||
.align 4,0x90
|
||||
L018copy:
|
||||
movl (%esi,%ebx,4),%eax
|
||||
movl %eax,(%edi,%ebx,4)
|
||||
movl %ecx,32(%esp,%ebx,4)
|
||||
decl %ebx
|
||||
jge L018copy
|
||||
movl 24(%esp),%esp
|
||||
movl $1,%eax
|
||||
L000just_leave:
|
||||
popl %edi
|
||||
popl %esi
|
||||
popl %ebx
|
||||
popl %ebp
|
||||
ret
|
||||
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
|
||||
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
|
||||
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
|
||||
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
|
||||
.byte 111,114,103,62,0
|
||||
.section __IMPORT,__pointers,non_lazy_symbol_pointers
|
||||
L_OPENSSL_ia32cap_P$non_lazy_ptr:
|
||||
.indirect_symbol _OPENSSL_ia32cap_P
|
||||
.long 0
|
||||
#endif
|
|
@ -0,0 +1,798 @@
|
|||
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
|
||||
.text
|
||||
.p2align 5
|
||||
_aesni_ctr32_ghash_6x:
|
||||
vmovdqu 32(%r11),%xmm2
|
||||
subq $6,%rdx
|
||||
vpxor %xmm4,%xmm4,%xmm4
|
||||
vmovdqu 0-128(%rcx),%xmm15
|
||||
vpaddb %xmm2,%xmm1,%xmm10
|
||||
vpaddb %xmm2,%xmm10,%xmm11
|
||||
vpaddb %xmm2,%xmm11,%xmm12
|
||||
vpaddb %xmm2,%xmm12,%xmm13
|
||||
vpaddb %xmm2,%xmm13,%xmm14
|
||||
vpxor %xmm15,%xmm1,%xmm9
|
||||
vmovdqu %xmm4,16+8(%rsp)
|
||||
jmp L$oop6x
|
||||
|
||||
.p2align 5
|
||||
L$oop6x:
|
||||
addl $100663296,%ebx
|
||||
jc L$handle_ctr32
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpaddb %xmm2,%xmm14,%xmm1
|
||||
vpxor %xmm15,%xmm10,%xmm10
|
||||
vpxor %xmm15,%xmm11,%xmm11
|
||||
|
||||
L$resume_ctr32:
|
||||
vmovdqu %xmm1,(%r8)
|
||||
vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5
|
||||
vpxor %xmm15,%xmm12,%xmm12
|
||||
vmovups 16-128(%rcx),%xmm2
|
||||
vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
xorq %r12,%r12
|
||||
cmpq %r14,%r15
|
||||
|
||||
vaesenc %xmm2,%xmm9,%xmm9
|
||||
vmovdqu 48+8(%rsp),%xmm0
|
||||
vpxor %xmm15,%xmm13,%xmm13
|
||||
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1
|
||||
vaesenc %xmm2,%xmm10,%xmm10
|
||||
vpxor %xmm15,%xmm14,%xmm14
|
||||
setnc %r12b
|
||||
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
|
||||
vaesenc %xmm2,%xmm11,%xmm11
|
||||
vmovdqu 16-32(%r9),%xmm3
|
||||
negq %r12
|
||||
vaesenc %xmm2,%xmm12,%xmm12
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5
|
||||
vpxor %xmm4,%xmm8,%xmm8
|
||||
vaesenc %xmm2,%xmm13,%xmm13
|
||||
vpxor %xmm5,%xmm1,%xmm4
|
||||
andq $0x60,%r12
|
||||
vmovups 32-128(%rcx),%xmm15
|
||||
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1
|
||||
vaesenc %xmm2,%xmm14,%xmm14
|
||||
|
||||
vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2
|
||||
leaq (%r14,%r12,1),%r14
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor 16+8(%rsp),%xmm8,%xmm8
|
||||
vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3
|
||||
vmovdqu 64+8(%rsp),%xmm0
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 88(%r14),%r13
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 80(%r14),%r12
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,32+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,40+8(%rsp)
|
||||
vmovdqu 48-32(%r9),%xmm5
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 48-128(%rcx),%xmm15
|
||||
vpxor %xmm1,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm2,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpxor %xmm3,%xmm7,%xmm7
|
||||
vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5
|
||||
vmovdqu 80+8(%rsp),%xmm0
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vpxor %xmm1,%xmm4,%xmm4
|
||||
vmovdqu 64-32(%r9),%xmm1
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 64-128(%rcx),%xmm15
|
||||
vpxor %xmm2,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm3,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 72(%r14),%r13
|
||||
vpxor %xmm5,%xmm7,%xmm7
|
||||
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 64(%r14),%r12
|
||||
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1
|
||||
vmovdqu 96+8(%rsp),%xmm0
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,48+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,56+8(%rsp)
|
||||
vpxor %xmm2,%xmm4,%xmm4
|
||||
vmovdqu 96-32(%r9),%xmm2
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 80-128(%rcx),%xmm15
|
||||
vpxor %xmm3,%xmm6,%xmm6
|
||||
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 56(%r14),%r13
|
||||
vpxor %xmm1,%xmm7,%xmm7
|
||||
vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1
|
||||
vpxor 112+8(%rsp),%xmm8,%xmm8
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 48(%r14),%r12
|
||||
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,64+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,72+8(%rsp)
|
||||
vpxor %xmm3,%xmm4,%xmm4
|
||||
vmovdqu 112-32(%r9),%xmm3
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vmovups 96-128(%rcx),%xmm15
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm1,%xmm6,%xmm6
|
||||
vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
movbeq 40(%r14),%r13
|
||||
vpxor %xmm2,%xmm7,%xmm7
|
||||
vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 32(%r14),%r12
|
||||
vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r13,80+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
movq %r12,88+8(%rsp)
|
||||
vpxor %xmm5,%xmm6,%xmm6
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vpxor %xmm1,%xmm6,%xmm6
|
||||
|
||||
vmovups 112-128(%rcx),%xmm15
|
||||
vpslldq $8,%xmm6,%xmm5
|
||||
vpxor %xmm2,%xmm4,%xmm4
|
||||
vmovdqu 16(%r11),%xmm3
|
||||
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm8,%xmm7,%xmm7
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
movbeq 24(%r14),%r13
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
movbeq 16(%r14),%r12
|
||||
vpalignr $8,%xmm4,%xmm4,%xmm0
|
||||
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
|
||||
movq %r13,96+8(%rsp)
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
movq %r12,104+8(%rsp)
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vmovups 128-128(%rcx),%xmm1
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vaesenc %xmm1,%xmm9,%xmm9
|
||||
vmovups 144-128(%rcx),%xmm15
|
||||
vaesenc %xmm1,%xmm10,%xmm10
|
||||
vpsrldq $8,%xmm6,%xmm6
|
||||
vaesenc %xmm1,%xmm11,%xmm11
|
||||
vpxor %xmm6,%xmm7,%xmm7
|
||||
vaesenc %xmm1,%xmm12,%xmm12
|
||||
vpxor %xmm0,%xmm4,%xmm4
|
||||
movbeq 8(%r14),%r13
|
||||
vaesenc %xmm1,%xmm13,%xmm13
|
||||
movbeq 0(%r14),%r12
|
||||
vaesenc %xmm1,%xmm14,%xmm14
|
||||
vmovups 160-128(%rcx),%xmm1
|
||||
cmpl $11,%ebp
|
||||
jb L$enc_tail
|
||||
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vaesenc %xmm1,%xmm9,%xmm9
|
||||
vaesenc %xmm1,%xmm10,%xmm10
|
||||
vaesenc %xmm1,%xmm11,%xmm11
|
||||
vaesenc %xmm1,%xmm12,%xmm12
|
||||
vaesenc %xmm1,%xmm13,%xmm13
|
||||
vmovups 176-128(%rcx),%xmm15
|
||||
vaesenc %xmm1,%xmm14,%xmm14
|
||||
vmovups 192-128(%rcx),%xmm1
|
||||
je L$enc_tail
|
||||
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
|
||||
vaesenc %xmm1,%xmm9,%xmm9
|
||||
vaesenc %xmm1,%xmm10,%xmm10
|
||||
vaesenc %xmm1,%xmm11,%xmm11
|
||||
vaesenc %xmm1,%xmm12,%xmm12
|
||||
vaesenc %xmm1,%xmm13,%xmm13
|
||||
vmovups 208-128(%rcx),%xmm15
|
||||
vaesenc %xmm1,%xmm14,%xmm14
|
||||
vmovups 224-128(%rcx),%xmm1
|
||||
jmp L$enc_tail
|
||||
|
||||
.p2align 5
|
||||
L$handle_ctr32:
|
||||
vmovdqu (%r11),%xmm0
|
||||
vpshufb %xmm0,%xmm1,%xmm6
|
||||
vmovdqu 48(%r11),%xmm5
|
||||
vpaddd 64(%r11),%xmm6,%xmm10
|
||||
vpaddd %xmm5,%xmm6,%xmm11
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpaddd %xmm5,%xmm10,%xmm12
|
||||
vpshufb %xmm0,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm11,%xmm13
|
||||
vpshufb %xmm0,%xmm11,%xmm11
|
||||
vpxor %xmm15,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm12,%xmm14
|
||||
vpshufb %xmm0,%xmm12,%xmm12
|
||||
vpxor %xmm15,%xmm11,%xmm11
|
||||
vpaddd %xmm5,%xmm13,%xmm1
|
||||
vpshufb %xmm0,%xmm13,%xmm13
|
||||
vpshufb %xmm0,%xmm14,%xmm14
|
||||
vpshufb %xmm0,%xmm1,%xmm1
|
||||
jmp L$resume_ctr32
|
||||
|
||||
.p2align 5
|
||||
L$enc_tail:
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vmovdqu %xmm7,16+8(%rsp)
|
||||
vpalignr $8,%xmm4,%xmm4,%xmm8
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
|
||||
vpxor 0(%rdi),%xmm1,%xmm2
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vpxor 16(%rdi),%xmm1,%xmm0
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vpxor 32(%rdi),%xmm1,%xmm5
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vpxor 48(%rdi),%xmm1,%xmm6
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vpxor 64(%rdi),%xmm1,%xmm7
|
||||
vpxor 80(%rdi),%xmm1,%xmm3
|
||||
vmovdqu (%r8),%xmm1
|
||||
|
||||
vaesenclast %xmm2,%xmm9,%xmm9
|
||||
vmovdqu 32(%r11),%xmm2
|
||||
vaesenclast %xmm0,%xmm10,%xmm10
|
||||
vpaddb %xmm2,%xmm1,%xmm0
|
||||
movq %r13,112+8(%rsp)
|
||||
leaq 96(%rdi),%rdi
|
||||
vaesenclast %xmm5,%xmm11,%xmm11
|
||||
vpaddb %xmm2,%xmm0,%xmm5
|
||||
movq %r12,120+8(%rsp)
|
||||
leaq 96(%rsi),%rsi
|
||||
vmovdqu 0-128(%rcx),%xmm15
|
||||
vaesenclast %xmm6,%xmm12,%xmm12
|
||||
vpaddb %xmm2,%xmm5,%xmm6
|
||||
vaesenclast %xmm7,%xmm13,%xmm13
|
||||
vpaddb %xmm2,%xmm6,%xmm7
|
||||
vaesenclast %xmm3,%xmm14,%xmm14
|
||||
vpaddb %xmm2,%xmm7,%xmm3
|
||||
|
||||
addq $0x60,%r10
|
||||
subq $0x6,%rdx
|
||||
jc L$6x_done
|
||||
|
||||
vmovups %xmm9,-96(%rsi)
|
||||
vpxor %xmm15,%xmm1,%xmm9
|
||||
vmovups %xmm10,-80(%rsi)
|
||||
vmovdqa %xmm0,%xmm10
|
||||
vmovups %xmm11,-64(%rsi)
|
||||
vmovdqa %xmm5,%xmm11
|
||||
vmovups %xmm12,-48(%rsi)
|
||||
vmovdqa %xmm6,%xmm12
|
||||
vmovups %xmm13,-32(%rsi)
|
||||
vmovdqa %xmm7,%xmm13
|
||||
vmovups %xmm14,-16(%rsi)
|
||||
vmovdqa %xmm3,%xmm14
|
||||
vmovdqu 32+8(%rsp),%xmm7
|
||||
jmp L$oop6x
|
||||
|
||||
L$6x_done:
|
||||
vpxor 16+8(%rsp),%xmm8,%xmm8
|
||||
vpxor %xmm4,%xmm8,%xmm8
|
||||
|
||||
.byte 0xf3,0xc3
|
||||
|
||||
.globl _aesni_gcm_decrypt
|
||||
.private_extern _aesni_gcm_decrypt
|
||||
|
||||
.p2align 5
|
||||
_aesni_gcm_decrypt:
|
||||
xorq %r10,%r10
|
||||
|
||||
|
||||
|
||||
cmpq $0x60,%rdx
|
||||
jb L$gcm_dec_abort
|
||||
|
||||
leaq (%rsp),%rax
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r12
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
vzeroupper
|
||||
|
||||
vmovdqu (%r8),%xmm1
|
||||
addq $-128,%rsp
|
||||
movl 12(%r8),%ebx
|
||||
leaq L$bswap_mask(%rip),%r11
|
||||
leaq -128(%rcx),%r14
|
||||
movq $0xf80,%r15
|
||||
vmovdqu (%r9),%xmm8
|
||||
andq $-128,%rsp
|
||||
vmovdqu (%r11),%xmm0
|
||||
leaq 128(%rcx),%rcx
|
||||
leaq 32+32(%r9),%r9
|
||||
movl 240-128(%rcx),%ebp
|
||||
vpshufb %xmm0,%xmm8,%xmm8
|
||||
|
||||
andq %r15,%r14
|
||||
andq %rsp,%r15
|
||||
subq %r14,%r15
|
||||
jc L$dec_no_key_aliasing
|
||||
cmpq $768,%r15
|
||||
jnc L$dec_no_key_aliasing
|
||||
subq %r15,%rsp
|
||||
L$dec_no_key_aliasing:
|
||||
|
||||
vmovdqu 80(%rdi),%xmm7
|
||||
leaq (%rdi),%r14
|
||||
vmovdqu 64(%rdi),%xmm4
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
leaq -192(%rdi,%rdx,1),%r15
|
||||
|
||||
vmovdqu 48(%rdi),%xmm5
|
||||
shrq $4,%rdx
|
||||
xorq %r10,%r10
|
||||
vmovdqu 32(%rdi),%xmm6
|
||||
vpshufb %xmm0,%xmm7,%xmm7
|
||||
vmovdqu 16(%rdi),%xmm2
|
||||
vpshufb %xmm0,%xmm4,%xmm4
|
||||
vmovdqu (%rdi),%xmm3
|
||||
vpshufb %xmm0,%xmm5,%xmm5
|
||||
vmovdqu %xmm4,48(%rsp)
|
||||
vpshufb %xmm0,%xmm6,%xmm6
|
||||
vmovdqu %xmm5,64(%rsp)
|
||||
vpshufb %xmm0,%xmm2,%xmm2
|
||||
vmovdqu %xmm6,80(%rsp)
|
||||
vpshufb %xmm0,%xmm3,%xmm3
|
||||
vmovdqu %xmm2,96(%rsp)
|
||||
vmovdqu %xmm3,112(%rsp)
|
||||
|
||||
call _aesni_ctr32_ghash_6x
|
||||
|
||||
vmovups %xmm9,-96(%rsi)
|
||||
vmovups %xmm10,-80(%rsi)
|
||||
vmovups %xmm11,-64(%rsi)
|
||||
vmovups %xmm12,-48(%rsi)
|
||||
vmovups %xmm13,-32(%rsi)
|
||||
vmovups %xmm14,-16(%rsi)
|
||||
|
||||
vpshufb (%r11),%xmm8,%xmm8
|
||||
vmovdqu %xmm8,-64(%r9)
|
||||
|
||||
vzeroupper
|
||||
movq -48(%rax),%r15
|
||||
movq -40(%rax),%r14
|
||||
movq -32(%rax),%r13
|
||||
movq -24(%rax),%r12
|
||||
movq -16(%rax),%rbp
|
||||
movq -8(%rax),%rbx
|
||||
leaq (%rax),%rsp
|
||||
L$gcm_dec_abort:
|
||||
movq %r10,%rax
|
||||
.byte 0xf3,0xc3
|
||||
|
||||
|
||||
.p2align 5
|
||||
_aesni_ctr32_6x:
|
||||
vmovdqu 0-128(%rcx),%xmm4
|
||||
vmovdqu 32(%r11),%xmm2
|
||||
leaq -1(%rbp),%r13
|
||||
vmovups 16-128(%rcx),%xmm15
|
||||
leaq 32-128(%rcx),%r12
|
||||
vpxor %xmm4,%xmm1,%xmm9
|
||||
addl $100663296,%ebx
|
||||
jc L$handle_ctr32_2
|
||||
vpaddb %xmm2,%xmm1,%xmm10
|
||||
vpaddb %xmm2,%xmm10,%xmm11
|
||||
vpxor %xmm4,%xmm10,%xmm10
|
||||
vpaddb %xmm2,%xmm11,%xmm12
|
||||
vpxor %xmm4,%xmm11,%xmm11
|
||||
vpaddb %xmm2,%xmm12,%xmm13
|
||||
vpxor %xmm4,%xmm12,%xmm12
|
||||
vpaddb %xmm2,%xmm13,%xmm14
|
||||
vpxor %xmm4,%xmm13,%xmm13
|
||||
vpaddb %xmm2,%xmm14,%xmm1
|
||||
vpxor %xmm4,%xmm14,%xmm14
|
||||
jmp L$oop_ctr32
|
||||
|
||||
.p2align 4
|
||||
L$oop_ctr32:
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vmovups (%r12),%xmm15
|
||||
leaq 16(%r12),%r12
|
||||
decl %r13d
|
||||
jnz L$oop_ctr32
|
||||
|
||||
vmovdqu (%r12),%xmm3
|
||||
vaesenc %xmm15,%xmm9,%xmm9
|
||||
vpxor 0(%rdi),%xmm3,%xmm4
|
||||
vaesenc %xmm15,%xmm10,%xmm10
|
||||
vpxor 16(%rdi),%xmm3,%xmm5
|
||||
vaesenc %xmm15,%xmm11,%xmm11
|
||||
vpxor 32(%rdi),%xmm3,%xmm6
|
||||
vaesenc %xmm15,%xmm12,%xmm12
|
||||
vpxor 48(%rdi),%xmm3,%xmm8
|
||||
vaesenc %xmm15,%xmm13,%xmm13
|
||||
vpxor 64(%rdi),%xmm3,%xmm2
|
||||
vaesenc %xmm15,%xmm14,%xmm14
|
||||
vpxor 80(%rdi),%xmm3,%xmm3
|
||||
leaq 96(%rdi),%rdi
|
||||
|
||||
vaesenclast %xmm4,%xmm9,%xmm9
|
||||
vaesenclast %xmm5,%xmm10,%xmm10
|
||||
vaesenclast %xmm6,%xmm11,%xmm11
|
||||
vaesenclast %xmm8,%xmm12,%xmm12
|
||||
vaesenclast %xmm2,%xmm13,%xmm13
|
||||
vaesenclast %xmm3,%xmm14,%xmm14
|
||||
vmovups %xmm9,0(%rsi)
|
||||
vmovups %xmm10,16(%rsi)
|
||||
vmovups %xmm11,32(%rsi)
|
||||
vmovups %xmm12,48(%rsi)
|
||||
vmovups %xmm13,64(%rsi)
|
||||
vmovups %xmm14,80(%rsi)
|
||||
leaq 96(%rsi),%rsi
|
||||
|
||||
.byte 0xf3,0xc3
|
||||
.p2align 5
|
||||
L$handle_ctr32_2:
|
||||
vpshufb %xmm0,%xmm1,%xmm6
|
||||
vmovdqu 48(%r11),%xmm5
|
||||
vpaddd 64(%r11),%xmm6,%xmm10
|
||||
vpaddd %xmm5,%xmm6,%xmm11
|
||||
vpaddd %xmm5,%xmm10,%xmm12
|
||||
vpshufb %xmm0,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm11,%xmm13
|
||||
vpshufb %xmm0,%xmm11,%xmm11
|
||||
vpxor %xmm4,%xmm10,%xmm10
|
||||
vpaddd %xmm5,%xmm12,%xmm14
|
||||
vpshufb %xmm0,%xmm12,%xmm12
|
||||
vpxor %xmm4,%xmm11,%xmm11
|
||||
vpaddd %xmm5,%xmm13,%xmm1
|
||||
vpshufb %xmm0,%xmm13,%xmm13
|
||||
vpxor %xmm4,%xmm12,%xmm12
|
||||
vpshufb %xmm0,%xmm14,%xmm14
|
||||
vpxor %xmm4,%xmm13,%xmm13
|
||||
vpshufb %xmm0,%xmm1,%xmm1
|
||||
vpxor %xmm4,%xmm14,%xmm14
|
||||
jmp L$oop_ctr32
|
||||
|
||||
|
||||
.globl _aesni_gcm_encrypt
|
||||
.private_extern _aesni_gcm_encrypt
|
||||
|
||||
.p2align 5
|
||||
_aesni_gcm_encrypt:
|
||||
xorq %r10,%r10
|
||||
|
||||
|
||||
|
||||
|
||||
cmpq $288,%rdx
|
||||
jb L$gcm_enc_abort
|
||||
|
||||
leaq (%rsp),%rax
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r12
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
vzeroupper
|
||||
|
||||
vmovdqu (%r8),%xmm1
|
||||
addq $-128,%rsp
|
||||
movl 12(%r8),%ebx
|
||||
leaq L$bswap_mask(%rip),%r11
|
||||
leaq -128(%rcx),%r14
|
||||
movq $0xf80,%r15
|
||||
leaq 128(%rcx),%rcx
|
||||
vmovdqu (%r11),%xmm0
|
||||
andq $-128,%rsp
|
||||
movl 240-128(%rcx),%ebp
|
||||
|
||||
andq %r15,%r14
|
||||
andq %rsp,%r15
|
||||
subq %r14,%r15
|
||||
jc L$enc_no_key_aliasing
|
||||
cmpq $768,%r15
|
||||
jnc L$enc_no_key_aliasing
|
||||
subq %r15,%rsp
|
||||
L$enc_no_key_aliasing:
|
||||
|
||||
leaq (%rsi),%r14
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
leaq -192(%rsi,%rdx,1),%r15
|
||||
|
||||
shrq $4,%rdx
|
||||
|
||||
call _aesni_ctr32_6x
|
||||
vpshufb %xmm0,%xmm9,%xmm8
|
||||
vpshufb %xmm0,%xmm10,%xmm2
|
||||
vmovdqu %xmm8,112(%rsp)
|
||||
vpshufb %xmm0,%xmm11,%xmm4
|
||||
vmovdqu %xmm2,96(%rsp)
|
||||
vpshufb %xmm0,%xmm12,%xmm5
|
||||
vmovdqu %xmm4,80(%rsp)
|
||||
vpshufb %xmm0,%xmm13,%xmm6
|
||||
vmovdqu %xmm5,64(%rsp)
|
||||
vpshufb %xmm0,%xmm14,%xmm7
|
||||
vmovdqu %xmm6,48(%rsp)
|
||||
|
||||
call _aesni_ctr32_6x
|
||||
|
||||
vmovdqu (%r9),%xmm8
|
||||
leaq 32+32(%r9),%r9
|
||||
subq $12,%rdx
|
||||
movq $192,%r10
|
||||
vpshufb %xmm0,%xmm8,%xmm8
|
||||
|
||||
call _aesni_ctr32_ghash_6x
|
||||
vmovdqu 32(%rsp),%xmm7
|
||||
vmovdqu (%r11),%xmm0
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpunpckhqdq %xmm7,%xmm7,%xmm1
|
||||
vmovdqu 32-32(%r9),%xmm15
|
||||
vmovups %xmm9,-96(%rsi)
|
||||
vpshufb %xmm0,%xmm9,%xmm9
|
||||
vpxor %xmm7,%xmm1,%xmm1
|
||||
vmovups %xmm10,-80(%rsi)
|
||||
vpshufb %xmm0,%xmm10,%xmm10
|
||||
vmovups %xmm11,-64(%rsi)
|
||||
vpshufb %xmm0,%xmm11,%xmm11
|
||||
vmovups %xmm12,-48(%rsi)
|
||||
vpshufb %xmm0,%xmm12,%xmm12
|
||||
vmovups %xmm13,-32(%rsi)
|
||||
vpshufb %xmm0,%xmm13,%xmm13
|
||||
vmovups %xmm14,-16(%rsi)
|
||||
vpshufb %xmm0,%xmm14,%xmm14
|
||||
vmovdqu %xmm9,16(%rsp)
|
||||
vmovdqu 48(%rsp),%xmm6
|
||||
vmovdqu 16-32(%r9),%xmm0
|
||||
vpunpckhqdq %xmm6,%xmm6,%xmm2
|
||||
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5
|
||||
vpxor %xmm6,%xmm2,%xmm2
|
||||
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
|
||||
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
|
||||
|
||||
vmovdqu 64(%rsp),%xmm9
|
||||
vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4
|
||||
vmovdqu 48-32(%r9),%xmm3
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
vpunpckhqdq %xmm9,%xmm9,%xmm5
|
||||
vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6
|
||||
vpxor %xmm9,%xmm5,%xmm5
|
||||
vpxor %xmm7,%xmm6,%xmm6
|
||||
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
|
||||
vmovdqu 80-32(%r9),%xmm15
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
|
||||
vmovdqu 80(%rsp),%xmm1
|
||||
vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7
|
||||
vmovdqu 64-32(%r9),%xmm0
|
||||
vpxor %xmm4,%xmm7,%xmm7
|
||||
vpunpckhqdq %xmm1,%xmm1,%xmm4
|
||||
vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9
|
||||
vpxor %xmm1,%xmm4,%xmm4
|
||||
vpxor %xmm6,%xmm9,%xmm9
|
||||
vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5
|
||||
vpxor %xmm2,%xmm5,%xmm5
|
||||
|
||||
vmovdqu 96(%rsp),%xmm2
|
||||
vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6
|
||||
vmovdqu 96-32(%r9),%xmm3
|
||||
vpxor %xmm7,%xmm6,%xmm6
|
||||
vpunpckhqdq %xmm2,%xmm2,%xmm7
|
||||
vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1
|
||||
vpxor %xmm2,%xmm7,%xmm7
|
||||
vpxor %xmm9,%xmm1,%xmm1
|
||||
vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4
|
||||
vmovdqu 128-32(%r9),%xmm15
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
|
||||
vpxor 112(%rsp),%xmm8,%xmm8
|
||||
vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5
|
||||
vmovdqu 112-32(%r9),%xmm0
|
||||
vpunpckhqdq %xmm8,%xmm8,%xmm9
|
||||
vpxor %xmm6,%xmm5,%xmm5
|
||||
vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2
|
||||
vpxor %xmm8,%xmm9,%xmm9
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7
|
||||
vpxor %xmm4,%xmm7,%xmm4
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6
|
||||
vmovdqu 0-32(%r9),%xmm3
|
||||
vpunpckhqdq %xmm14,%xmm14,%xmm1
|
||||
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8
|
||||
vpxor %xmm14,%xmm1,%xmm1
|
||||
vpxor %xmm5,%xmm6,%xmm5
|
||||
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9
|
||||
vmovdqu 32-32(%r9),%xmm15
|
||||
vpxor %xmm2,%xmm8,%xmm7
|
||||
vpxor %xmm4,%xmm9,%xmm6
|
||||
|
||||
vmovdqu 16-32(%r9),%xmm0
|
||||
vpxor %xmm5,%xmm7,%xmm9
|
||||
vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4
|
||||
vpxor %xmm9,%xmm6,%xmm6
|
||||
vpunpckhqdq %xmm13,%xmm13,%xmm2
|
||||
vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14
|
||||
vpxor %xmm13,%xmm2,%xmm2
|
||||
vpslldq $8,%xmm6,%xmm9
|
||||
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
|
||||
vpxor %xmm9,%xmm5,%xmm8
|
||||
vpsrldq $8,%xmm6,%xmm6
|
||||
vpxor %xmm6,%xmm7,%xmm7
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5
|
||||
vmovdqu 48-32(%r9),%xmm3
|
||||
vpxor %xmm4,%xmm5,%xmm5
|
||||
vpunpckhqdq %xmm12,%xmm12,%xmm9
|
||||
vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13
|
||||
vpxor %xmm12,%xmm9,%xmm9
|
||||
vpxor %xmm14,%xmm13,%xmm13
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm14
|
||||
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
|
||||
vmovdqu 80-32(%r9),%xmm15
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
|
||||
vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4
|
||||
vmovdqu 64-32(%r9),%xmm0
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
vpunpckhqdq %xmm11,%xmm11,%xmm1
|
||||
vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12
|
||||
vpxor %xmm11,%xmm1,%xmm1
|
||||
vpxor %xmm13,%xmm12,%xmm12
|
||||
vxorps 16(%rsp),%xmm7,%xmm7
|
||||
vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9
|
||||
vpxor %xmm2,%xmm9,%xmm9
|
||||
|
||||
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
|
||||
vxorps %xmm14,%xmm8,%xmm8
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5
|
||||
vmovdqu 96-32(%r9),%xmm3
|
||||
vpxor %xmm4,%xmm5,%xmm5
|
||||
vpunpckhqdq %xmm10,%xmm10,%xmm2
|
||||
vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11
|
||||
vpxor %xmm10,%xmm2,%xmm2
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm14
|
||||
vpxor %xmm12,%xmm11,%xmm11
|
||||
vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1
|
||||
vmovdqu 128-32(%r9),%xmm15
|
||||
vpxor %xmm9,%xmm1,%xmm1
|
||||
|
||||
vxorps %xmm7,%xmm14,%xmm14
|
||||
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
|
||||
vxorps %xmm14,%xmm8,%xmm8
|
||||
|
||||
vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4
|
||||
vmovdqu 112-32(%r9),%xmm0
|
||||
vpxor %xmm5,%xmm4,%xmm4
|
||||
vpunpckhqdq %xmm8,%xmm8,%xmm9
|
||||
vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10
|
||||
vpxor %xmm8,%xmm9,%xmm9
|
||||
vpxor %xmm11,%xmm10,%xmm10
|
||||
vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2
|
||||
vpxor %xmm1,%xmm2,%xmm2
|
||||
|
||||
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5
|
||||
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7
|
||||
vpxor %xmm4,%xmm5,%xmm5
|
||||
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6
|
||||
vpxor %xmm10,%xmm7,%xmm7
|
||||
vpxor %xmm2,%xmm6,%xmm6
|
||||
|
||||
vpxor %xmm5,%xmm7,%xmm4
|
||||
vpxor %xmm4,%xmm6,%xmm6
|
||||
vpslldq $8,%xmm6,%xmm1
|
||||
vmovdqu 16(%r11),%xmm3
|
||||
vpsrldq $8,%xmm6,%xmm6
|
||||
vpxor %xmm1,%xmm5,%xmm8
|
||||
vpxor %xmm6,%xmm7,%xmm7
|
||||
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm2
|
||||
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
|
||||
vpxor %xmm2,%xmm8,%xmm8
|
||||
|
||||
vpalignr $8,%xmm8,%xmm8,%xmm2
|
||||
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
|
||||
vpxor %xmm7,%xmm2,%xmm2
|
||||
vpxor %xmm2,%xmm8,%xmm8
|
||||
vpshufb (%r11),%xmm8,%xmm8
|
||||
vmovdqu %xmm8,-64(%r9)
|
||||
|
||||
vzeroupper
|
||||
movq -48(%rax),%r15
|
||||
movq -40(%rax),%r14
|
||||
movq -32(%rax),%r13
|
||||
movq -24(%rax),%r12
|
||||
movq -16(%rax),%rbp
|
||||
movq -8(%rax),%rbx
|
||||
leaq (%rax),%rsp
|
||||
L$gcm_enc_abort:
|
||||
movq %r10,%rax
|
||||
.byte 0xf3,0xc3
|
||||
|
||||
.p2align 6
|
||||
L$bswap_mask:
|
||||
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
|
||||
L$poly:
|
||||
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
|
||||
L$one_msb:
|
||||
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
|
||||
L$two_lsb:
|
||||
.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
L$one_lsb:
|
||||
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
||||
.p2align 6
|
||||
#endif
|
|
@ -0,0 +1,671 @@
|
|||
#if defined(__x86_64__) && !defined(OPENSSL_NO_ASM)
|
||||
.text
|
||||
.p2align 4
|
||||
|
||||
.globl _md5_block_asm_data_order
|
||||
.private_extern _md5_block_asm_data_order
|
||||
|
||||
_md5_block_asm_data_order:
|
||||
pushq %rbp
|
||||
pushq %rbx
|
||||
pushq %r12
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
L$prologue:
|
||||
|
||||
|
||||
|
||||
|
||||
movq %rdi,%rbp
|
||||
shlq $6,%rdx
|
||||
leaq (%rsi,%rdx,1),%rdi
|
||||
movl 0(%rbp),%eax
|
||||
movl 4(%rbp),%ebx
|
||||
movl 8(%rbp),%ecx
|
||||
movl 12(%rbp),%edx
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
cmpq %rdi,%rsi
|
||||
je L$end
|
||||
|
||||
|
||||
L$loop:
|
||||
movl %eax,%r8d
|
||||
movl %ebx,%r9d
|
||||
movl %ecx,%r14d
|
||||
movl %edx,%r15d
|
||||
movl 0(%rsi),%r10d
|
||||
movl %edx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
leal -680876936(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 4(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal -389564586(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 8(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal 606105819(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 12(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal -1044525330(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 16(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
xorl %ecx,%r11d
|
||||
leal -176418897(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 20(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal 1200080426(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 24(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal -1473231341(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 28(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal -45705983(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 32(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
xorl %ecx,%r11d
|
||||
leal 1770035416(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 36(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal -1958414417(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 40(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal -42063(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 44(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal -1990404162(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 48(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
xorl %ecx,%r11d
|
||||
leal 1804603682(%rax,%r10,1),%eax
|
||||
andl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
movl 52(%rsi),%r10d
|
||||
addl %r11d,%eax
|
||||
roll $7,%eax
|
||||
movl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
xorl %ebx,%r11d
|
||||
leal -40341101(%rdx,%r10,1),%edx
|
||||
andl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
movl 56(%rsi),%r10d
|
||||
addl %r11d,%edx
|
||||
roll $12,%edx
|
||||
movl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
xorl %eax,%r11d
|
||||
leal -1502002290(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
movl 60(%rsi),%r10d
|
||||
addl %r11d,%ecx
|
||||
roll $17,%ecx
|
||||
movl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
xorl %edx,%r11d
|
||||
leal 1236535329(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
movl 0(%rsi),%r10d
|
||||
addl %r11d,%ebx
|
||||
roll $22,%ebx
|
||||
movl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
movl 4(%rsi),%r10d
|
||||
movl %edx,%r11d
|
||||
movl %edx,%r12d
|
||||
notl %r11d
|
||||
leal -165796510(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 24(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal -1069501632(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 44(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal 643717713(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 0(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal -373897302(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 20(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
notl %r11d
|
||||
leal -701558691(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 40(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal 38016083(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 60(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal -660478335(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 16(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal -405537848(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 36(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
notl %r11d
|
||||
leal 568446438(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 56(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal -1019803690(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 12(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal -187363961(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 32(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal 1163531501(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 52(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
notl %r11d
|
||||
leal -1444681467(%rax,%r10,1),%eax
|
||||
andl %ebx,%r12d
|
||||
andl %ecx,%r11d
|
||||
movl 8(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ecx,%r11d
|
||||
addl %r12d,%eax
|
||||
movl %ecx,%r12d
|
||||
roll $5,%eax
|
||||
addl %ebx,%eax
|
||||
notl %r11d
|
||||
leal -51403784(%rdx,%r10,1),%edx
|
||||
andl %eax,%r12d
|
||||
andl %ebx,%r11d
|
||||
movl 28(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %ebx,%r11d
|
||||
addl %r12d,%edx
|
||||
movl %ebx,%r12d
|
||||
roll $9,%edx
|
||||
addl %eax,%edx
|
||||
notl %r11d
|
||||
leal 1735328473(%rcx,%r10,1),%ecx
|
||||
andl %edx,%r12d
|
||||
andl %eax,%r11d
|
||||
movl 48(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %eax,%r11d
|
||||
addl %r12d,%ecx
|
||||
movl %eax,%r12d
|
||||
roll $14,%ecx
|
||||
addl %edx,%ecx
|
||||
notl %r11d
|
||||
leal -1926607734(%rbx,%r10,1),%ebx
|
||||
andl %ecx,%r12d
|
||||
andl %edx,%r11d
|
||||
movl 0(%rsi),%r10d
|
||||
orl %r11d,%r12d
|
||||
movl %edx,%r11d
|
||||
addl %r12d,%ebx
|
||||
movl %edx,%r12d
|
||||
roll $20,%ebx
|
||||
addl %ecx,%ebx
|
||||
movl 20(%rsi),%r10d
|
||||
movl %ecx,%r11d
|
||||
leal -378558(%rax,%r10,1),%eax
|
||||
movl 32(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -2022574463(%rdx,%r10,1),%edx
|
||||
movl 44(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal 1839030562(%rcx,%r10,1),%ecx
|
||||
movl 56(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -35309556(%rbx,%r10,1),%ebx
|
||||
movl 4(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal -1530992060(%rax,%r10,1),%eax
|
||||
movl 16(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal 1272893353(%rdx,%r10,1),%edx
|
||||
movl 28(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal -155497632(%rcx,%r10,1),%ecx
|
||||
movl 40(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -1094730640(%rbx,%r10,1),%ebx
|
||||
movl 52(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal 681279174(%rax,%r10,1),%eax
|
||||
movl 0(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -358537222(%rdx,%r10,1),%edx
|
||||
movl 12(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal -722521979(%rcx,%r10,1),%ecx
|
||||
movl 24(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal 76029189(%rbx,%r10,1),%ebx
|
||||
movl 36(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal -640364487(%rax,%r10,1),%eax
|
||||
movl 48(%rsi),%r10d
|
||||
xorl %edx,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%eax
|
||||
roll $4,%eax
|
||||
movl %ebx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -421815835(%rdx,%r10,1),%edx
|
||||
movl 60(%rsi),%r10d
|
||||
xorl %ecx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%edx
|
||||
roll $11,%edx
|
||||
movl %eax,%r11d
|
||||
addl %eax,%edx
|
||||
leal 530742520(%rcx,%r10,1),%ecx
|
||||
movl 8(%rsi),%r10d
|
||||
xorl %ebx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ecx
|
||||
roll $16,%ecx
|
||||
movl %edx,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -995338651(%rbx,%r10,1),%ebx
|
||||
movl 0(%rsi),%r10d
|
||||
xorl %eax,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%ebx
|
||||
roll $23,%ebx
|
||||
movl %ecx,%r11d
|
||||
addl %ecx,%ebx
|
||||
movl 0(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
xorl %edx,%r11d
|
||||
leal -198630844(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 28(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal 1126891415(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 56(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal -1416354905(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 20(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -57434055(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 48(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal 1700485571(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 12(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -1894986606(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 40(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal -1051523(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 4(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -2054922799(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 32(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal 1873313359(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 60(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -30611744(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 24(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal -1560198380(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 52(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal 1309151649(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 16(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
leal -145523070(%rax,%r10,1),%eax
|
||||
orl %ebx,%r11d
|
||||
xorl %ecx,%r11d
|
||||
addl %r11d,%eax
|
||||
movl 44(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $6,%eax
|
||||
xorl %ecx,%r11d
|
||||
addl %ebx,%eax
|
||||
leal -1120210379(%rdx,%r10,1),%edx
|
||||
orl %eax,%r11d
|
||||
xorl %ebx,%r11d
|
||||
addl %r11d,%edx
|
||||
movl 8(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $10,%edx
|
||||
xorl %ebx,%r11d
|
||||
addl %eax,%edx
|
||||
leal 718787259(%rcx,%r10,1),%ecx
|
||||
orl %edx,%r11d
|
||||
xorl %eax,%r11d
|
||||
addl %r11d,%ecx
|
||||
movl 36(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $15,%ecx
|
||||
xorl %eax,%r11d
|
||||
addl %edx,%ecx
|
||||
leal -343485551(%rbx,%r10,1),%ebx
|
||||
orl %ecx,%r11d
|
||||
xorl %edx,%r11d
|
||||
addl %r11d,%ebx
|
||||
movl 0(%rsi),%r10d
|
||||
movl $0xffffffff,%r11d
|
||||
roll $21,%ebx
|
||||
xorl %edx,%r11d
|
||||
addl %ecx,%ebx
|
||||
|
||||
addl %r8d,%eax
|
||||
addl %r9d,%ebx
|
||||
addl %r14d,%ecx
|
||||
addl %r15d,%edx
|
||||
|
||||
|
||||
addq $64,%rsi
|
||||
cmpq %rdi,%rsi
|
||||
jb L$loop
|
||||
|
||||
|
||||
L$end:
|
||||
movl %eax,0(%rbp)
|
||||
movl %ebx,4(%rbp)
|
||||
movl %ecx,8(%rbp)
|
||||
movl %edx,12(%rbp)
|
||||
|
||||
movq (%rsp),%r15
|
||||
movq 8(%rsp),%r14
|
||||
movq 16(%rsp),%r12
|
||||
movq 24(%rsp),%rbx
|
||||
movq 32(%rsp),%rbp
|
||||
addq $40,%rsp
|
||||
L$epilogue:
|
||||
.byte 0xf3,0xc3
|
||||
|
||||
#endif
|