Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1621350 - land NSS 710d10a72934 UPGRADE_NSS_RELEASE, r=jcj
2020-03-10 Kevin Jacobs <kjacobs@mozilla.com>
* lib/ssl/ssl3exthandle.c:
Bug 1618915 - Fix UBSAN issue in ssl_ParseSessionTicket
r=jcj,bbeurdouche
[710d10a72934] [tip]
2020-03-09 Kevin Jacobs <kjacobs@mozilla.com>
* lib/ssl/ssl3exthandle.c:
Bug 1618739 - Don't assert fuzzer behavior in SSL_ParseSessionTicket
r=jcj
[12fc91fad84a]
2020-03-03 Benjamin Beurdouche <bbeurdouche@mozilla.com>
* readme.md:
Bug 1619056 - Update README: TLS 1.3 is not experimental anymore.
r=jcj
[08944e50dce0]
2020-03-09 Kevin Jacobs <kjacobs@mozilla.com>
* gtests/ssl_gtest/ssl_version_unittest.cc, lib/ssl/sslexp.h,
lib/ssl/sslimpl.h, lib/ssl/sslsock.c, lib/ssl/tls13exthandle.c:
Bug 1619102 - Add workaround option to include both DTLS and TLS
versions in DTLS supported_versions. r=mt
Add an experimental function for enabling a DTLS 1.3
supported_versions compatibility workaround.
[53803dc4628f]
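The new gtest below exercises this call directly. For reference, a minimal application-side sketch (the helper name is ours; assumes sslexp.h is available and fd is a DTLS client socket):

```c
#include "prio.h"
#include "ssl.h"
#include "sslexp.h"

/* Hypothetical helper: opt a DTLS client socket into the
 * supported_versions compatibility workaround added here. The
 * underlying call is experimental API and may be rejected by NSS
 * versions that do not implement it. */
static SECStatus
enable_dtls13_version_workaround(PRFileDesc *fd)
{
    return SSL_SetDtls13VersionWorkaround(fd, PR_TRUE);
}
```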
2020-03-09 Benjamin Beurdouche <bbeurdouche@mozilla.com>
* automation/taskcluster/scripts/run_hacl.sh,
lib/freebl/verified/Hacl_Poly1305_128.c,
lib/freebl/verified/Hacl_Poly1305_256.c:
Bug 1612493 - Fix Firefox build for Windows 2012 x64. r=kjacobs
[7e09cdab32d0]
2020-03-02 Kevin Jacobs <kjacobs@mozilla.com>
* lib/freebl/blinit.c:
Bug 1614183 - Fixup, clang-format. r=me
[b17a367b83de] [NSS_3_51_BETA1]
2020-03-02 Giulio Benetti <giulio.benetti@benettiengineering.com>
* lib/freebl/blinit.c:
Bug 1614183 - Check if PPC __has_include(<sys/auxv.h>). r=kjacobs
Some build environments don't provide <sys/auxv.h>, which causes
build failures, so check whether that header exists by using the
__has_include() helper.
Signed-off-by: Giulio Benetti <giulio.benetti@benettiengineering.com>
[bb7c46049f26]
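For illustration, the guard pattern the fix relies on looks roughly like this (a sketch, not the literal patch; FREEBL_HAVE_AUXV and probe_hwcap are invented names):

```c
/* Only pull in <sys/auxv.h> when the toolchain can confirm it exists. */
#if defined(__has_include)
#if __has_include(<sys/auxv.h>)
#include <sys/auxv.h>
#define FREEBL_HAVE_AUXV 1
#endif
#endif

static unsigned long
probe_hwcap(void)
{
#ifdef FREEBL_HAVE_AUXV
    return getauxval(AT_HWCAP); /* kernel-reported CPU feature bits */
#else
    return 0; /* no auxv on this build environment: report no features */
#endif
}
```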
2020-03-02 Kurt Miller <kurt@intricatesoftware.com>
* lib/freebl/blinit.c:
Bug 1618400 - Fix unused variable 'getauxval' on OpenBSD/arm64 r=jcj
https://bugzilla.mozilla.org/show_bug.cgi?id=1618400
[2c989888dee7]
2020-02-28 Benjamin Beurdouche <bbeurdouche@mozilla.com>
* automation/taskcluster/graph/src/extend.js, coreconf/arch.mk,
coreconf/config.mk, lib/freebl/Makefile, lib/freebl/blapii.h,
lib/freebl/blinit.c, lib/freebl/chacha20poly1305.c,
lib/freebl/freebl.gyp,
lib/freebl/verified/Hacl_Chacha20Poly1305_256.c,
lib/freebl/verified/Hacl_Chacha20Poly1305_256.h,
lib/freebl/verified/Hacl_Chacha20_Vec256.c,
lib/freebl/verified/Hacl_Chacha20_Vec256.h,
lib/freebl/verified/Hacl_Poly1305_256.c,
lib/freebl/verified/Hacl_Poly1305_256.h, nss-tool/hw-support.c:
Bug 1612493 - Support for HACL* AVX2 code for Chacha20, Poly1305 and
Chacha20Poly1305. r=kjacobs
*** Bug 1612493 - Import AVX2 code from HACL*
*** Bug 1612493 - Add CPU detection for AVX2, BMI1, BMI2, FMA, MOVBE
*** Bug 1612493 - New flag NSS_DISABLE_AVX2 for freebl/Makefile and
freebl.gyp
*** Bug 1612493 - Disable use of AVX2 on GCC 4.4 which doesn’t
support -mavx2
*** Bug 1612493 - Disable tests when the platform doesn't have
support for AVX2
[d5deac55f543]
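The runtime dispatch this series introduces is spread across the chacha20poly1305.c hunks below; condensed into one hypothetical wrapper (the Hacl_* entry points and feature probes are real per the diff, the wrapper itself is ours), it looks like:

```c
static void
chacha20_encrypt_dispatch(uint8_t *out, uint8_t *in, uint32_t len,
                          uint8_t *key, uint8_t *nonce, uint32_t ctr)
{
#ifdef NSS_X64
    if (ssse3_support() && sse4_1_support() && avx_support()) {
#ifndef NSS_DISABLE_AVX2
        if (avx2_support()) {
            /* 256-bit vectorized path, new in this series. */
            Hacl_Chacha20_Vec256_chacha20_encrypt_256(len, out, in, key, nonce, ctr);
            return;
        }
#endif
        /* 128-bit AVX path. */
        Hacl_Chacha20_Vec128_chacha20_encrypt_128(len, out, in, key, nonce, ctr);
        return;
    }
#endif
    /* Portable scalar HACL* code on everything else. */
    Hacl_Chacha20_chacha20_encrypt(len, out, in, key, nonce, ctr);
}
```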
* automation/taskcluster/scripts/run_hacl.sh,
lib/freebl/verified/Hacl_Chacha20.c,
lib/freebl/verified/Hacl_Chacha20Poly1305_128.c,
lib/freebl/verified/Hacl_Chacha20Poly1305_32.c,
lib/freebl/verified/Hacl_Chacha20_Vec128.c,
lib/freebl/verified/Hacl_Curve25519_51.c,
lib/freebl/verified/Hacl_Kremlib.h,
lib/freebl/verified/Hacl_Poly1305_128.c,
lib/freebl/verified/Hacl_Poly1305_32.c,
lib/freebl/verified/kremlin/include/kremlin/internal/types.h,
lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128.h,
lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128_Verified.h,
lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt_8_16_32_64.h,
lib/freebl/verified/kremlin/kremlib/dist/minimal/LowStar_Endianness.h,
lib/freebl/verified/kremlin/kremlib/dist/minimal/fstar_uint128_gcc64.h,
lib/freebl/verified/libintvector.h:
Bug 1617533 - Update of HACL* after libintvector.h and coding style
changes. r=kjacobs
*** Bug 1617533 - Clang format
*** Bug 1617533 - Update HACL* commit for job in Taskcluster
*** Bug 1617533 - Update HACL* Kremlin code
[b6677ae9067e]
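The "coding style changes" are visible throughout the hunks below: clang-format turns `inline static` into `static inline`, collapses wrapped parameter lists, parenthesizes `sizeof`, and rewrites KreMLin's `i = i + (uint32_t)1U` loop steps as `i++`. An illustrative (not literal) after-style sketch of the pattern:

```c
#include <stdint.h>
#include <string.h>

/* Before this import the extracted code read:
 *     inline static void copy_and_increment(...)
 *     memcpy(dst, src, len * sizeof dst[0U]);
 *     for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U)
 * After the update the same code is formatted as follows. */
static inline void
copy_and_increment(uint32_t *dst, const uint32_t *src, uint32_t len)
{
    memcpy(dst, src, len * sizeof(dst[0U]));
    for (uint32_t i = (uint32_t)0U; i < len; i++) {
        dst[i] = dst[i] + (uint32_t)1U;
    }
}
```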
Differential Revision: https://phabricator.services.mozilla.com/D66264
--HG--
extra : moz-landing-system : lando
This commit is contained in:
Parent: 6ec8c529d1
Commit: 24e1ed50fa
@@ -1487,7 +1487,7 @@ MOZ_ARG_WITH_BOOL(system-nss,
     _USE_SYSTEM_NSS=1 )
 
 if test -n "$_USE_SYSTEM_NSS"; then
-    AM_PATH_NSS(3.51, [MOZ_SYSTEM_NSS=1], [AC_MSG_ERROR([you don't have NSS installed or your version is too old])])
+    AM_PATH_NSS(3.52, [MOZ_SYSTEM_NSS=1], [AC_MSG_ERROR([you don't have NSS installed or your version is too old])])
 fi
 
 NSS_CFLAGS="$NSS_CFLAGS -I${DIST}/include/nss"
@@ -1 +1 @@
-NSS_3_51_RTM
+710d10a72934
@@ -1 +1 @@
-NSS_3_50_BRANCH
+NSS_3_51_BRANCH
@@ -101,7 +101,7 @@ queue.filter(task => {
   // Don't run all additional hardware tests on ARM.
   if (task.group == "Cipher" && task.platform == "aarch64" && task.env &&
       (task.env.NSS_DISABLE_PCLMUL == "1" || task.env.NSS_DISABLE_HW_AES == "1"
-       || task.env.NSS_DISABLE_AVX == "1")) {
+       || task.env.NSS_DISABLE_AVX == "1" || task.env.NSS_DISABLE_AVX2 == "1")) {
     return false;
   }
 
@@ -1014,6 +1014,10 @@ function scheduleTests(task_build, task_cert, test_base) {
     name: "Cipher tests", symbol: "NoAVX", tests: "cipher",
     env: {NSS_DISABLE_AVX: "1"}, group: "Cipher"
   }));
+  queue.scheduleTask(merge(cert_base_long, {
+    name: "Cipher tests", symbol: "NoAVX2", tests: "cipher",
+    env: {NSS_DISABLE_AVX2: "1"}, group: "Cipher"
+  }));
   queue.scheduleTask(merge(cert_base_long, {
     name: "Cipher tests", symbol: "NoSSSE3|NEON", tests: "cipher",
     env: {
@@ -13,7 +13,7 @@ set -e -x -v
 # HACL CI.
 # When bug 1593647 is resolved, extract the code on CI again.
 git clone -q "https://github.com/project-everest/hacl-star" ~/hacl-star
-git -C ~/hacl-star checkout -q 186a985597d57e3b587ceb0ef6deb0b5de706ae2
+git -C ~/hacl-star checkout -q 079854e0072041d60859b6d8af2743bc6a37dc05
 
 # Format the C snapshot.
 cd ~/hacl-star/dist/mozilla
@@ -139,6 +139,35 @@ ifeq ($(OS_ARCH),OS_2)
 OS_RELEASE := $(shell uname -v)
 endif
 
+#######################################################################
+# Master "Core Components" macros for Hardware features               #
+#######################################################################
+
+ifndef NSS_DISABLE_AVX2
+  ifneq ($(CPU_ARCH),x86_64)
+    # Disable AVX2 entirely on non-Intel platforms
+    NSS_DISABLE_AVX2 = 1
+    $(warning CPU_ARCH is not x86_64, disabling -mavx2)
+  else
+    ifdef CC_IS_CLANG
+      # Clang reports its version as an older gcc, but it's OK
+      NSS_DISABLE_AVX2 = 0
+    else
+      ifneq (,$(filter 4.8 4.9,$(word 1,$(GCC_VERSION)).$(word 2,$(GCC_VERSION))))
+        NSS_DISABLE_AVX2 = 0
+      endif
+      ifeq (,$(filter 0 1 2 3 4,$(word 1,$(GCC_VERSION))))
+        NSS_DISABLE_AVX2 = 0
+      endif
+    endif
+    ifndef NSS_DISABLE_AVX2
+      $(warning Unable to find gcc 4.8 or greater, disabling -Werror)
+      NSS_DISABLE_AVX2 = 1
+    endif
+  endif
+  export NSS_DISABLE_AVX2
+endif #ndef NSS_DISABLE_AVX2
+
 #######################################################################
 # Master "Core Components" macros for getting the OS target           #
 #######################################################################
@@ -162,6 +162,10 @@ ifdef NSS_DISABLE_DBM
 DEFINES += -DNSS_DISABLE_DBM
 endif
 
+ifdef NSS_DISABLE_AVX2
+DEFINES += -DNSS_DISABLE_AVX2
+endif
+
 ifdef NSS_DISABLE_CHACHAPOLY
 DEFINES += -DNSS_DISABLE_CHACHAPOLY
 endif
@@ -10,3 +10,4 @@
 */
 
+#error "Do not include this header file."
 
@@ -355,6 +355,36 @@ TEST_F(DtlsConnectTest, DtlsSupportedVersionsEncoding) {
   EXPECT_EQ(SSL_LIBRARY_VERSION_DTLS_1_0_WIRE, static_cast<int>(version));
 }
 
+// Verify the DTLS 1.3 supported_versions interop workaround.
+TEST_F(DtlsConnectTest, Dtls13VersionWorkaround) {
+  static const uint16_t kExpectVersionsWorkaround[] = {
+      0x7f00 | DTLS_1_3_DRAFT_VERSION, SSL_LIBRARY_VERSION_DTLS_1_2_WIRE,
+      SSL_LIBRARY_VERSION_TLS_1_2, SSL_LIBRARY_VERSION_DTLS_1_0_WIRE,
+      SSL_LIBRARY_VERSION_TLS_1_1};
+  const int min_ver = SSL_LIBRARY_VERSION_TLS_1_1,
+            max_ver = SSL_LIBRARY_VERSION_TLS_1_3;
+
+  // Toggle the workaround, then verify both encodings are present.
+  EnsureTlsSetup();
+  SSL_SetDtls13VersionWorkaround(client_->ssl_fd(), PR_TRUE);
+  SSL_SetDtls13VersionWorkaround(client_->ssl_fd(), PR_FALSE);
+  SSL_SetDtls13VersionWorkaround(client_->ssl_fd(), PR_TRUE);
+  client_->SetVersionRange(min_ver, max_ver);
+  server_->SetVersionRange(min_ver, max_ver);
+  auto capture = MakeTlsFilter<TlsExtensionCapture>(
+      client_, ssl_tls13_supported_versions_xtn);
+  Connect();
+
+  uint32_t version = 0;
+  size_t off = 1;
+  ASSERT_EQ(1 + sizeof(kExpectVersionsWorkaround), capture->extension().len());
+  for (unsigned int i = 0; i < PR_ARRAY_SIZE(kExpectVersionsWorkaround); i++) {
+    ASSERT_TRUE(capture->extension().Read(off, 2, &version));
+    EXPECT_EQ(kExpectVersionsWorkaround[i], static_cast<uint16_t>(version));
+    off += 2;
+  }
+}
+
 // Verify the client sends only TLS versions in supported_versions
 TEST_F(TlsConnectTest, TlsSupportedVersionsEncoding) {
   client_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_0,
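The layout the test above walks is the standard ClientHello supported_versions body: one length octet followed by big-endian two-byte version codes. A self-contained sketch of that parse (hypothetical helper, not NSS API):

```c
#include <stddef.h>
#include <stdint.h>

static int
parse_supported_versions(const uint8_t *ext, size_t len,
                         uint16_t *out, size_t max_out, size_t *count)
{
    /* First octet is the byte length of the version list that follows. */
    if (len < 3 || ext[0] != len - 1 || (len - 1) % 2 != 0) {
        return -1; /* malformed list */
    }
    *count = 0;
    for (size_t off = 1; off + 1 < len && *count < max_out; off += 2) {
        out[(*count)++] = (uint16_t)((ext[off] << 8) | ext[off + 1]);
    }
    return 0;
}
```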
@@ -526,8 +526,12 @@ ifneq ($(shell $(CC) -? 2>&1 >/dev/null </dev/null | sed -e 's/:.*//;1q'),lcc)
         HAVE_INT128_SUPPORT = 1
         DEFINES += -DHAVE_INT128_SUPPORT
     endif
+    ifneq (,$(filter 4.8 4.9,$(word 1,$(GCC_VERSION)).$(word 2,$(GCC_VERSION))))
+        NSS_DISABLE_AVX2 = 1
+    endif
     ifeq (,$(filter 0 1 2 3 4,$(word 1,$(GCC_VERSION))))
         HAVE_INT128_SUPPORT = 1
+        NSS_DISABLE_AVX2 = 0
         DEFINES += -DHAVE_INT128_SUPPORT
     endif
 endif
@@ -540,7 +544,11 @@ endif
 
 ifndef NSS_DISABLE_CHACHAPOLY
     ifeq ($(CPU_ARCH),x86_64)
+        ifndef NSS_DISABLE_AVX2
+            EXTRA_SRCS += Hacl_Poly1305_256.c Hacl_Chacha20_Vec256.c Hacl_Chacha20Poly1305_256.c
+        else
         EXTRA_SRCS += Hacl_Poly1305_128.c Hacl_Chacha20_Vec128.c Hacl_Chacha20Poly1305_128.c
+        endif # NSS_DISABLE_AVX2
     endif # x86_64
 
     VERIFIED_SRCS += Hacl_Poly1305_32.c Hacl_Chacha20.c Hacl_Chacha20Poly1305_32.c
@@ -788,3 +796,9 @@ endif
 $(OBJDIR)/$(PROG_PREFIX)Hacl_Chacha20_Vec128$(OBJ_SUFFIX): CFLAGS += -mssse3 -msse4 -mavx -maes
 $(OBJDIR)/$(PROG_PREFIX)Hacl_Chacha20Poly1305_128$(OBJ_SUFFIX): CFLAGS += -mssse3 -msse4 -mavx -maes
 $(OBJDIR)/$(PROG_PREFIX)Hacl_Poly1305_128$(OBJ_SUFFIX): CFLAGS += -mssse3 -msse4 -mavx -maes -mpclmul
+
+ifndef NSS_DISABLE_AVX2
+$(OBJDIR)/$(PROG_PREFIX)Hacl_Chacha20Poly1305_256$(OBJ_SUFFIX): CFLAGS += -mssse3 -msse4 -mavx2 -maes
+$(OBJDIR)/$(PROG_PREFIX)Hacl_Chacha20_Vec256$(OBJ_SUFFIX): CFLAGS += -mssse3 -msse4 -mavx -mavx2 -maes
+$(OBJDIR)/$(PROG_PREFIX)Hacl_Poly1305_256$(OBJ_SUFFIX): CFLAGS += -mssse3 -msse4 -mavx -mavx2 -maes -mpclmul
+endif
@@ -80,6 +80,7 @@ SECStatus generate_prime(mp_int *prime, int primeLen);
 PRBool aesni_support();
 PRBool clmul_support();
 PRBool avx_support();
+PRBool avx2_support();
 PRBool ssse3_support();
 PRBool sse4_1_support();
 PRBool sse4_2_support();
@@ -27,6 +27,7 @@ static PRCallOnceType coFreeblInit;
 static PRBool aesni_support_ = PR_FALSE;
 static PRBool clmul_support_ = PR_FALSE;
 static PRBool avx_support_ = PR_FALSE;
+static PRBool avx2_support_ = PR_FALSE;
 static PRBool ssse3_support_ = PR_FALSE;
 static PRBool sse4_1_support_ = PR_FALSE;
 static PRBool sse4_2_support_ = PR_FALSE;
@@ -75,28 +76,43 @@ check_xcr0_ymm()
 #define ECX_XSAVE (1 << 26)
 #define ECX_OSXSAVE (1 << 27)
 #define ECX_AVX (1 << 28)
+#define EBX_AVX2 (1 << 5)
+#define EBX_BMI1 (1 << 3)
+#define EBX_BMI2 (1 << 8)
+#define ECX_FMA (1 << 12)
+#define ECX_MOVBE (1 << 22)
 #define ECX_SSSE3 (1 << 9)
 #define ECX_SSE4_1 (1 << 19)
 #define ECX_SSE4_2 (1 << 20)
 #define AVX_BITS (ECX_XSAVE | ECX_OSXSAVE | ECX_AVX)
+#define AVX2_EBX_BITS (EBX_AVX2 | EBX_BMI1 | EBX_BMI2)
+#define AVX2_ECX_BITS (ECX_FMA | ECX_MOVBE)
 
 void
 CheckX86CPUSupport()
 {
     unsigned long eax, ebx, ecx, edx;
+    unsigned long eax7, ebx7, ecx7, edx7;
     char *disable_hw_aes = PR_GetEnvSecure("NSS_DISABLE_HW_AES");
     char *disable_pclmul = PR_GetEnvSecure("NSS_DISABLE_PCLMUL");
     char *disable_avx = PR_GetEnvSecure("NSS_DISABLE_AVX");
+    char *disable_avx2 = PR_GetEnvSecure("NSS_DISABLE_AVX2");
     char *disable_ssse3 = PR_GetEnvSecure("NSS_DISABLE_SSSE3");
     char *disable_sse4_1 = PR_GetEnvSecure("NSS_DISABLE_SSE4_1");
     char *disable_sse4_2 = PR_GetEnvSecure("NSS_DISABLE_SSE4_2");
     freebl_cpuid(1, &eax, &ebx, &ecx, &edx);
+    freebl_cpuid(7, &eax7, &ebx7, &ecx7, &edx7);
     aesni_support_ = (PRBool)((ecx & ECX_AESNI) != 0 && disable_hw_aes == NULL);
     clmul_support_ = (PRBool)((ecx & ECX_CLMUL) != 0 && disable_pclmul == NULL);
     /* For AVX we check AVX, OSXSAVE, and XSAVE
      * as well as XMM and YMM state. */
     avx_support_ = (PRBool)((ecx & AVX_BITS) == AVX_BITS) && check_xcr0_ymm() &&
                    disable_avx == NULL;
+    /* For AVX2 we check AVX2, BMI1, BMI2, FMA, MOVBE.
+     * We do not check for AVX above. */
+    avx2_support_ = (PRBool)((ebx7 & AVX2_EBX_BITS) == AVX2_EBX_BITS &&
+                             (ecx & AVX2_ECX_BITS) == AVX2_ECX_BITS &&
+                             disable_avx2 == NULL);
     ssse3_support_ = (PRBool)((ecx & ECX_SSSE3) != 0 &&
                               disable_ssse3 == NULL);
     sse4_1_support_ = (PRBool)((ecx & ECX_SSE4_1) != 0 &&
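Outside of freebl, the same probe can be reproduced with the GCC/Clang <cpuid.h> helpers; note the real code additionally gates the AVX2 paths on avx_support(), whose XSAVE/YMM checks cover OS register-state support. A standalone sketch:

```c
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
    unsigned int eax, ebx, ecx, edx;     /* CPUID leaf 1 */
    unsigned int eax7, ebx7, ecx7, edx7; /* CPUID leaf 7, subleaf 0 */
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) ||
        !__get_cpuid_count(7, 0, &eax7, &ebx7, &ecx7, &edx7)) {
        return 1; /* CPUID leaf not supported */
    }
    /* Same masks as above: AVX2|BMI1|BMI2 in leaf-7 EBX, FMA|MOVBE in leaf-1 ECX. */
    unsigned int avx2_ebx_bits = (1u << 5) | (1u << 3) | (1u << 8);
    unsigned int avx2_ecx_bits = (1u << 12) | (1u << 22);
    int avx2 = (ebx7 & avx2_ebx_bits) == avx2_ebx_bits &&
               (ecx & avx2_ecx_bits) == avx2_ecx_bits;
    printf("AVX2 feature set: %s\n", avx2 ? "yes" : "no");
    return 0;
}
```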
@@ -384,6 +400,11 @@ avx_support()
     return avx_support_;
 }
+PRBool
+avx2_support()
+{
+    return avx2_support_;
+}
 PRBool
 ssse3_support()
 {
     return ssse3_support_;
@@ -15,9 +15,17 @@
 #include "blapii.h"
 #include "chacha20poly1305.h"
 
-// There are two implementations of ChaCha20Poly1305:
-// 1) 128-bit with hardware acceleration used on x64
-// 2) 32-bit used on all other platforms
+// There are three implementations of ChaCha20Poly1305:
+// 1) 128-bit with AVX hardware acceleration used on x64
+// 2) 256-bit with AVX2 hardware acceleration used on x64
+// 3) 32-bit used on all other platforms
 
+// On x64 when AVX2 and other necessary registers are available,
+// the 256bit-verctorized version will be used. When AVX2 features
+// are unavailable or disabled but AVX registers are available, the
+// 128bit-vectorized version will be used. In all other cases the
+// scalar version of the HACL* code will be used.
+
 // Instead of including the headers (they bring other things we don't want),
 // we declare the functions here.
+// Usage is guarded by runtime checks of required hardware features.
@@ -35,6 +43,19 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(uint8_t *k, uint8_t *n1, uint32_t aadlen,
                                        uint8_t *aad, uint32_t mlen, uint8_t *m,
                                        uint8_t *cipher, uint8_t *mac);
 
+// Forward declaration from Hacl_Chacha20_Vec256.h and Hacl_Chacha20Poly1305_256.h.
+extern void Hacl_Chacha20_Vec256_chacha20_encrypt_256(uint32_t len, uint8_t *out,
+                                                      uint8_t *text, uint8_t *key,
+                                                      uint8_t *n1, uint32_t ctr);
+extern void
+Hacl_Chacha20Poly1305_256_aead_encrypt(uint8_t *k, uint8_t *n1, uint32_t aadlen,
+                                       uint8_t *aad, uint32_t mlen, uint8_t *m,
+                                       uint8_t *cipher, uint8_t *mac);
+extern uint32_t
+Hacl_Chacha20Poly1305_256_aead_decrypt(uint8_t *k, uint8_t *n1, uint32_t aadlen,
+                                       uint8_t *aad, uint32_t mlen, uint8_t *m,
+                                       uint8_t *cipher, uint8_t *mac);
+
 // Forward declaration from Hacl_Chacha20.h and Hacl_Chacha20Poly1305_32.h.
 extern void Hacl_Chacha20_chacha20_encrypt(uint32_t len, uint8_t *out,
                                            uint8_t *text, uint8_t *key,
@@ -113,7 +134,15 @@ ChaCha20Xor(uint8_t *output, uint8_t *block, uint32_t len, uint8_t *k,
 {
 #ifdef NSS_X64
     if (ssse3_support() && sse4_1_support() && avx_support()) {
+#ifdef NSS_DISABLE_AVX2
         Hacl_Chacha20_Vec128_chacha20_encrypt_128(len, output, block, k, nonce, ctr);
+#else
+        if (avx2_support()) {
+            Hacl_Chacha20_Vec256_chacha20_encrypt_256(len, output, block, k, nonce, ctr);
+        } else {
+            Hacl_Chacha20_Vec128_chacha20_encrypt_128(len, output, block, k, nonce, ctr);
+        }
+#endif
     } else
 #endif
     {
@@ -167,9 +196,21 @@ ChaCha20Poly1305_Seal(const ChaCha20Poly1305Context *ctx, unsigned char *output,
 
 #ifdef NSS_X64
     if (ssse3_support() && sse4_1_support() && avx_support()) {
+#ifdef NSS_DISABLE_AVX2
         Hacl_Chacha20Poly1305_128_aead_encrypt(
             (uint8_t *)ctx->key, (uint8_t *)nonce, adLen, (uint8_t *)ad, inputLen,
             (uint8_t *)input, output, output + inputLen);
+#else
+        if (avx2_support()) {
+            Hacl_Chacha20Poly1305_256_aead_encrypt(
+                (uint8_t *)ctx->key, (uint8_t *)nonce, adLen, (uint8_t *)ad, inputLen,
+                (uint8_t *)input, output, output + inputLen);
+        } else {
+            Hacl_Chacha20Poly1305_128_aead_encrypt(
+                (uint8_t *)ctx->key, (uint8_t *)nonce, adLen, (uint8_t *)ad, inputLen,
+                (uint8_t *)input, output, output + inputLen);
+        }
+#endif
     } else
 #endif
     {
@@ -217,9 +258,21 @@ ChaCha20Poly1305_Open(const ChaCha20Poly1305Context *ctx, unsigned char *output,
     uint32_t res = 1;
 #ifdef NSS_X64
     if (ssse3_support() && sse4_1_support() && avx_support()) {
+#ifdef NSS_DISABLE_AVX2
         res = Hacl_Chacha20Poly1305_128_aead_decrypt(
             (uint8_t *)ctx->key, (uint8_t *)nonce, adLen, (uint8_t *)ad, ciphertextLen,
             (uint8_t *)output, (uint8_t *)input, (uint8_t *)input + ciphertextLen);
+#else
+        if (avx2_support()) {
+            res = Hacl_Chacha20Poly1305_256_aead_decrypt(
+                (uint8_t *)ctx->key, (uint8_t *)nonce, adLen, (uint8_t *)ad, ciphertextLen,
+                (uint8_t *)output, (uint8_t *)input, (uint8_t *)input + ciphertextLen);
+        } else {
+            res = Hacl_Chacha20Poly1305_128_aead_decrypt(
+                (uint8_t *)ctx->key, (uint8_t *)nonce, adLen, (uint8_t *)ad, ciphertextLen,
+                (uint8_t *)output, (uint8_t *)input, (uint8_t *)input + ciphertextLen);
+        }
+#endif
     } else
 #endif
     {
@@ -54,11 +54,10 @@
       ],
     },
     {
-      # TODO: make this so that all hardware accelerated code is in here.
-      'target_name': 'hw-acc-crypto',
+      'target_name': 'hw-acc-crypto-avx',
       'type': 'static_library',
       # 'sources': [
-      #   All hardware accelerated crypto currently requires x64
+      #   All AVX hardware accelerated crypto currently requires x64
       # ],
       'dependencies': [
         '<(DEPTH)/exports.gyp:nss_exports'
@@ -117,6 +116,72 @@
         }],
       ],
     },
+    {
+      'target_name': 'hw-acc-crypto-avx2',
+      'type': 'static_library',
+      # 'sources': [
+      #   All AVX2 hardware accelerated crypto currently requires x64
+      # ],
+      'dependencies': [
+        '<(DEPTH)/exports.gyp:nss_exports'
+      ],
+      'conditions': [
+        [ 'target_arch=="x64"', {
+          'cflags': [
+            '-mssse3',
+            '-msse4'
+          ],
+          'cflags_mozilla': [
+            '-mssse3',
+            '-msse4',
+            '-mpclmul',
+            '-maes',
+            '-mavx',
+            '-mavx2',
+          ],
+          # GCC doesn't define this.
+          'defines': [
+            '__SSSE3__',
+          ],
+        }],
+        [ 'OS=="linux" or OS=="android" or OS=="dragonfly" or OS=="freebsd" or \
+           OS=="netbsd" or OS=="openbsd"', {
+          'cflags': [
+            '-mpclmul',
+            '-maes',
+            '-mavx',
+            '-mavx2',
+          ],
+        }],
+        # macOS build doesn't use cflags.
+        [ 'OS=="mac" or OS=="ios"', {
+          'xcode_settings': {
+            'OTHER_CFLAGS': [
+              '-mssse3',
+              '-msse4',
+              '-mpclmul',
+              '-maes',
+              '-mavx',
+              '-mavx2',
+            ],
+          },
+        }],
+        [ 'target_arch=="arm"', {
+          # Gecko doesn't support non-NEON platform on Android, but tier-3
+          # platform such as Linux/arm will need it
+          'cflags_mozilla': [
+            '-mfpu=neon'
+          ],
+        }],
+        [ 'target_arch=="x64"', {
+          'sources': [
+            'verified/Hacl_Poly1305_256.c',
+            'verified/Hacl_Chacha20_Vec256.c',
+            'verified/Hacl_Chacha20Poly1305_256.c',
+          ],
+        }],
+      ],
+    },
     {
       'target_name': 'gcm-aes-x86_c_lib',
       'type': 'static_library',
@@ -253,7 +318,8 @@
       ],
       'dependencies': [
         '<(DEPTH)/exports.gyp:nss_exports',
-        'hw-acc-crypto',
+        'hw-acc-crypto-avx',
+        'hw-acc-crypto-avx2',
       ],
       'conditions': [
         [ 'target_arch=="ia32" or target_arch=="x64"', {
@@ -314,7 +380,8 @@
       ],
       'dependencies': [
         '<(DEPTH)/exports.gyp:nss_exports',
-        'hw-acc-crypto',
+        'hw-acc-crypto-avx',
+        'hw-acc-crypto-avx2',
      ],
       'conditions': [
         [ 'target_arch=="ia32" or target_arch=="x64"', {
@@ -394,7 +461,8 @@
       'type': 'shared_library',
       'dependencies': [
         '<(DEPTH)/exports.gyp:nss_exports',
-        'hw-acc-crypto',
+        'hw-acc-crypto-avx',
+        'hw-acc-crypto-avx2',
       ],
     },
     {
@@ -410,7 +478,8 @@
       ],
       'dependencies': [
         '<(DEPTH)/exports.gyp:nss_exports',
-        'hw-acc-crypto',
+        'hw-acc-crypto-avx',
+        'hw-acc-crypto-avx2',
       ],
       'asflags_mozilla': [
         '-mcpu=v9', '-Wa,-xarch=v9a'
@@ -27,13 +27,8 @@ uint32_t
 Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] =
     { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
 
-inline static void
-Hacl_Impl_Chacha20_Core32_quarter_round(
-    uint32_t *st,
-    uint32_t a,
-    uint32_t b,
-    uint32_t c,
-    uint32_t d)
+static inline void
+quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
 {
     uint32_t sta = st[a];
     uint32_t stb0 = st[b];
@@ -69,74 +64,42 @@ Hacl_Impl_Chacha20_Core32_quarter_round(
     st[b] = std22;
 }
 
-inline static void
-Hacl_Impl_Chacha20_Core32_double_round(uint32_t *st)
+static inline void
+double_round(uint32_t *st)
 {
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)0U,
-                                            (uint32_t)4U,
-                                            (uint32_t)8U,
-                                            (uint32_t)12U);
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)1U,
-                                            (uint32_t)5U,
-                                            (uint32_t)9U,
-                                            (uint32_t)13U);
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)2U,
-                                            (uint32_t)6U,
-                                            (uint32_t)10U,
-                                            (uint32_t)14U);
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)3U,
-                                            (uint32_t)7U,
-                                            (uint32_t)11U,
-                                            (uint32_t)15U);
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)0U,
-                                            (uint32_t)5U,
-                                            (uint32_t)10U,
-                                            (uint32_t)15U);
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)1U,
-                                            (uint32_t)6U,
-                                            (uint32_t)11U,
-                                            (uint32_t)12U);
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)2U,
-                                            (uint32_t)7U,
-                                            (uint32_t)8U,
-                                            (uint32_t)13U);
-    Hacl_Impl_Chacha20_Core32_quarter_round(st,
-                                            (uint32_t)3U,
-                                            (uint32_t)4U,
-                                            (uint32_t)9U,
-                                            (uint32_t)14U);
+    quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
+    quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U);
+    quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U);
+    quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U);
+    quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U);
+    quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U);
+    quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U);
+    quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U);
 }
 
-inline static void
-Hacl_Impl_Chacha20_rounds(uint32_t *st)
+static inline void
+rounds(uint32_t *st)
 {
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
-    Hacl_Impl_Chacha20_Core32_double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
+    double_round(st);
 }
 
-inline static void
-Hacl_Impl_Chacha20_chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
+static inline void
+chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-    memcpy(k, ctx, (uint32_t)16U * sizeof ctx[0U]);
+    memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
     uint32_t ctr_u32 = ctr;
     k[12U] = k[12U] + ctr_u32;
-    Hacl_Impl_Chacha20_rounds(k);
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+    rounds(k);
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
         uint32_t *os = k;
         uint32_t x = k[i] + ctx[i];
         os[i] = x;
@@ -145,20 +108,20 @@ Hacl_Impl_Chacha20_chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 }
 
 static uint32_t
-    Hacl_Impl_Chacha20_chacha20_constants[4U] =
+    chacha20_constants[4U] =
         { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
 
-inline static void
-Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
+static inline void
+chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
 {
     uint32_t *uu____0 = ctx;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
         uint32_t *os = uu____0;
-        uint32_t x = Hacl_Impl_Chacha20_chacha20_constants[i];
+        uint32_t x = chacha20_constants[i];
         os[i] = x;
     }
     uint32_t *uu____1 = ctx + (uint32_t)4U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i++) {
         uint32_t *os = uu____1;
         uint8_t *bj = k + i * (uint32_t)4U;
         uint32_t u = load32_le(bj);
@ -178,17 +141,13 @@ Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_
|
|||
}
|
||||
}
|
||||
|
||||
inline static void
|
||||
Hacl_Impl_Chacha20_chacha20_encrypt_block(
|
||||
uint32_t *ctx,
|
||||
uint8_t *out,
|
||||
uint32_t incr1,
|
||||
uint8_t *text)
|
||||
static inline void
|
||||
chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr1, uint8_t *text)
|
||||
{
|
||||
uint32_t k[16U] = { 0U };
|
||||
Hacl_Impl_Chacha20_chacha20_core(k, ctx, incr1);
|
||||
chacha20_core(k, ctx, incr1);
|
||||
uint32_t bl[16U] = { 0U };
|
||||
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
|
||||
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
|
||||
uint32_t *os = bl;
|
||||
uint8_t *bj = text + i * (uint32_t)4U;
|
||||
uint32_t u = load32_le(bj);
|
||||
|
@@ -196,48 +155,36 @@ Hacl_Impl_Chacha20_chacha20_encrypt_block(
         uint32_t x = r;
         os[i] = x;
     }
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
         uint32_t *os = bl;
         uint32_t x = bl[i] ^ k[i];
         os[i] = x;
     }
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
         store32_le(out + i * (uint32_t)4U, bl[i]);
     }
 }
 
-inline static void
-Hacl_Impl_Chacha20_chacha20_encrypt_last(
-    uint32_t *ctx,
-    uint32_t len,
-    uint8_t *out,
-    uint32_t incr1,
-    uint8_t *text)
+static inline void
+chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr1, uint8_t *text)
 {
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text, len * sizeof text[0U]);
-    Hacl_Impl_Chacha20_chacha20_encrypt_block(ctx, plain, incr1, plain);
-    memcpy(out, plain, len * sizeof plain[0U]);
+    memcpy(plain, text, len * sizeof(text[0U]));
+    chacha20_encrypt_block(ctx, plain, incr1, plain);
+    memcpy(out, plain, len * sizeof(plain[0U]));
 }
 
-inline static void
-Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
+static inline void
+chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
 {
     uint32_t rem1 = len % (uint32_t)64U;
     uint32_t nb = len / (uint32_t)64U;
     uint32_t rem2 = len % (uint32_t)64U;
-    for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
-        Hacl_Impl_Chacha20_chacha20_encrypt_block(ctx,
-                                                  out + i * (uint32_t)64U,
-                                                  i,
-                                                  text + i * (uint32_t)64U);
+    for (uint32_t i = (uint32_t)0U; i < nb; i++) {
+        chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U);
     }
     if (rem2 > (uint32_t)0U) {
-        Hacl_Impl_Chacha20_chacha20_encrypt_last(ctx,
-                                                 rem1,
-                                                 out + nb * (uint32_t)64U,
-                                                 nb,
-                                                 text + nb * (uint32_t)64U);
+        chacha20_encrypt_last(ctx, rem1, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
     }
 }
 
@@ -251,8 +198,8 @@ Hacl_Chacha20_chacha20_encrypt(
     uint32_t ctr)
 {
     uint32_t ctx[16U] = { 0U };
-    Hacl_Impl_Chacha20_chacha20_init(ctx, key, n1, ctr);
-    Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, text);
+    chacha20_init(ctx, key, n1, ctr);
+    chacha20_update(ctx, len, out, text);
 }
 
 void
@@ -265,6 +212,6 @@ Hacl_Chacha20_chacha20_decrypt(
     uint32_t ctr)
 {
     uint32_t ctx[16U] = { 0U };
-    Hacl_Impl_Chacha20_chacha20_init(ctx, key, n1, ctr);
-    Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, cipher);
+    chacha20_init(ctx, key, n1, ctr);
+    chacha20_update(ctx, len, out, cipher);
 }
@@ -23,11 +23,8 @@
 
 #include "Hacl_Chacha20Poly1305_128.h"
 
-inline static void
-Hacl_Chacha20Poly1305_128_poly1305_padded_128(
-    Lib_IntVector_Intrinsics_vec128 *ctx,
-    uint32_t len,
-    uint8_t *text)
+static inline void
+poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
 {
     uint32_t n1 = len / (uint32_t)16U;
     uint32_t r = len % (uint32_t)16U;
@@ -45,7 +42,7 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < nb; i++) {
         uint8_t *block = text1 + i * bs;
         Lib_IntVector_Intrinsics_vec128 e[5U];
         for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
@@ -200,57 +197,43 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     Lib_IntVector_Intrinsics_vec128 t3 = a34;
     Lib_IntVector_Intrinsics_vec128 t4 = a44;
-    Lib_IntVector_Intrinsics_vec128
-        l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
-    Lib_IntVector_Intrinsics_vec128
-        tmp0 =
-            Lib_IntVector_Intrinsics_vec128_and(l,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c01);
-    Lib_IntVector_Intrinsics_vec128
-        tmp1 =
-            Lib_IntVector_Intrinsics_vec128_and(l0,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
-    Lib_IntVector_Intrinsics_vec128
-        tmp2 =
-            Lib_IntVector_Intrinsics_vec128_and(l1,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
-    Lib_IntVector_Intrinsics_vec128
-        tmp3 =
-            Lib_IntVector_Intrinsics_vec128_and(l2,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
-    Lib_IntVector_Intrinsics_vec128
-        tmp4 =
-            Lib_IntVector_Intrinsics_vec128_and(l3,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-        l4 =
-            Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-                                                  Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
-    Lib_IntVector_Intrinsics_vec128
-        tmp01 =
-            Lib_IntVector_Intrinsics_vec128_and(l4,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
-    Lib_IntVector_Intrinsics_vec128 o00 = tmp01;
-    Lib_IntVector_Intrinsics_vec128 o10 = tmp11;
-    Lib_IntVector_Intrinsics_vec128 o20 = tmp2;
-    Lib_IntVector_Intrinsics_vec128 o30 = tmp3;
-    Lib_IntVector_Intrinsics_vec128 o40 = tmp4;
+    Lib_IntVector_Intrinsics_vec128
+        mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+    Lib_IntVector_Intrinsics_vec128
+        z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
+    Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
+    Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+    Lib_IntVector_Intrinsics_vec128
+        z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
+    Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
+    Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
+    Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
+    Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+    Lib_IntVector_Intrinsics_vec128
+        z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
+    Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
+    Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+    Lib_IntVector_Intrinsics_vec128
+        z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
+    Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
+    Lib_IntVector_Intrinsics_vec128 o00 = x02;
+    Lib_IntVector_Intrinsics_vec128 o10 = x12;
+    Lib_IntVector_Intrinsics_vec128 o20 = x21;
+    Lib_IntVector_Intrinsics_vec128 o30 = x32;
+    Lib_IntVector_Intrinsics_vec128 o40 = x42;
     acc0[0U] = o00;
     acc0[1U] = o10;
     acc0[2U] = o20;
@@ -283,7 +266,7 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     uint8_t *t10 = blocks + len0;
     uint32_t nb = len1 / (uint32_t)16U;
     uint32_t rem2 = len1 % (uint32_t)16U;
-    for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < nb; i++) {
         uint8_t *block = t10 + i * (uint32_t)16U;
         Lib_IntVector_Intrinsics_vec128 e[5U];
         for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
@@ -448,57 +431,43 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-        l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
-    Lib_IntVector_Intrinsics_vec128
-        tmp0 =
-            Lib_IntVector_Intrinsics_vec128_and(l,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c01);
-    Lib_IntVector_Intrinsics_vec128
-        tmp1 =
-            Lib_IntVector_Intrinsics_vec128_and(l0,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
-    Lib_IntVector_Intrinsics_vec128
-        tmp2 =
-            Lib_IntVector_Intrinsics_vec128_and(l1,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
-    Lib_IntVector_Intrinsics_vec128
-        tmp3 =
-            Lib_IntVector_Intrinsics_vec128_and(l2,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
-    Lib_IntVector_Intrinsics_vec128
-        tmp4 =
-            Lib_IntVector_Intrinsics_vec128_and(l3,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-        l4 =
-            Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-                                                  Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
-    Lib_IntVector_Intrinsics_vec128
-        tmp01 =
-            Lib_IntVector_Intrinsics_vec128_and(l4,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
-    Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
-    Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
-    Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
-    Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
-    Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
+    Lib_IntVector_Intrinsics_vec128
+        mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+    Lib_IntVector_Intrinsics_vec128
+        z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
+    Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
+    Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+    Lib_IntVector_Intrinsics_vec128
+        z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
+    Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
+    Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
+    Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
+    Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+    Lib_IntVector_Intrinsics_vec128
+        z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
+    Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
+    Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+    Lib_IntVector_Intrinsics_vec128
+        z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
+    Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
+    Lib_IntVector_Intrinsics_vec128 o0 = x02;
+    Lib_IntVector_Intrinsics_vec128 o1 = x12;
+    Lib_IntVector_Intrinsics_vec128 o2 = x21;
+    Lib_IntVector_Intrinsics_vec128 o3 = x32;
+    Lib_IntVector_Intrinsics_vec128 o4 = x42;
     acc0[0U] = o0;
     acc0[1U] = o1;
     acc0[2U] = o2;
@@ -511,7 +480,7 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
         e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
     uint8_t tmp[16U] = { 0U };
-    memcpy(tmp, last1, rem2 * sizeof last1[0U]);
+    memcpy(tmp, last1, rem2 * sizeof(last1[0U]));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
     uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -672,57 +641,43 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-        l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
-    Lib_IntVector_Intrinsics_vec128
-        tmp0 =
-            Lib_IntVector_Intrinsics_vec128_and(l,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c01);
-    Lib_IntVector_Intrinsics_vec128
-        tmp1 =
-            Lib_IntVector_Intrinsics_vec128_and(l0,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
-    Lib_IntVector_Intrinsics_vec128
-        tmp2 =
-            Lib_IntVector_Intrinsics_vec128_and(l1,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
-    Lib_IntVector_Intrinsics_vec128
-        tmp3 =
-            Lib_IntVector_Intrinsics_vec128_and(l2,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
-    Lib_IntVector_Intrinsics_vec128
-        tmp4 =
-            Lib_IntVector_Intrinsics_vec128_and(l3,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-        l4 =
-            Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-                                                  Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
-    Lib_IntVector_Intrinsics_vec128
-        tmp01 =
-            Lib_IntVector_Intrinsics_vec128_and(l4,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
-    Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
-    Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
-    Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
-    Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
-    Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
+    Lib_IntVector_Intrinsics_vec128
+        mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+    Lib_IntVector_Intrinsics_vec128
+        z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
+    Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
+    Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+    Lib_IntVector_Intrinsics_vec128
+        z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
+    Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
+    Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
+    Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
+    Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+    Lib_IntVector_Intrinsics_vec128
+        z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
+    Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
+    Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+    Lib_IntVector_Intrinsics_vec128
+        z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
+    Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
+    Lib_IntVector_Intrinsics_vec128 o0 = x02;
+    Lib_IntVector_Intrinsics_vec128 o1 = x12;
+    Lib_IntVector_Intrinsics_vec128 o2 = x21;
+    Lib_IntVector_Intrinsics_vec128 o3 = x32;
+    Lib_IntVector_Intrinsics_vec128 o4 = x42;
     acc0[0U] = o0;
     acc0[1U] = o1;
     acc0[2U] = o2;
@@ -730,7 +685,7 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
         acc0[4U] = o4;
     }
     uint8_t tmp[16U] = { 0U };
-    memcpy(tmp, rem1, r * sizeof rem1[0U]);
+    memcpy(tmp, rem1, r * sizeof(rem1[0U]));
     if (r > (uint32_t)0U) {
         Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
         Lib_IntVector_Intrinsics_vec128 *acc = ctx;
@@ -897,57 +852,43 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-        l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
-    Lib_IntVector_Intrinsics_vec128
-        tmp0 =
-            Lib_IntVector_Intrinsics_vec128_and(l,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c01);
-    Lib_IntVector_Intrinsics_vec128
-        tmp1 =
-            Lib_IntVector_Intrinsics_vec128_and(l0,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
-    Lib_IntVector_Intrinsics_vec128
-        tmp2 =
-            Lib_IntVector_Intrinsics_vec128_and(l1,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
-    Lib_IntVector_Intrinsics_vec128
-        tmp3 =
-            Lib_IntVector_Intrinsics_vec128_and(l2,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
-    Lib_IntVector_Intrinsics_vec128
-        tmp4 =
-            Lib_IntVector_Intrinsics_vec128_and(l3,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-        l4 =
-            Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-                                                  Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
-    Lib_IntVector_Intrinsics_vec128
-        tmp01 =
-            Lib_IntVector_Intrinsics_vec128_and(l4,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
-    Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
-    Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
-    Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
-    Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
-    Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
+    Lib_IntVector_Intrinsics_vec128
+        mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+    Lib_IntVector_Intrinsics_vec128
+        z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
+    Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
+    Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
+    Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+    Lib_IntVector_Intrinsics_vec128
+        z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
+    Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
+    Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
+    Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
+    Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+    Lib_IntVector_Intrinsics_vec128
+        z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
+    Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
+    Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+    Lib_IntVector_Intrinsics_vec128
+        z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
+    Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
+    Lib_IntVector_Intrinsics_vec128 o0 = x02;
+    Lib_IntVector_Intrinsics_vec128 o1 = x12;
+    Lib_IntVector_Intrinsics_vec128 o2 = x21;
+    Lib_IntVector_Intrinsics_vec128 o3 = x32;
+    Lib_IntVector_Intrinsics_vec128 o4 = x42;
     acc[0U] = o0;
     acc[1U] = o1;
     acc[2U] = o2;
@@ -957,8 +898,8 @@ Hacl_Chacha20Poly1305_128_poly1305_padded_128(
     }
 }
 
-inline static void
-Hacl_Chacha20Poly1305_128_poly1305_do_128(
+static inline void
+poly1305_do_128(
     uint8_t *k,
     uint32_t aadlen,
     uint8_t *aad,
@@ -971,8 +912,8 @@ Hacl_Chacha20Poly1305_128_poly1305_do_128(
         ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
     uint8_t block[16U] = { 0U };
     Hacl_Poly1305_128_poly1305_init(ctx, k);
-    Hacl_Chacha20Poly1305_128_poly1305_padded_128(ctx, aadlen, aad);
-    Hacl_Chacha20Poly1305_128_poly1305_padded_128(ctx, mlen, m);
+    poly1305_padded_128(ctx, aadlen, aad);
+    poly1305_padded_128(ctx, mlen, m);
     store64_le(block, (uint64_t)aadlen);
     store64_le(block + (uint32_t)8U, (uint64_t)mlen);
     Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
@@ -1140,57 +1081,43 @@ Hacl_Chacha20Poly1305_128_poly1305_do_128(
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-        l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
-    Lib_IntVector_Intrinsics_vec128
-        tmp0 =
-            Lib_IntVector_Intrinsics_vec128_and(l,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c01);
-    Lib_IntVector_Intrinsics_vec128
-        tmp1 =
-            Lib_IntVector_Intrinsics_vec128_and(l0,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
-    Lib_IntVector_Intrinsics_vec128
-        tmp2 =
-            Lib_IntVector_Intrinsics_vec128_and(l1,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
-    Lib_IntVector_Intrinsics_vec128
-        tmp3 =
-            Lib_IntVector_Intrinsics_vec128_and(l2,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
-    Lib_IntVector_Intrinsics_vec128
-        tmp4 =
-            Lib_IntVector_Intrinsics_vec128_and(l3,
-                                                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
+    Lib_IntVector_Intrinsics_vec128
+        mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+    Lib_IntVector_Intrinsics_vec128
+        z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
+    Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
+    Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
+    Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+    Lib_IntVector_Intrinsics_vec128
+        z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
+    Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
+    Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
+    Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
+    Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+    Lib_IntVector_Intrinsics_vec128
+        z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128
+        z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
+    Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
+    Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
|
||||
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
|
||||
Lib_IntVector_Intrinsics_vec128
|
||||
l4 =
|
||||
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
|
||||
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
|
||||
Lib_IntVector_Intrinsics_vec128
|
||||
tmp01 =
|
||||
Lib_IntVector_Intrinsics_vec128_and(l4,
|
||||
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
|
||||
Lib_IntVector_Intrinsics_vec128
|
||||
c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
|
||||
Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
|
||||
Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
|
||||
Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
|
||||
Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
|
||||
Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
|
||||
Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
|
||||
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
|
||||
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
|
||||
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
|
||||
Lib_IntVector_Intrinsics_vec128 o0 = x02;
|
||||
Lib_IntVector_Intrinsics_vec128 o1 = x12;
|
||||
Lib_IntVector_Intrinsics_vec128 o2 = x21;
|
||||
Lib_IntVector_Intrinsics_vec128 o3 = x32;
|
||||
Lib_IntVector_Intrinsics_vec128 o4 = x42;
|
||||
acc[0U] = o0;
|
||||
acc[1U] = o1;
|
||||
acc[2U] = o2;
|
||||
|
@ -1214,7 +1141,7 @@ Hacl_Chacha20Poly1305_128_aead_encrypt(
|
|||
uint8_t tmp[64U] = { 0U };
|
||||
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
|
||||
uint8_t *key = tmp;
|
||||
Hacl_Chacha20Poly1305_128_poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
|
||||
poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
|
||||
}
|
||||
|
||||
uint32_t
|
||||
|
@ -1232,9 +1159,9 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(
|
|||
uint8_t tmp[64U] = { 0U };
|
||||
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
|
||||
uint8_t *key = tmp;
|
||||
Hacl_Chacha20Poly1305_128_poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
|
||||
poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
|
||||
uint8_t res = (uint8_t)255U;
|
||||
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
|
||||
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
|
||||
uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
|
||||
res = uu____0 & res;
|
||||
}
|
||||
|
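The decrypt path above rejects a forged tag without branching on secret data: FStar_UInt8_eq_mask yields 0xff for equal bytes and 0x00 otherwise, and the loop ANDs the sixteen per-byte masks together, so res stays 0xff only when every byte matches. A minimal scalar sketch of the same pattern; eq_mask8 here is a hypothetical stand-in for FStar_UInt8_eq_mask:

    #include <stdint.h>

    /* 0xff if a == b, 0x00 otherwise, with no data-dependent branch. */
    static uint8_t eq_mask8(uint8_t a, uint8_t b)
    {
        uint8_t x = a ^ b;                         /* 0 iff a == b */
        return (uint8_t)(((uint16_t)x - 1U) >> 8); /* borrows to 0xff only for x == 0 */
    }

    /* 0 when the 16-byte tags match, nonzero otherwise; constant time. */
    static uint8_t tag_differs(const uint8_t computed[16], const uint8_t received[16])
    {
        uint8_t res = 0xffU;
        for (uint32_t i = 0U; i < 16U; i++)
            res = eq_mask8(computed[i], received[i]) & res;
        return (uint8_t)(res ^ 0xffU);
    }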
The diff for this file is not shown because of its large size.
@@ -0,0 +1,60 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/

#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>

#ifndef __Hacl_Chacha20Poly1305_256_H
#define __Hacl_Chacha20Poly1305_256_H

#include "Hacl_Kremlib.h"
#include "Hacl_Chacha20_Vec256.h"
#include "Hacl_Poly1305_256.h"

void
Hacl_Chacha20Poly1305_256_aead_encrypt(
uint8_t *k,
uint8_t *n1,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
uint8_t *m,
uint8_t *cipher,
uint8_t *mac);

uint32_t
Hacl_Chacha20Poly1305_256_aead_decrypt(
uint8_t *k,
uint8_t *n1,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
uint8_t *m,
uint8_t *cipher,
uint8_t *mac);

#define __Hacl_Chacha20Poly1305_256_H_DEFINED
#endif

@@ -23,8 +23,8 @@

#include "Hacl_Chacha20Poly1305_32.h"

static void
Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
static inline void
poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
{
uint32_t n1 = len / (uint32_t)16U;
uint32_t r = len % (uint32_t)16U;
@@ -34,7 +34,7 @@ Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t
uint64_t *acc0 = ctx;
uint32_t nb = n1 * (uint32_t)16U / (uint32_t)16U;
uint32_t rem2 = n1 * (uint32_t)16U % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = blocks + i * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint64_t u0 = load64_le(block);
@@ -118,30 +118,35 @@ Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t l = t0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c01 = l >> (uint32_t)26U;
uint64_t l0 = t1 + c01;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c11 = l0 >> (uint32_t)26U;
uint64_t l1 = t2 + c11;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c21 = l1 >> (uint32_t)26U;
uint64_t l2 = t3 + c21;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c31 = l2 >> (uint32_t)26U;
uint64_t l3 = t4 + c31;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t tmp11 = tmp1 + c5;
uint64_t o0 = tmp01;
uint64_t o1 = tmp11;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
uint64_t o2 = x21;
uint64_t o3 = x32;
uint64_t o4 = x42;
acc0[0U] = o0;
acc0[1U] = o1;
acc0[2U] = o2;
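Both halves of the hunk above are one pass of carry propagation over a Poly1305 accumulator held as five 26-bit limbs, reduced mod p = 2^130 - 5; the rewrite mostly renames and reorders the intermediates. A compact sketch of the operation, assuming each limb starts well below 2^64 so the shifts cannot overflow:

    #include <stdint.h>

    #define MASK26 ((uint64_t)0x3ffffffU)

    /* One carry pass over a 5x26-bit accumulator mod 2^130 - 5: a carry out of
       the top limb re-enters at the bottom multiplied by 5, since 2^130 = 5 (mod p). */
    static void poly1305_carry_sketch(uint64_t h[5])
    {
        uint64_t c = 0U;
        for (int i = 0; i < 5; i++) {
            h[i] += c;
            c = h[i] >> 26U;
            h[i] &= MASK26;
        }
        h[0] += c * (uint64_t)5U; /* wrap the top carry */
        h[1] += h[0] >> 26U;      /* one more limb carry may be produced */
        h[0] &= MASK26;
    }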
@@ -152,7 +157,7 @@ Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t
uint8_t *last1 = blocks + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem2 * sizeof last1[0U]);
memcpy(tmp, last1, rem2 * sizeof(last1[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -234,30 +239,35 @@ Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t l = t0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c01 = l >> (uint32_t)26U;
uint64_t l0 = t1 + c01;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c11 = l0 >> (uint32_t)26U;
uint64_t l1 = t2 + c11;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c21 = l1 >> (uint32_t)26U;
uint64_t l2 = t3 + c21;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c31 = l2 >> (uint32_t)26U;
uint64_t l3 = t4 + c31;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t tmp11 = tmp1 + c5;
uint64_t o0 = tmp01;
uint64_t o1 = tmp11;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
uint64_t o2 = x21;
uint64_t o3 = x32;
uint64_t o4 = x42;
acc0[0U] = o0;
acc0[1U] = o1;
acc0[2U] = o2;
@@ -265,7 +275,7 @@ Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
memcpy(tmp, rem1, r * sizeof rem1[0U]);
memcpy(tmp, rem1, r * sizeof(rem1[0U]));
if (r > (uint32_t)0U) {
uint64_t *pre = ctx + (uint32_t)5U;
uint64_t *acc = ctx;
@@ -351,30 +361,35 @@ Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t l = t0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c01 = l >> (uint32_t)26U;
uint64_t l0 = t1 + c01;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c11 = l0 >> (uint32_t)26U;
uint64_t l1 = t2 + c11;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c21 = l1 >> (uint32_t)26U;
uint64_t l2 = t3 + c21;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c31 = l2 >> (uint32_t)26U;
uint64_t l3 = t4 + c31;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t tmp11 = tmp1 + c5;
uint64_t o0 = tmp01;
uint64_t o1 = tmp11;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
uint64_t o2 = x21;
uint64_t o3 = x32;
uint64_t o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -384,8 +399,8 @@ Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t
}
}

static void
Hacl_Chacha20Poly1305_32_poly1305_do_32(
static inline void
poly1305_do_32(
uint8_t *k,
uint32_t aadlen,
uint8_t *aad,
@@ -396,8 +411,8 @@ Hacl_Chacha20Poly1305_32_poly1305_do_32(
uint64_t ctx[25U] = { 0U };
uint8_t block[16U] = { 0U };
Hacl_Poly1305_32_poly1305_init(ctx, k);
Hacl_Chacha20Poly1305_32_poly1305_padded_32(ctx, aadlen, aad);
Hacl_Chacha20Poly1305_32_poly1305_padded_32(ctx, mlen, m);
poly1305_padded_32(ctx, aadlen, aad);
poly1305_padded_32(ctx, mlen, m);
store64_le(block, (uint64_t)aadlen);
store64_le(block + (uint32_t)8U, (uint64_t)mlen);
uint64_t *pre = ctx + (uint32_t)5U;
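poly1305_do_32 (like its vectorized counterparts above) pads and MACs the AAD, then the ciphertext, and finally feeds Poly1305 one extra 16-byte block holding the two lengths as little-endian 64-bit values, which is the AEAD layout of RFC 8439. A sketch of that final block; store64_le_sketch is a stand-in for the library's store64_le helper:

    #include <stdint.h>
    #include <string.h>

    /* Little-endian 64-bit store; stand-in for the kremlin store64_le. */
    static void store64_le_sketch(uint8_t *out, uint64_t v)
    {
        for (int i = 0; i < 8; i++)
            out[i] = (uint8_t)(v >> (8 * i));
    }

    /* The last Poly1305 block of ChaCha20-Poly1305: aadlen || mlen. */
    static void make_length_block(uint8_t block[16], uint32_t aadlen, uint32_t mlen)
    {
        memset(block, 0, 16);
        store64_le_sketch(block, (uint64_t)aadlen);
        store64_le_sketch(block + 8, (uint64_t)mlen);
    }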
@@ -484,30 +499,35 @@ Hacl_Chacha20Poly1305_32_poly1305_do_32(
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t l = t0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c01 = l >> (uint32_t)26U;
uint64_t l0 = t1 + c01;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c11 = l0 >> (uint32_t)26U;
uint64_t l1 = t2 + c11;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c21 = l1 >> (uint32_t)26U;
uint64_t l2 = t3 + c21;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c31 = l2 >> (uint32_t)26U;
uint64_t l3 = t4 + c31;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t tmp11 = tmp1 + c5;
uint64_t o0 = tmp01;
uint64_t o1 = tmp11;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
uint64_t o2 = x21;
uint64_t o3 = x32;
uint64_t o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -531,7 +551,7 @@ Hacl_Chacha20Poly1305_32_aead_encrypt(
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
uint8_t *key = tmp;
Hacl_Chacha20Poly1305_32_poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
}

uint32_t
@@ -549,9 +569,9 @@ Hacl_Chacha20Poly1305_32_aead_decrypt(
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
uint8_t *key = tmp;
Hacl_Chacha20Poly1305_32_poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
uint8_t res = (uint8_t)255U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
res = uu____0 & res;
}

@@ -23,8 +23,8 @@

#include "Hacl_Chacha20_Vec128.h"

static void
Hacl_Chacha20_Vec128_double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
static inline void
double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
{
st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
Lib_IntVector_Intrinsics_vec128 std = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
@@ -124,27 +124,27 @@ Hacl_Chacha20_Vec128_double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, (uint32_t)7U);
}

static void
Hacl_Chacha20_Vec128_chacha20_core_128(
static inline void
chacha20_core_128(
Lib_IntVector_Intrinsics_vec128 *k,
Lib_IntVector_Intrinsics_vec128 *ctx,
uint32_t ctr)
{
memcpy(k, ctx, (uint32_t)16U * sizeof ctx[0U]);
memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
uint32_t ctr_u32 = (uint32_t)4U * ctr;
Lib_IntVector_Intrinsics_vec128 cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32);
k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
Hacl_Chacha20_Vec128_double_round_128(k);
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
double_round_128(k);
double_round_128(k);
double_round_128(k);
double_round_128(k);
double_round_128(k);
double_round_128(k);
double_round_128(k);
double_round_128(k);
double_round_128(k);
double_round_128(k);
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec128 *os = k;
Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]);
os[i] = x;
@@ -152,22 +152,18 @@ Hacl_Chacha20_Vec128_chacha20_core_128(
k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
}
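chacha20_core_128 runs ten double rounds (the 20 rounds of ChaCha20) and scales the block counter by four, since each 128-bit batch produces four ChaCha20 blocks whose lanes were offset 0..3 at init time. The counter bookkeeping in scalar form, under the 4-way batching assumption used here:

    #include <stdint.h>

    /* Counters consumed by the four parallel blocks of batch `ctr`:
       base = 4*ctr as in chacha20_core_128, lane offsets 0..3 as in init. */
    static void batch_counters_4way(uint32_t ctr, uint32_t counters[4])
    {
        uint32_t base = (uint32_t)4U * ctr;
        for (uint32_t lane = 0U; lane < 4U; lane++)
            counters[lane] = base + lane;
    }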

static void
Hacl_Chacha20_Vec128_chacha20_init_128(
Lib_IntVector_Intrinsics_vec128 *ctx,
uint8_t *k,
uint8_t *n1,
uint32_t ctr)
static inline void
chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
{
uint32_t ctx1[16U] = { 0U };
uint32_t *uu____0 = ctx1;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
uint32_t *os = uu____0;
uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
os[i] = x;
}
uint32_t *uu____1 = ctx1 + (uint32_t)4U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i++) {
uint32_t *os = uu____1;
uint8_t *bj = k + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
@@ -177,7 +173,7 @@ Hacl_Chacha20_Vec128_chacha20_init_128(
}
ctx1[12U] = ctr;
uint32_t *uu____2 = ctx1 + (uint32_t)13U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i++) {
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
@@ -185,7 +181,7 @@ Hacl_Chacha20_Vec128_chacha20_init_128(
uint32_t x = r;
os[i] = x;
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec128 *os = ctx;
uint32_t x = ctx1[i];
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x);
@@ -193,10 +189,10 @@ Hacl_Chacha20_Vec128_chacha20_init_128(
}
Lib_IntVector_Intrinsics_vec128
ctr1 =
Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)3U,
(uint32_t)2U,
(uint32_t)1U,
(uint32_t)0U);
Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)0U,
(uint32_t)1U,
(uint32_t)2U,
(uint32_t)3U);
Lib_IntVector_Intrinsics_vec128 c12 = ctx[12U];
ctx[12U] = Lib_IntVector_Intrinsics_vec128_add32(c12, ctr1);
}
@@ -213,26 +209,17 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
Lib_IntVector_Intrinsics_vec128 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Hacl_Chacha20_Vec128_chacha20_init_128(ctx, key, n1, ctr);
uint32_t rem1 = len % ((uint32_t)4U * (uint32_t)64U);
uint32_t nb = len / ((uint32_t)4U * (uint32_t)64U);
uint32_t rem2 = len % ((uint32_t)4U * (uint32_t)64U);
for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0 = i0 + (uint32_t)1U) {
uint8_t *uu____0 = out + i0 * (uint32_t)4U * (uint32_t)64U;
uint8_t *uu____1 = text + i0 * (uint32_t)256U;
chacha20_init_128(ctx, key, n1, ctr);
uint32_t rem1 = len % (uint32_t)256U;
uint32_t nb = len / (uint32_t)256U;
uint32_t rem2 = len % (uint32_t)256U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *uu____0 = out + i * (uint32_t)256U;
uint8_t *uu____1 = text + i * (uint32_t)256U;
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, i0);
Lib_IntVector_Intrinsics_vec128 bl[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i * (uint32_t)4U * (uint32_t)4U);
os[i] = x;
}
chacha20_core_128(k, ctx, i);
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -345,33 +332,22 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
os[i] = x;
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i * (uint32_t)16U, bl[i]);
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i0 * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
}
}
if (rem2 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)4U * (uint32_t)64U;
uint8_t *uu____3 = text + nb * (uint32_t)4U * (uint32_t)64U;
uint8_t *uu____2 = out + nb * (uint32_t)256U;
uint8_t *uu____3 = text + nb * (uint32_t)256U;
uint8_t plain[256U] = { 0U };
memcpy(plain, uu____3, rem1 * sizeof uu____3[0U]);
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, nb);
Lib_IntVector_Intrinsics_vec128 bl[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)4U * (uint32_t)4U);
os[i] = x;
}
chacha20_core_128(k, ctx, nb);
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -484,15 +460,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
os[i] = x;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, bl[i]);
}
memcpy(uu____2, plain, rem1 * sizeof plain[0U]);
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
}
}

@@ -508,26 +482,17 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
Lib_IntVector_Intrinsics_vec128 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Hacl_Chacha20_Vec128_chacha20_init_128(ctx, key, n1, ctr);
uint32_t rem1 = len % ((uint32_t)4U * (uint32_t)64U);
uint32_t nb = len / ((uint32_t)4U * (uint32_t)64U);
uint32_t rem2 = len % ((uint32_t)4U * (uint32_t)64U);
for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0 = i0 + (uint32_t)1U) {
uint8_t *uu____0 = out + i0 * (uint32_t)4U * (uint32_t)64U;
uint8_t *uu____1 = cipher + i0 * (uint32_t)256U;
chacha20_init_128(ctx, key, n1, ctr);
uint32_t rem1 = len % (uint32_t)256U;
uint32_t nb = len / (uint32_t)256U;
uint32_t rem2 = len % (uint32_t)256U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *uu____0 = out + i * (uint32_t)256U;
uint8_t *uu____1 = cipher + i * (uint32_t)256U;
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, i0);
Lib_IntVector_Intrinsics_vec128 bl[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i * (uint32_t)4U * (uint32_t)4U);
os[i] = x;
}
chacha20_core_128(k, ctx, i);
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -640,33 +605,22 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
os[i] = x;
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i * (uint32_t)16U, bl[i]);
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i0 * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
}
}
if (rem2 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)4U * (uint32_t)64U;
uint8_t *uu____3 = cipher + nb * (uint32_t)4U * (uint32_t)64U;
uint8_t *uu____2 = out + nb * (uint32_t)256U;
uint8_t *uu____3 = cipher + nb * (uint32_t)256U;
uint8_t plain[256U] = { 0U };
memcpy(plain, uu____3, rem1 * sizeof uu____3[0U]);
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, nb);
Lib_IntVector_Intrinsics_vec128 bl[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)4U * (uint32_t)4U);
os[i] = x;
}
chacha20_core_128(k, ctx, nb);
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -779,14 +733,12 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = bl;
Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
os[i] = x;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, bl[i]);
}
memcpy(uu____2, plain, rem1 * sizeof plain[0U]);
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
}
}
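A pattern worth noting across the Vec128 encrypt and decrypt hunks: the old code loaded the input into a staging array bl, xored bl with the keystream in one loop, and stored bl in a second loop; the new code fuses this into a single load-xor-store loop and drops bl entirely. In scalar byte terms the fused loop reduces to the following (keystream here is an assumed byte view of the transposed k[] state):

    #include <stdint.h>

    /* Fused pass over one 256-byte batch: out = text XOR keystream,
       with no intermediate staging buffer. */
    static void xor_batch_256(uint8_t *out, const uint8_t *text, const uint8_t *keystream)
    {
        for (uint32_t i = 0U; i < 256U; i++)
            out[i] = text[i] ^ keystream[i];
    }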

@@ -0,0 +1,876 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/

#include "Hacl_Chacha20_Vec256.h"

static inline void
double_round_256(Lib_IntVector_Intrinsics_vec256 *st)
{
st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
Lib_IntVector_Intrinsics_vec256 std = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, (uint32_t)16U);
st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
Lib_IntVector_Intrinsics_vec256 std0 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, (uint32_t)12U);
st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
Lib_IntVector_Intrinsics_vec256 std1 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, (uint32_t)8U);
st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
Lib_IntVector_Intrinsics_vec256 std2 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, (uint32_t)7U);
st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
Lib_IntVector_Intrinsics_vec256 std3 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, (uint32_t)16U);
st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
Lib_IntVector_Intrinsics_vec256 std4 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, (uint32_t)12U);
st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
Lib_IntVector_Intrinsics_vec256 std5 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, (uint32_t)8U);
st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
Lib_IntVector_Intrinsics_vec256 std6 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, (uint32_t)7U);
st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
Lib_IntVector_Intrinsics_vec256 std7 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, (uint32_t)16U);
st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
Lib_IntVector_Intrinsics_vec256 std8 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, (uint32_t)12U);
st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
Lib_IntVector_Intrinsics_vec256 std9 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, (uint32_t)8U);
st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
Lib_IntVector_Intrinsics_vec256 std10 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, (uint32_t)7U);
st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
Lib_IntVector_Intrinsics_vec256 std11 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, (uint32_t)16U);
st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
Lib_IntVector_Intrinsics_vec256 std12 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, (uint32_t)12U);
st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
Lib_IntVector_Intrinsics_vec256 std13 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, (uint32_t)8U);
st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
Lib_IntVector_Intrinsics_vec256 std14 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, (uint32_t)7U);
st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
Lib_IntVector_Intrinsics_vec256 std15 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, (uint32_t)16U);
st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
Lib_IntVector_Intrinsics_vec256 std16 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, (uint32_t)12U);
st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
Lib_IntVector_Intrinsics_vec256 std17 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, (uint32_t)8U);
st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
Lib_IntVector_Intrinsics_vec256 std18 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, (uint32_t)7U);
st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
Lib_IntVector_Intrinsics_vec256 std19 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, (uint32_t)16U);
st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
Lib_IntVector_Intrinsics_vec256 std20 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, (uint32_t)12U);
st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
Lib_IntVector_Intrinsics_vec256 std21 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, (uint32_t)8U);
st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
Lib_IntVector_Intrinsics_vec256 std22 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, (uint32_t)7U);
st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
Lib_IntVector_Intrinsics_vec256 std23 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, (uint32_t)16U);
st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
Lib_IntVector_Intrinsics_vec256 std24 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, (uint32_t)12U);
st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
Lib_IntVector_Intrinsics_vec256 std25 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, (uint32_t)8U);
st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
Lib_IntVector_Intrinsics_vec256 std26 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, (uint32_t)7U);
st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
Lib_IntVector_Intrinsics_vec256 std27 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, (uint32_t)16U);
st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
Lib_IntVector_Intrinsics_vec256 std28 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, (uint32_t)12U);
st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
Lib_IntVector_Intrinsics_vec256 std29 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, (uint32_t)8U);
st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
Lib_IntVector_Intrinsics_vec256 std30 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, (uint32_t)7U);
}
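double_round_256 above unrolls one ChaCha20 double round over eight 256-bit lanes: four quarter-rounds down the columns (0,4,8,12) through (3,7,11,15), then four across the diagonals (0,5,10,15) through (3,4,9,14), each an add/xor/rotate ladder with rotations 16, 12, 8, 7. A scalar sketch of the quarter-round:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t v, uint32_t n)
    {
        return (v << n) | (v >> (32U - n));
    }

    /* One ChaCha20 quarter-round; a double round applies it to the four
       columns of the 4x4 state and then to the four diagonals. */
    static void quarter_round(uint32_t st[16], int a, int b, int c, int d)
    {
        st[a] += st[b]; st[d] = rotl32(st[d] ^ st[a], 16U);
        st[c] += st[d]; st[b] = rotl32(st[b] ^ st[c], 12U);
        st[a] += st[b]; st[d] = rotl32(st[d] ^ st[a], 8U);
        st[c] += st[d]; st[b] = rotl32(st[b] ^ st[c], 7U);
    }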

static inline void
chacha20_core_256(
Lib_IntVector_Intrinsics_vec256 *k,
Lib_IntVector_Intrinsics_vec256 *ctx,
uint32_t ctr)
{
memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
uint32_t ctr_u32 = (uint32_t)8U * ctr;
Lib_IntVector_Intrinsics_vec256 cv = Lib_IntVector_Intrinsics_vec256_load32(ctr_u32);
k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv);
double_round_256(k);
double_round_256(k);
double_round_256(k);
double_round_256(k);
double_round_256(k);
double_round_256(k);
double_round_256(k);
double_round_256(k);
double_round_256(k);
double_round_256(k);
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec256 *os = k;
Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(k[i], ctx[i]);
os[i] = x;
}
k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv);
}

static inline void
chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
{
uint32_t ctx1[16U] = { 0U };
uint32_t *uu____0 = ctx1;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
uint32_t *os = uu____0;
uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
os[i] = x;
}
uint32_t *uu____1 = ctx1 + (uint32_t)4U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i++) {
uint32_t *os = uu____1;
uint8_t *bj = k + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
os[i] = x;
}
ctx1[12U] = ctr;
uint32_t *uu____2 = ctx1 + (uint32_t)13U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i++) {
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
os[i] = x;
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec256 *os = ctx;
uint32_t x = ctx1[i];
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_load32(x);
os[i] = x0;
}
Lib_IntVector_Intrinsics_vec256
ctr1 =
Lib_IntVector_Intrinsics_vec256_load32s((uint32_t)0U,
(uint32_t)1U,
(uint32_t)2U,
(uint32_t)3U,
(uint32_t)4U,
(uint32_t)5U,
(uint32_t)6U,
(uint32_t)7U);
Lib_IntVector_Intrinsics_vec256 c12 = ctx[12U];
ctx[12U] = Lib_IntVector_Intrinsics_vec256_add32(c12, ctr1);
}

void
Hacl_Chacha20_Vec256_chacha20_encrypt_256(
uint32_t len,
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint32_t ctr)
{
Lib_IntVector_Intrinsics_vec256 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec256_zero;
chacha20_init_256(ctx, key, n1, ctr);
uint32_t rem1 = len % (uint32_t)512U;
uint32_t nb = len / (uint32_t)512U;
uint32_t rem2 = len % (uint32_t)512U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *uu____0 = out + i * (uint32_t)512U;
uint8_t *uu____1 = text + i * (uint32_t)512U;
Lib_IntVector_Intrinsics_vec256 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
chacha20_core_256(k, ctx, i);
Lib_IntVector_Intrinsics_vec256 v00 = k[0U];
Lib_IntVector_Intrinsics_vec256 v16 = k[1U];
Lib_IntVector_Intrinsics_vec256 v20 = k[2U];
Lib_IntVector_Intrinsics_vec256 v30 = k[3U];
Lib_IntVector_Intrinsics_vec256 v40 = k[4U];
Lib_IntVector_Intrinsics_vec256 v50 = k[5U];
Lib_IntVector_Intrinsics_vec256 v60 = k[6U];
Lib_IntVector_Intrinsics_vec256 v70 = k[7U];
Lib_IntVector_Intrinsics_vec256
v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16);
Lib_IntVector_Intrinsics_vec256
v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16);
Lib_IntVector_Intrinsics_vec256
v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30);
Lib_IntVector_Intrinsics_vec256
v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30);
Lib_IntVector_Intrinsics_vec256
v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50);
Lib_IntVector_Intrinsics_vec256
v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50);
Lib_IntVector_Intrinsics_vec256
v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70);
Lib_IntVector_Intrinsics_vec256
v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70);
Lib_IntVector_Intrinsics_vec256
v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_, v2_);
Lib_IntVector_Intrinsics_vec256
v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_, v2_);
Lib_IntVector_Intrinsics_vec256
v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_, v3_);
Lib_IntVector_Intrinsics_vec256
v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_, v3_);
Lib_IntVector_Intrinsics_vec256
v4__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_, v6_);
Lib_IntVector_Intrinsics_vec256
v5__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_, v6_);
Lib_IntVector_Intrinsics_vec256
v6__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_, v7_);
Lib_IntVector_Intrinsics_vec256
v7__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_, v7_);
Lib_IntVector_Intrinsics_vec256
v0___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__, v4__);
Lib_IntVector_Intrinsics_vec256
v1___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__, v4__);
Lib_IntVector_Intrinsics_vec256
v2___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__, v5__);
Lib_IntVector_Intrinsics_vec256
v3___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__, v5__);
Lib_IntVector_Intrinsics_vec256
v4___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__, v6__);
Lib_IntVector_Intrinsics_vec256
v5___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__, v6__);
Lib_IntVector_Intrinsics_vec256
v6___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__, v7__);
Lib_IntVector_Intrinsics_vec256
v7___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__, v7__);
Lib_IntVector_Intrinsics_vec256 v0 = v0___;
Lib_IntVector_Intrinsics_vec256 v1 = v2___;
Lib_IntVector_Intrinsics_vec256 v2 = v4___;
Lib_IntVector_Intrinsics_vec256 v3 = v6___;
Lib_IntVector_Intrinsics_vec256 v4 = v1___;
Lib_IntVector_Intrinsics_vec256 v5 = v3___;
Lib_IntVector_Intrinsics_vec256 v6 = v5___;
Lib_IntVector_Intrinsics_vec256 v7 = v7___;
Lib_IntVector_Intrinsics_vec256 v01 = k[8U];
Lib_IntVector_Intrinsics_vec256 v110 = k[9U];
Lib_IntVector_Intrinsics_vec256 v21 = k[10U];
Lib_IntVector_Intrinsics_vec256 v31 = k[11U];
Lib_IntVector_Intrinsics_vec256 v41 = k[12U];
Lib_IntVector_Intrinsics_vec256 v51 = k[13U];
Lib_IntVector_Intrinsics_vec256 v61 = k[14U];
Lib_IntVector_Intrinsics_vec256 v71 = k[15U];
Lib_IntVector_Intrinsics_vec256
v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110);
Lib_IntVector_Intrinsics_vec256
v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110);
Lib_IntVector_Intrinsics_vec256
v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31);
Lib_IntVector_Intrinsics_vec256
v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31);
Lib_IntVector_Intrinsics_vec256
v4_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51);
Lib_IntVector_Intrinsics_vec256
v5_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51);
Lib_IntVector_Intrinsics_vec256
v6_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71);
Lib_IntVector_Intrinsics_vec256
v7_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71);
Lib_IntVector_Intrinsics_vec256
v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0);
Lib_IntVector_Intrinsics_vec256
v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0);
Lib_IntVector_Intrinsics_vec256
v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0);
Lib_IntVector_Intrinsics_vec256
v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0);
Lib_IntVector_Intrinsics_vec256
v4__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0);
Lib_IntVector_Intrinsics_vec256
v5__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0);
Lib_IntVector_Intrinsics_vec256
v6__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0);
Lib_IntVector_Intrinsics_vec256
v7__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0);
Lib_IntVector_Intrinsics_vec256
v0___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__0, v4__0);
Lib_IntVector_Intrinsics_vec256
v1___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__0, v4__0);
Lib_IntVector_Intrinsics_vec256
v2___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__0, v5__0);
Lib_IntVector_Intrinsics_vec256
v3___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__0, v5__0);
Lib_IntVector_Intrinsics_vec256
v4___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__0, v6__0);
Lib_IntVector_Intrinsics_vec256
v5___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__0, v6__0);
Lib_IntVector_Intrinsics_vec256
v6___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__0, v7__0);
Lib_IntVector_Intrinsics_vec256
v7___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__0, v7__0);
Lib_IntVector_Intrinsics_vec256 v8 = v0___0;
Lib_IntVector_Intrinsics_vec256 v9 = v2___0;
Lib_IntVector_Intrinsics_vec256 v10 = v4___0;
Lib_IntVector_Intrinsics_vec256 v11 = v6___0;
Lib_IntVector_Intrinsics_vec256 v12 = v1___0;
Lib_IntVector_Intrinsics_vec256 v13 = v3___0;
Lib_IntVector_Intrinsics_vec256 v14 = v5___0;
Lib_IntVector_Intrinsics_vec256 v15 = v7___0;
k[0U] = v0;
k[1U] = v8;
k[2U] = v1;
k[3U] = v9;
k[4U] = v2;
k[5U] = v10;
k[6U] = v3;
k[7U] = v11;
k[8U] = v4;
k[9U] = v12;
k[10U] = v5;
k[11U] = v13;
k[12U] = v6;
k[13U] = v14;
k[14U] = v7;
k[15U] = v15;
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
Lib_IntVector_Intrinsics_vec256
x = Lib_IntVector_Intrinsics_vec256_load_le(uu____1 + i0 * (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
Lib_IntVector_Intrinsics_vec256_store_le(uu____0 + i0 * (uint32_t)32U, y);
}
}
if (rem2 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)512U;
uint8_t *uu____3 = text + nb * (uint32_t)512U;
uint8_t plain[512U] = { 0U };
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
Lib_IntVector_Intrinsics_vec256 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
chacha20_core_256(k, ctx, nb);
Lib_IntVector_Intrinsics_vec256 v00 = k[0U];
Lib_IntVector_Intrinsics_vec256 v16 = k[1U];
Lib_IntVector_Intrinsics_vec256 v20 = k[2U];
Lib_IntVector_Intrinsics_vec256 v30 = k[3U];
Lib_IntVector_Intrinsics_vec256 v40 = k[4U];
Lib_IntVector_Intrinsics_vec256 v50 = k[5U];
Lib_IntVector_Intrinsics_vec256 v60 = k[6U];
Lib_IntVector_Intrinsics_vec256 v70 = k[7U];
Lib_IntVector_Intrinsics_vec256
v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16);
Lib_IntVector_Intrinsics_vec256
v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16);
Lib_IntVector_Intrinsics_vec256
v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30);
Lib_IntVector_Intrinsics_vec256
v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30);
Lib_IntVector_Intrinsics_vec256
v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50);
Lib_IntVector_Intrinsics_vec256
v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50);
Lib_IntVector_Intrinsics_vec256
v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70);
Lib_IntVector_Intrinsics_vec256
v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70);
Lib_IntVector_Intrinsics_vec256
v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_, v2_);
Lib_IntVector_Intrinsics_vec256
v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_, v2_);
Lib_IntVector_Intrinsics_vec256
v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_, v3_);
Lib_IntVector_Intrinsics_vec256
v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_, v3_);
Lib_IntVector_Intrinsics_vec256
v4__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_, v6_);
Lib_IntVector_Intrinsics_vec256
v5__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_, v6_);
Lib_IntVector_Intrinsics_vec256
v6__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_, v7_);
Lib_IntVector_Intrinsics_vec256
v7__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_, v7_);
Lib_IntVector_Intrinsics_vec256
v0___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__, v4__);
Lib_IntVector_Intrinsics_vec256
v1___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__, v4__);
Lib_IntVector_Intrinsics_vec256
v2___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__, v5__);
Lib_IntVector_Intrinsics_vec256
v3___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__, v5__);
Lib_IntVector_Intrinsics_vec256
v4___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__, v6__);
Lib_IntVector_Intrinsics_vec256
v5___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__, v6__);
Lib_IntVector_Intrinsics_vec256
|
||||
v6___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__, v7__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__, v7__);
|
||||
Lib_IntVector_Intrinsics_vec256 v0 = v0___;
|
||||
Lib_IntVector_Intrinsics_vec256 v1 = v2___;
|
||||
Lib_IntVector_Intrinsics_vec256 v2 = v4___;
|
||||
Lib_IntVector_Intrinsics_vec256 v3 = v6___;
|
||||
Lib_IntVector_Intrinsics_vec256 v4 = v1___;
|
||||
Lib_IntVector_Intrinsics_vec256 v5 = v3___;
|
||||
Lib_IntVector_Intrinsics_vec256 v6 = v5___;
|
||||
Lib_IntVector_Intrinsics_vec256 v7 = v7___;
|
||||
Lib_IntVector_Intrinsics_vec256 v01 = k[8U];
|
||||
Lib_IntVector_Intrinsics_vec256 v110 = k[9U];
|
||||
Lib_IntVector_Intrinsics_vec256 v21 = k[10U];
|
||||
Lib_IntVector_Intrinsics_vec256 v31 = k[11U];
|
||||
Lib_IntVector_Intrinsics_vec256 v41 = k[12U];
|
||||
Lib_IntVector_Intrinsics_vec256 v51 = k[13U];
|
||||
Lib_IntVector_Intrinsics_vec256 v61 = k[14U];
|
||||
Lib_IntVector_Intrinsics_vec256 v71 = k[15U];
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__0, v4__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__0, v4__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__0, v5__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__0, v5__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__0, v6__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__0, v6__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__0, v7__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__0, v7__0);
|
||||
Lib_IntVector_Intrinsics_vec256 v8 = v0___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v9 = v2___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v10 = v4___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v11 = v6___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v12 = v1___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v13 = v3___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v14 = v5___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v15 = v7___0;
|
||||
k[0U] = v0;
|
||||
k[1U] = v8;
|
||||
k[2U] = v1;
|
||||
k[3U] = v9;
|
||||
k[4U] = v2;
|
||||
k[5U] = v10;
|
||||
k[6U] = v3;
|
||||
k[7U] = v11;
|
||||
k[8U] = v4;
|
||||
k[9U] = v12;
|
||||
k[10U] = v5;
|
||||
k[11U] = v13;
|
||||
k[12U] = v6;
|
||||
k[13U] = v14;
|
||||
k[14U] = v7;
|
||||
k[15U] = v15;
|
||||
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
x = Lib_IntVector_Intrinsics_vec256_load_le(plain + i * (uint32_t)32U);
|
||||
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
|
||||
Lib_IntVector_Intrinsics_vec256_store_le(plain + i * (uint32_t)32U, y);
|
||||
}
|
||||
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
Hacl_Chacha20_Vec256_chacha20_decrypt_256(
|
||||
uint32_t len,
|
||||
uint8_t *out,
|
||||
uint8_t *cipher,
|
||||
uint8_t *key,
|
||||
uint8_t *n1,
|
||||
uint32_t ctr)
|
||||
{
|
||||
Lib_IntVector_Intrinsics_vec256 ctx[16U];
|
||||
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
|
||||
ctx[_i] = Lib_IntVector_Intrinsics_vec256_zero;
|
||||
chacha20_init_256(ctx, key, n1, ctr);
|
||||
uint32_t rem1 = len % (uint32_t)512U;
|
||||
uint32_t nb = len / (uint32_t)512U;
|
||||
uint32_t rem2 = len % (uint32_t)512U;
|
||||
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
|
||||
uint8_t *uu____0 = out + i * (uint32_t)512U;
|
||||
uint8_t *uu____1 = cipher + i * (uint32_t)512U;
|
||||
Lib_IntVector_Intrinsics_vec256 k[16U];
|
||||
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
|
||||
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
|
||||
chacha20_core_256(k, ctx, i);
|
||||
Lib_IntVector_Intrinsics_vec256 v00 = k[0U];
|
||||
Lib_IntVector_Intrinsics_vec256 v16 = k[1U];
|
||||
Lib_IntVector_Intrinsics_vec256 v20 = k[2U];
|
||||
Lib_IntVector_Intrinsics_vec256 v30 = k[3U];
|
||||
Lib_IntVector_Intrinsics_vec256 v40 = k[4U];
|
||||
Lib_IntVector_Intrinsics_vec256 v50 = k[5U];
|
||||
Lib_IntVector_Intrinsics_vec256 v60 = k[6U];
|
||||
Lib_IntVector_Intrinsics_vec256 v70 = k[7U];
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_, v2_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_, v2_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_, v3_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_, v3_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_, v6_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_, v6_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_, v7_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_, v7_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__, v4__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__, v4__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__, v5__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__, v5__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__, v6__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__, v6__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__, v7__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__, v7__);
|
||||
Lib_IntVector_Intrinsics_vec256 v0 = v0___;
|
||||
Lib_IntVector_Intrinsics_vec256 v1 = v2___;
|
||||
Lib_IntVector_Intrinsics_vec256 v2 = v4___;
|
||||
Lib_IntVector_Intrinsics_vec256 v3 = v6___;
|
||||
Lib_IntVector_Intrinsics_vec256 v4 = v1___;
|
||||
Lib_IntVector_Intrinsics_vec256 v5 = v3___;
|
||||
Lib_IntVector_Intrinsics_vec256 v6 = v5___;
|
||||
Lib_IntVector_Intrinsics_vec256 v7 = v7___;
|
||||
Lib_IntVector_Intrinsics_vec256 v01 = k[8U];
|
||||
Lib_IntVector_Intrinsics_vec256 v110 = k[9U];
|
||||
Lib_IntVector_Intrinsics_vec256 v21 = k[10U];
|
||||
Lib_IntVector_Intrinsics_vec256 v31 = k[11U];
|
||||
Lib_IntVector_Intrinsics_vec256 v41 = k[12U];
|
||||
Lib_IntVector_Intrinsics_vec256 v51 = k[13U];
|
||||
Lib_IntVector_Intrinsics_vec256 v61 = k[14U];
|
||||
Lib_IntVector_Intrinsics_vec256 v71 = k[15U];
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__0, v4__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__0, v4__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__0, v5__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__0, v5__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__0, v6__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__0, v6__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__0, v7__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__0, v7__0);
|
||||
Lib_IntVector_Intrinsics_vec256 v8 = v0___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v9 = v2___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v10 = v4___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v11 = v6___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v12 = v1___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v13 = v3___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v14 = v5___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v15 = v7___0;
|
||||
k[0U] = v0;
|
||||
k[1U] = v8;
|
||||
k[2U] = v1;
|
||||
k[3U] = v9;
|
||||
k[4U] = v2;
|
||||
k[5U] = v10;
|
||||
k[6U] = v3;
|
||||
k[7U] = v11;
|
||||
k[8U] = v4;
|
||||
k[9U] = v12;
|
||||
k[10U] = v5;
|
||||
k[11U] = v13;
|
||||
k[12U] = v6;
|
||||
k[13U] = v14;
|
||||
k[14U] = v7;
|
||||
k[15U] = v15;
|
||||
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
x = Lib_IntVector_Intrinsics_vec256_load_le(uu____1 + i0 * (uint32_t)32U);
|
||||
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
|
||||
Lib_IntVector_Intrinsics_vec256_store_le(uu____0 + i0 * (uint32_t)32U, y);
|
||||
}
|
||||
}
|
||||
if (rem2 > (uint32_t)0U) {
|
||||
uint8_t *uu____2 = out + nb * (uint32_t)512U;
|
||||
uint8_t *uu____3 = cipher + nb * (uint32_t)512U;
|
||||
uint8_t plain[512U] = { 0U };
|
||||
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
|
||||
Lib_IntVector_Intrinsics_vec256 k[16U];
|
||||
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
|
||||
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
|
||||
chacha20_core_256(k, ctx, nb);
|
||||
Lib_IntVector_Intrinsics_vec256 v00 = k[0U];
|
||||
Lib_IntVector_Intrinsics_vec256 v16 = k[1U];
|
||||
Lib_IntVector_Intrinsics_vec256 v20 = k[2U];
|
||||
Lib_IntVector_Intrinsics_vec256 v30 = k[3U];
|
||||
Lib_IntVector_Intrinsics_vec256 v40 = k[4U];
|
||||
Lib_IntVector_Intrinsics_vec256 v50 = k[5U];
|
||||
Lib_IntVector_Intrinsics_vec256 v60 = k[6U];
|
||||
Lib_IntVector_Intrinsics_vec256 v70 = k[7U];
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_, v2_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_, v2_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_, v3_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_, v3_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_, v6_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_, v6_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6__ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_, v7_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7__ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_, v7_);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__, v4__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__, v4__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__, v5__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__, v5__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__, v6__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__, v6__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6___ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__, v7__);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7___ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__, v7__);
|
||||
Lib_IntVector_Intrinsics_vec256 v0 = v0___;
|
||||
Lib_IntVector_Intrinsics_vec256 v1 = v2___;
|
||||
Lib_IntVector_Intrinsics_vec256 v2 = v4___;
|
||||
Lib_IntVector_Intrinsics_vec256 v3 = v6___;
|
||||
Lib_IntVector_Intrinsics_vec256 v4 = v1___;
|
||||
Lib_IntVector_Intrinsics_vec256 v5 = v3___;
|
||||
Lib_IntVector_Intrinsics_vec256 v6 = v5___;
|
||||
Lib_IntVector_Intrinsics_vec256 v7 = v7___;
|
||||
Lib_IntVector_Intrinsics_vec256 v01 = k[8U];
|
||||
Lib_IntVector_Intrinsics_vec256 v110 = k[9U];
|
||||
Lib_IntVector_Intrinsics_vec256 v21 = k[10U];
|
||||
Lib_IntVector_Intrinsics_vec256 v31 = k[11U];
|
||||
Lib_IntVector_Intrinsics_vec256 v41 = k[12U];
|
||||
Lib_IntVector_Intrinsics_vec256 v51 = k[13U];
|
||||
Lib_IntVector_Intrinsics_vec256 v61 = k[14U];
|
||||
Lib_IntVector_Intrinsics_vec256 v71 = k[15U];
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6_0 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7_0 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6__0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7__0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v0___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0__0, v4__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v1___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0__0, v4__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v2___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1__0, v5__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v3___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1__0, v5__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v4___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2__0, v6__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v5___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2__0, v6__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v6___0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3__0, v7__0);
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
v7___0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3__0, v7__0);
|
||||
Lib_IntVector_Intrinsics_vec256 v8 = v0___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v9 = v2___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v10 = v4___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v11 = v6___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v12 = v1___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v13 = v3___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v14 = v5___0;
|
||||
Lib_IntVector_Intrinsics_vec256 v15 = v7___0;
|
||||
k[0U] = v0;
|
||||
k[1U] = v8;
|
||||
k[2U] = v1;
|
||||
k[3U] = v9;
|
||||
k[4U] = v2;
|
||||
k[5U] = v10;
|
||||
k[6U] = v3;
|
||||
k[7U] = v11;
|
||||
k[8U] = v4;
|
||||
k[9U] = v12;
|
||||
k[10U] = v5;
|
||||
k[11U] = v13;
|
||||
k[12U] = v6;
|
||||
k[13U] = v14;
|
||||
k[14U] = v7;
|
||||
k[15U] = v15;
|
||||
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
|
||||
Lib_IntVector_Intrinsics_vec256
|
||||
x = Lib_IntVector_Intrinsics_vec256_load_le(plain + i * (uint32_t)32U);
|
||||
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
|
||||
Lib_IntVector_Intrinsics_vec256_store_le(plain + i * (uint32_t)32U, y);
|
||||
}
|
||||
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
|
||||
}
|
||||
}
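The interleave ladders that dominate this file implement an 8x8 32-bit transpose: chacha20_core_256 produces eight ChaCha20 states in parallel, one state word per lane, and the low32/high32, low64/high64, low128/high128 stages regroup the lanes so that each of the sixteen k vectors ends up holding 32 consecutive keystream bytes. A minimal scalar sketch of the first stage, assuming AVX2-style in-lane unpack semantics for the intrinsics (the model type and its semantics are assumptions for illustration, not the libintvector.h definitions):

```c
#include <stdint.h>
#include <stdio.h>

/* Toy model of a 256-bit vector: eight 32-bit lanes, split into two
 * 128-bit halves (lanes 0-3 and 4-7), as AVX2 unpack instructions see it. */
typedef struct {
    uint32_t lane[8];
} vec256_model;

/* interleave_low32 in this model alternates the low lane pairs of a and b
 * within each 128-bit half, mirroring _mm256_unpacklo_epi32. */
static vec256_model
interleave_low32(vec256_model a, vec256_model b)
{
    vec256_model r;
    for (int h = 0; h < 8; h += 4) { /* each 128-bit half separately */
        r.lane[h + 0] = a.lane[h + 0];
        r.lane[h + 1] = b.lane[h + 0];
        r.lane[h + 2] = a.lane[h + 1];
        r.lane[h + 3] = b.lane[h + 1];
    }
    return r;
}

int
main(void)
{
    /* Lane j of vi holds word i of the j-th parallel state in this toy. */
    vec256_model v0, v1;
    for (int j = 0; j < 8; j++) {
        v0.lane[j] = 0x00 + j;
        v1.lane[j] = 0x10 + j;
    }
    vec256_model lo = interleave_low32(v0, v1);
    for (int j = 0; j < 8; j++)
        printf("%02x ", lo.lane[j]); /* prints: 00 10 01 11 04 14 05 15 */
    printf("\n");
    return 0;
}
```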
@@ -0,0 +1,55 @@
/* MIT License
 *
 * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>

#ifndef __Hacl_Chacha20_Vec256_H
#define __Hacl_Chacha20_Vec256_H

#include "Hacl_Chacha20.h"
#include "Hacl_Kremlib.h"

void
Hacl_Chacha20_Vec256_chacha20_encrypt_256(
    uint32_t len,
    uint8_t *out,
    uint8_t *text,
    uint8_t *key,
    uint8_t *n1,
    uint32_t ctr);

void
Hacl_Chacha20_Vec256_chacha20_decrypt_256(
    uint32_t len,
    uint8_t *out,
    uint8_t *cipher,
    uint8_t *key,
    uint8_t *n1,
    uint32_t ctr);

#define __Hacl_Chacha20_Vec256_H_DEFINED
#endif
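The two declarations above are the whole public surface of the new file. A minimal round-trip sketch against this API (assuming an AVX2-capable build of freebl and that n1 is the 12-byte IETF nonce) might look like:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "Hacl_Chacha20_Vec256.h"

int
main(void)
{
    uint8_t key[32] = { 0 };   /* fixed test key; real callers use random keys */
    uint8_t nonce[12] = { 0 }; /* assumed 12-byte IETF nonce layout */
    uint8_t msg[64] = "attack at dawn"; /* rest of the block is zero-padded */
    uint8_t ct[64], pt[64];
    /* Counter 1 matches the usual AEAD construction; 0 is also accepted. */
    Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, ct, msg, key, nonce, 1U);
    Hacl_Chacha20_Vec256_chacha20_decrypt_256(64U, pt, ct, key, nonce, 1U);
    printf("%s\n", memcmp(msg, pt, 64) == 0 ? "round-trip ok" : "mismatch");
    return 0;
}
```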
@@ -23,8 +23,8 @@
 
 #include "Hacl_Curve25519_51.h"
 
-inline static void
-Hacl_Impl_Curve25519_Field51_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
+static inline void
+fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
     uint64_t f10 = f1[0U];
     uint64_t f20 = f2[0U];
@@ -43,8 +43,8 @@ Hacl_Impl_Curve25519_Field51_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
     out[4U] = f14 + f24;
 }
 
-inline static void
-Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2)
+static inline void
+fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
     uint64_t f10 = f1[0U];
     uint64_t f20 = f2[0U];
@@ -63,12 +63,8 @@ Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2)
     out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24;
 }
 
-inline static void
-Hacl_Impl_Curve25519_Field51_fmul(
-    uint64_t *out,
-    uint64_t *f1,
-    uint64_t *f2,
-    FStar_UInt128_uint128 *uu____2959)
+static inline void
+fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
     uint64_t f10 = f1[0U];
     uint64_t f11 = f1[1U];
@@ -145,12 +141,8 @@ Hacl_Impl_Curve25519_Field51_fmul(
     out[4U] = o4;
 }
 
-inline static void
-Hacl_Impl_Curve25519_Field51_fmul2(
-    uint64_t *out,
-    uint64_t *f1,
-    uint64_t *f2,
-    FStar_UInt128_uint128 *uu____4281)
+static inline void
+fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
     uint64_t f10 = f1[0U];
     uint64_t f11 = f1[1U];
@@ -310,8 +302,8 @@ Hacl_Impl_Curve25519_Field51_fmul2(
     out[9U] = o24;
 }
 
-inline static void
-Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
+static inline void
+fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
 {
     uint64_t f10 = f1[0U];
     uint64_t f11 = f1[1U];
@@ -354,11 +346,8 @@ Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
     out[4U] = o4;
 }
 
-inline static void
-Hacl_Impl_Curve25519_Field51_fsqr(
-    uint64_t *out,
-    uint64_t *f,
-    FStar_UInt128_uint128 *uu____6941)
+static inline void
+fsqr0(uint64_t *out, uint64_t *f)
 {
     uint64_t f0 = f[0U];
     uint64_t f1 = f[1U];
@@ -432,11 +421,8 @@ Hacl_Impl_Curve25519_Field51_fsqr(
     out[4U] = o4;
 }
 
-inline static void
-Hacl_Impl_Curve25519_Field51_fsqr2(
-    uint64_t *out,
-    uint64_t *f,
-    FStar_UInt128_uint128 *uu____7692)
+static inline void
+fsqr20(uint64_t *out, uint64_t *f)
 {
     uint64_t f10 = f[0U];
     uint64_t f11 = f[1U];
@@ -591,7 +577,7 @@ Hacl_Impl_Curve25519_Field51_fsqr2(
 }
 
 static void
-Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint64_t *f)
+store_felem(uint64_t *u64s, uint64_t *f)
 {
     uint64_t f0 = f[0U];
     uint64_t f1 = f[1U];
@@ -651,32 +637,21 @@ Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint64_t *f)
     u64s[3U] = o3;
 }
 
-inline static void
-Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
+static inline void
+cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2)
 {
     uint64_t mask = (uint64_t)0U - bit;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i++) {
         uint64_t dummy = mask & (p1[i] ^ p2[i]);
         p1[i] = p1[i] ^ dummy;
         p2[i] = p2[i] ^ dummy;
     }
 }
 
-static uint8_t
-Hacl_Curve25519_51_g25519[32U] =
-    {
-        (uint8_t)9U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
-        (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
-        (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
-        (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
-        (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U
-    };
+static uint8_t g25519[32U] = { (uint8_t)9U };
 
 static void
-Hacl_Curve25519_51_point_add_and_double(
-    uint64_t *q,
-    uint64_t *p01_tmp1,
-    FStar_UInt128_uint128 *tmp2)
+point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2)
 {
     uint64_t *nq = p01_tmp1;
     uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U;
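The cswap20 rename above does not change the property that matters: the ladder's conditional swap is branch-free, driven by an all-zeros or all-ones mask derived from the secret bit, so no secret-dependent branch or memory access occurs. A standalone sketch of the same idiom:

```c
#include <stdint.h>
#include <assert.h>

/* Constant-time conditional swap: bit must be 0 or 1. When bit == 1 the
 * mask is all ones and the XOR trick swaps *a and *b; when bit == 0 the
 * mask is zero and both values are rewritten unchanged. */
static void
ct_swap64(uint64_t bit, uint64_t *a, uint64_t *b)
{
    uint64_t mask = (uint64_t)0U - bit;
    uint64_t dummy = mask & (*a ^ *b);
    *a ^= dummy;
    *b ^= dummy;
}

int
main(void)
{
    uint64_t x = 1U, y = 2U;
    ct_swap64(1U, &x, &y);
    assert(x == 2U && y == 1U); /* swapped */
    ct_swap64(0U, &x, &y);
    assert(x == 2U && y == 1U); /* untouched */
    return 0;
}
```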
@@ -689,39 +664,39 @@ Hacl_Curve25519_51_point_add_and_double(
     uint64_t *b = tmp1 + (uint32_t)5U;
     uint64_t *ab = tmp1;
     uint64_t *dc = tmp1 + (uint32_t)10U;
-    Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
-    Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
+    fadd0(a, x2, z2);
+    fsub0(b, x2, z2);
     uint64_t *x3 = nq_p1;
     uint64_t *z31 = nq_p1 + (uint32_t)5U;
     uint64_t *d0 = dc;
     uint64_t *c0 = dc + (uint32_t)5U;
-    Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31);
-    Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31);
-    Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2);
-    Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0);
-    Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0);
+    fadd0(c0, x3, z31);
+    fsub0(d0, x3, z31);
+    fmul20(dc, dc, ab);
+    fadd0(x3, d0, c0);
+    fsub0(z31, d0, c0);
     uint64_t *a1 = tmp1;
     uint64_t *b1 = tmp1 + (uint32_t)5U;
     uint64_t *d = tmp1 + (uint32_t)10U;
     uint64_t *c = tmp1 + (uint32_t)15U;
     uint64_t *ab1 = tmp1;
     uint64_t *dc1 = tmp1 + (uint32_t)10U;
-    Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2);
-    Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2);
+    fsqr20(dc1, ab1);
+    fsqr20(nq_p1, nq_p1);
     a1[0U] = c[0U];
     a1[1U] = c[1U];
     a1[2U] = c[2U];
     a1[3U] = c[3U];
     a1[4U] = c[4U];
-    Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-    Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U);
-    Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d);
-    Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2);
-    Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2);
+    fsub0(c, d, c);
+    fmul1(b1, c, (uint64_t)121665U);
+    fadd0(b1, b1, d);
+    fmul20(nq, dc1, ab1);
+    fmul0(z3, z3, x1);
 }
 
 static void
-Hacl_Curve25519_51_point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
+point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
 {
     uint64_t *x2 = nq;
     uint64_t *z2 = nq + (uint32_t)5U;
@@ -731,22 +706,22 @@ Hacl_Curve25519_51_point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint
     uint64_t *c = tmp1 + (uint32_t)15U;
     uint64_t *ab = tmp1;
     uint64_t *dc = tmp1 + (uint32_t)10U;
-    Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
-    Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
-    Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2);
+    fadd0(a, x2, z2);
+    fsub0(b, x2, z2);
+    fsqr20(dc, ab);
     a[0U] = c[0U];
     a[1U] = c[1U];
     a[2U] = c[2U];
     a[3U] = c[3U];
     a[4U] = c[4U];
-    Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-    Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U);
-    Hacl_Impl_Curve25519_Field51_fadd(b, b, d);
-    Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2);
+    fsub0(c, d, c);
+    fmul1(b, c, (uint64_t)121665U);
+    fadd0(b, b, d);
+    fmul20(nq, dc, ab);
 }
 
 static void
-Hacl_Curve25519_51_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
+montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
 {
     FStar_UInt128_uint128 tmp2[10U];
     for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
@@ -756,7 +731,7 @@ Hacl_Curve25519_51_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init
     uint64_t *p01 = p01_tmp1_swap;
     uint64_t *p03 = p01;
     uint64_t *p11 = p01 + (uint32_t)10U;
-    memcpy(p11, init1, (uint32_t)10U * sizeof init1[0U]);
+    memcpy(p11, init1, (uint32_t)10U * sizeof(init1[0U]));
     uint64_t *x0 = p03;
     uint64_t *z0 = p03 + (uint32_t)5U;
     x0[0U] = (uint64_t)1U;
@@ -774,10 +749,10 @@ Hacl_Curve25519_51_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init
     uint64_t *nq1 = p01_tmp1_swap;
     uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
     uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
-    Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq1, nq_p11);
-    Hacl_Curve25519_51_point_add_and_double(init1, p01_tmp11, tmp2);
+    cswap20((uint64_t)1U, nq1, nq_p11);
+    point_add_and_double(init1, p01_tmp11, tmp2);
     swap1[0U] = (uint64_t)1U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++) {
         uint64_t *p01_tmp12 = p01_tmp1_swap;
         uint64_t *swap2 = p01_tmp1_swap + (uint32_t)40U;
         uint64_t *nq2 = p01_tmp12;
@@ -786,35 +761,31 @@ Hacl_Curve25519_51_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init
         bit =
             (uint64_t)(key[((uint32_t)253U - i) / (uint32_t)8U] >> ((uint32_t)253U - i) % (uint32_t)8U & (uint8_t)1U);
         uint64_t sw = swap2[0U] ^ bit;
-        Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12);
-        Hacl_Curve25519_51_point_add_and_double(init1, p01_tmp12, tmp2);
+        cswap20(sw, nq2, nq_p12);
+        point_add_and_double(init1, p01_tmp12, tmp2);
         swap2[0U] = bit;
     }
     uint64_t sw = swap1[0U];
-    Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11);
+    cswap20(sw, nq1, nq_p11);
     uint64_t *nq10 = p01_tmp1;
     uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
-    Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
-    Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
-    Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
-    memcpy(out, p0, (uint32_t)10U * sizeof p0[0U]);
+    point_double(nq10, tmp1, tmp2);
+    point_double(nq10, tmp1, tmp2);
+    point_double(nq10, tmp1, tmp2);
+    memcpy(out, p0, (uint32_t)10U * sizeof(p0[0U]));
 }
 
 static void
-Hacl_Curve25519_51_fsquare_times(
-    uint64_t *o,
-    uint64_t *inp,
-    FStar_UInt128_uint128 *tmp,
-    uint32_t n1)
+fsquare_times(uint64_t *o, uint64_t *inp, FStar_UInt128_uint128 *tmp, uint32_t n1)
 {
-    Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp);
-    for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i = i + (uint32_t)1U) {
-        Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp);
+    fsqr0(o, inp);
+    for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i++) {
+        fsqr0(o, o);
    }
 }
 
 static void
-Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp)
+finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp)
 {
     uint64_t t1[20U] = { 0U };
     uint64_t *a = t1;
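The ladder above consumes scalar bits from bit 253 downward, indexing the little-endian key bytes directly. A small sketch of that bit extraction, with a hypothetical helper name for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the bit extraction in the ladder above:
 * bit k of a little-endian 32-byte scalar is byte k/8, shifted by k%8. */
static uint64_t
scalar_bit(const uint8_t key[32], uint32_t k)
{
    return (uint64_t)((key[k / 8U] >> (k % 8U)) & (uint8_t)1U);
}

int
main(void)
{
    uint8_t key[32] = { 0 };
    key[31] = 0x40; /* bit 254 set, as X25519 clamping guarantees */
    /* The ladder reads bits 253 down to 3; bit 253 of this key is 0. */
    printf("bit 254 = %llu, bit 253 = %llu\n",
           (unsigned long long)scalar_bit(key, 254U),
           (unsigned long long)scalar_bit(key, 253U));
    return 0;
}
```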
@@ -822,34 +793,34 @@ Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp)
     uint64_t *c = t1 + (uint32_t)10U;
     uint64_t *t00 = t1 + (uint32_t)15U;
     FStar_UInt128_uint128 *tmp1 = tmp;
-    Hacl_Curve25519_51_fsquare_times(a, i, tmp1, (uint32_t)1U);
-    Hacl_Curve25519_51_fsquare_times(t00, a, tmp1, (uint32_t)2U);
-    Hacl_Impl_Curve25519_Field51_fmul(b, t00, i, tmp);
-    Hacl_Impl_Curve25519_Field51_fmul(a, b, a, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, a, tmp1, (uint32_t)1U);
-    Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)5U);
-    Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)10U);
-    Hacl_Impl_Curve25519_Field51_fmul(c, t00, b, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, c, tmp1, (uint32_t)20U);
-    Hacl_Impl_Curve25519_Field51_fmul(t00, t00, c, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)10U);
-    Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)50U);
-    Hacl_Impl_Curve25519_Field51_fmul(c, t00, b, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, c, tmp1, (uint32_t)100U);
-    Hacl_Impl_Curve25519_Field51_fmul(t00, t00, c, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)50U);
-    Hacl_Impl_Curve25519_Field51_fmul(t00, t00, b, tmp);
-    Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)5U);
+    fsquare_times(a, i, tmp1, (uint32_t)1U);
+    fsquare_times(t00, a, tmp1, (uint32_t)2U);
+    fmul0(b, t00, i);
+    fmul0(a, b, a);
+    fsquare_times(t00, a, tmp1, (uint32_t)1U);
+    fmul0(b, t00, b);
+    fsquare_times(t00, b, tmp1, (uint32_t)5U);
+    fmul0(b, t00, b);
+    fsquare_times(t00, b, tmp1, (uint32_t)10U);
+    fmul0(c, t00, b);
+    fsquare_times(t00, c, tmp1, (uint32_t)20U);
+    fmul0(t00, t00, c);
+    fsquare_times(t00, t00, tmp1, (uint32_t)10U);
+    fmul0(b, t00, b);
+    fsquare_times(t00, b, tmp1, (uint32_t)50U);
+    fmul0(c, t00, b);
+    fsquare_times(t00, c, tmp1, (uint32_t)100U);
+    fmul0(t00, t00, c);
+    fsquare_times(t00, t00, tmp1, (uint32_t)50U);
+    fmul0(t00, t00, b);
+    fsquare_times(t00, t00, tmp1, (uint32_t)5U);
     uint64_t *a0 = t1;
     uint64_t *t0 = t1 + (uint32_t)15U;
-    Hacl_Impl_Curve25519_Field51_fmul(o, t0, a0, tmp);
+    fmul0(o, t0, a0);
 }
 
 static void
-Hacl_Curve25519_51_encode_point(uint8_t *o, uint64_t *i)
+encode_point(uint8_t *o, uint64_t *i)
 {
     uint64_t *x = i;
     uint64_t *z = i + (uint32_t)5U;
@@ -858,10 +829,10 @@ Hacl_Curve25519_51_encode_point(uint8_t *o, uint64_t *i)
     FStar_UInt128_uint128 tmp_w[10U];
     for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
         tmp_w[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
-    Hacl_Curve25519_51_finv(tmp, z, tmp_w);
-    Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w);
-    Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp);
-    for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U) {
+    finv(tmp, z, tmp_w);
+    fmul0(tmp, tmp, x);
+    store_felem(u64s, tmp);
+    for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0++) {
         store64_le(o + i0 * (uint32_t)8U, u64s[i0]);
     }
 }
@@ -871,7 +842,7 @@ Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
     uint64_t init1[10U] = { 0U };
     uint64_t tmp[4U] = { 0U };
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
         uint64_t *os = tmp;
         uint8_t *bj = pub + i * (uint32_t)8U;
         uint64_t u = load64_le(bj);
@@ -901,17 +872,17 @@ Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
     x[2U] = f1h | f2l;
     x[3U] = f2h | f3l;
     x[4U] = f3h;
-    Hacl_Curve25519_51_montgomery_ladder(init1, priv, init1);
-    Hacl_Curve25519_51_encode_point(out, init1);
+    montgomery_ladder(init1, priv, init1);
+    encode_point(out, init1);
 }
 
 void
 Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
     uint8_t basepoint[32U] = { 0U };
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) {
         uint8_t *os = basepoint;
-        uint8_t x = Hacl_Curve25519_51_g25519[i];
+        uint8_t x = g25519[i];
         os[i] = x;
     }
     Hacl_Curve25519_51_scalarmult(pub, priv, basepoint);
@@ -923,7 +894,7 @@ Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
     uint8_t zeros1[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(out, priv, pub);
     uint8_t res = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U) {
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) {
         uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros1[i]);
         res = uu____0 & res;
     }
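Taken together, these renames leave the public Curve25519 API unchanged. A minimal key-agreement round trip against that API might look like the following sketch (that Hacl_Curve25519_51_ecdh returns a bool which is false when the shared secret is all zeros is an assumption based on the res computation above and upstream HACL*):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "Hacl_Curve25519_51.h"

int
main(void)
{
    /* Fixed test secrets; real callers must use 32 random bytes per key. */
    uint8_t alice_priv[32] = { 1 }, bob_priv[32] = { 2 };
    uint8_t alice_pub[32], bob_pub[32], s1[32], s2[32];
    Hacl_Curve25519_51_secret_to_public(alice_pub, alice_priv);
    Hacl_Curve25519_51_secret_to_public(bob_pub, bob_priv);
    /* Assumed: ecdh signals an all-zero (low-order) result via its return. */
    bool ok1 = Hacl_Curve25519_51_ecdh(s1, alice_priv, bob_pub);
    bool ok2 = Hacl_Curve25519_51_ecdh(s2, bob_priv, alice_pub);
    printf("%s\n", (ok1 && ok2 && memcmp(s1, s2, 32) == 0) ? "agree" : "fail");
    return 0;
}
```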
@@ -29,23 +29,23 @@
 #ifndef __Hacl_Kremlib_H
 #define __Hacl_Kremlib_H
 
-inline static uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);
+static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);
 
-inline static uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b);
+static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b);
 
-inline static uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b);
+static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b);
 
-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
 FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
 
-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
 FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);
 
-inline static FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
+static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
 
-inline static uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
+static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
 
-inline static FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
+static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
 
 #define __Hacl_Kremlib_H_DEFINED
 #endif
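These kremlib declarations are constant-time mask helpers: FStar_UInt8_eq_mask yields 0xFF when its arguments are equal and 0x00 otherwise, which is how Hacl_Curve25519_51_ecdh above rejects an all-zero shared secret without branching. A portable sketch of the idiom (an illustration of the technique, not a copy of the kremlib implementation):

```c
#include <stdint.h>
#include <assert.h>

/* Constant-time byte equality: returns 0xFF when a == b, 0x00 otherwise,
 * with no data-dependent branch. For any nonzero x, x | -x has its top
 * bit set, so the shifted value is 1; subtracting 1 maps it to 0x00 and
 * maps the equal case (0) to 0xFF. */
static inline uint8_t
eq_mask8(uint8_t a, uint8_t b)
{
    uint8_t x = a ^ b;                             /* 0 iff equal */
    uint8_t x_or_minus_x = (uint8_t)(x | (uint8_t)(-x));
    return (uint8_t)((x_or_minus_x >> 7) - 1U);
}

int
main(void)
{
    assert(eq_mask8(0x2a, 0x2a) == 0xff);
    assert(eq_mask8(0x2a, 0x2b) == 0x00);
    return 0;
}
```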
@@ -239,61 +239,47 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
     Lib_IntVector_Intrinsics_vec128 t3 = a35;
     Lib_IntVector_Intrinsics_vec128 t4 = a45;
     Lib_IntVector_Intrinsics_vec128
-        l0 = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
+        mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
     Lib_IntVector_Intrinsics_vec128
-        tmp00 =
-            Lib_IntVector_Intrinsics_vec128_and(l0,
-                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
     Lib_IntVector_Intrinsics_vec128
-        c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t1, c00);
+        z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
+    Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
+    Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
+    Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
     Lib_IntVector_Intrinsics_vec128
-        tmp10 =
-            Lib_IntVector_Intrinsics_vec128_and(l1,
-                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
     Lib_IntVector_Intrinsics_vec128
-        c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t2, c10);
+        z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
     Lib_IntVector_Intrinsics_vec128
-        tmp20 =
-            Lib_IntVector_Intrinsics_vec128_and(l2,
-                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
+    Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
+    Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
+    Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
+    Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
     Lib_IntVector_Intrinsics_vec128
-        c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t3, c20);
+        z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
     Lib_IntVector_Intrinsics_vec128
-        tmp30 =
-            Lib_IntVector_Intrinsics_vec128_and(l3,
-                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
+    Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
+    Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
+    Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-        c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(t4, c30);
-    Lib_IntVector_Intrinsics_vec128
-        tmp40 =
-            Lib_IntVector_Intrinsics_vec128_and(l4,
-                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-        l5 =
-            Lib_IntVector_Intrinsics_vec128_add64(tmp00,
-                Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
-    Lib_IntVector_Intrinsics_vec128
-        tmp01 =
-            Lib_IntVector_Intrinsics_vec128_and(l5,
-                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c50 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp10, c50);
-    Lib_IntVector_Intrinsics_vec128 o00 = tmp01;
-    Lib_IntVector_Intrinsics_vec128 o10 = tmp11;
-    Lib_IntVector_Intrinsics_vec128 o20 = tmp20;
-    Lib_IntVector_Intrinsics_vec128 o30 = tmp30;
-    Lib_IntVector_Intrinsics_vec128 o40 = tmp40;
+    Lib_IntVector_Intrinsics_vec128
+        z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
+    Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
+    Lib_IntVector_Intrinsics_vec128 o0 = x02;
+    Lib_IntVector_Intrinsics_vec128 o10 = x12;
+    Lib_IntVector_Intrinsics_vec128 o20 = x21;
+    Lib_IntVector_Intrinsics_vec128 o30 = x32;
+    Lib_IntVector_Intrinsics_vec128 o40 = x42;
     Lib_IntVector_Intrinsics_vec128
         o01 =
-            Lib_IntVector_Intrinsics_vec128_add64(o00,
-                Lib_IntVector_Intrinsics_vec128_interleave_high64(o00, o00));
+            Lib_IntVector_Intrinsics_vec128_add64(o0,
+                Lib_IntVector_Intrinsics_vec128_interleave_high64(o0, o0));
     Lib_IntVector_Intrinsics_vec128
         o11 =
             Lib_IntVector_Intrinsics_vec128_add64(o10,
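These Poly1305 hunks replace a serial carry chain (l0, c00, l1, ...) with one that computes the shifts and masks with fewer sequential dependencies (the z and x series), but both normalize the same representation: five 26-bit limbs of a field element mod 2^130 - 5, where a carry out of the top limb re-enters at the bottom multiplied by 5. A minimal scalar sketch of that limb normalization, assuming a plain uint64_t[5] representation (an illustration, not the verified vector code):

```c
#include <stdint.h>
#include <stdio.h>

#define MASK26 ((uint64_t)0x3ffffffU)

/* Propagate carries through five 26-bit limbs, then fold the carry out of
 * the top limb back into the bottom one: 2^130 == 5 (mod 2^130 - 5). */
static void
carry_felem(uint64_t f[5])
{
    uint64_t c = 0U;
    for (int i = 0; i < 5; i++) {
        uint64_t l = f[i] + c;
        f[i] = l & MASK26;
        c = l >> 26U;
    }
    f[0] += c * 5U;
}

int
main(void)
{
    uint64_t f[5] = { MASK26 + 3U, 0U, 0U, 0U, 0U };
    carry_felem(f);
    printf("%llu %llu\n", (unsigned long long)f[0],
           (unsigned long long)f[1]); /* prints: 2 1 */
    return 0;
}
```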
@@ -318,50 +304,43 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
             Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
     Lib_IntVector_Intrinsics_vec128
         c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
+    Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
     Lib_IntVector_Intrinsics_vec128
         tmp1 =
-            Lib_IntVector_Intrinsics_vec128_and(l6,
+            Lib_IntVector_Intrinsics_vec128_and(l0,
                 Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
     Lib_IntVector_Intrinsics_vec128
-        c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
+        c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
     Lib_IntVector_Intrinsics_vec128
         tmp2 =
-            Lib_IntVector_Intrinsics_vec128_and(l7,
+            Lib_IntVector_Intrinsics_vec128_and(l1,
                 Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
     Lib_IntVector_Intrinsics_vec128
-        c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
+        c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
     Lib_IntVector_Intrinsics_vec128
         tmp3 =
-            Lib_IntVector_Intrinsics_vec128_and(l8,
+            Lib_IntVector_Intrinsics_vec128_and(l2,
                 Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
     Lib_IntVector_Intrinsics_vec128
-        c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 l9 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
+        c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
     Lib_IntVector_Intrinsics_vec128
         tmp4 =
-            Lib_IntVector_Intrinsics_vec128_and(l9,
+            Lib_IntVector_Intrinsics_vec128_and(l3,
                 Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
     Lib_IntVector_Intrinsics_vec128
-        c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l9, (uint32_t)26U);
+        c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
     Lib_IntVector_Intrinsics_vec128
-        l10 =
+        o00 =
             Lib_IntVector_Intrinsics_vec128_add64(tmp0,
                 Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
-    Lib_IntVector_Intrinsics_vec128
-        tmp0_ =
-            Lib_IntVector_Intrinsics_vec128_and(l10,
-                Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-        c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l10, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128 o0 = tmp0_;
-    Lib_IntVector_Intrinsics_vec128 o1 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+    Lib_IntVector_Intrinsics_vec128 o1 = tmp1;
     Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
     Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
     Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
-    out[0U] = o0;
+    out[0U] = o00;
     out[1U] = o1;
     out[2U] = o2;
     out[3U] = o3;
@@ -535,57 +514,43 @@ Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
tmp0 =
Lib_IntVector_Intrinsics_vec128_and(l,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c0);
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
tmp1 =
Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp2 =
Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp3 =
Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
l4 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
tmp01 =
Lib_IntVector_Intrinsics_vec128_and(l4,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
Lib_IntVector_Intrinsics_vec128 o2 = x21;
Lib_IntVector_Intrinsics_vec128 o3 = x32;
Lib_IntVector_Intrinsics_vec128 o4 = x42;
rn[0U] = o0;
rn[1U] = o1;
rn[2U] = o2;

@@ -771,57 +736,43 @@ Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
tmp0 =
Lib_IntVector_Intrinsics_vec128_and(l,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c0);
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
tmp1 =
Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp2 =
Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp3 =
Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
l4 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
tmp01 =
Lib_IntVector_Intrinsics_vec128_and(l4,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
Lib_IntVector_Intrinsics_vec128 o2 = x21;
Lib_IntVector_Intrinsics_vec128 o3 = x32;
Lib_IntVector_Intrinsics_vec128 o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;

@@ -847,7 +798,7 @@ Hacl_Poly1305_128_poly1305_update(
uint32_t len1 = len0 - bs;
uint8_t *text1 = t0 + bs;
uint32_t nb = len1 / bs;
for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = text1 + i * bs;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)

@@ -1002,57 +953,43 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
tmp0 =
Lib_IntVector_Intrinsics_vec128_and(l,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c0);
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
tmp1 =
Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp2 =
Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp3 =
Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
l4 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
tmp01 =
Lib_IntVector_Intrinsics_vec128_and(l4,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
Lib_IntVector_Intrinsics_vec128 o00 = tmp01;
Lib_IntVector_Intrinsics_vec128 o10 = tmp11;
Lib_IntVector_Intrinsics_vec128 o20 = tmp2;
Lib_IntVector_Intrinsics_vec128 o30 = tmp3;
Lib_IntVector_Intrinsics_vec128 o40 = tmp4;
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o00 = x02;
Lib_IntVector_Intrinsics_vec128 o10 = x12;
Lib_IntVector_Intrinsics_vec128 o20 = x21;
Lib_IntVector_Intrinsics_vec128 o30 = x32;
Lib_IntVector_Intrinsics_vec128 o40 = x42;
acc[0U] = o00;
acc[1U] = o10;
acc[2U] = o20;

@@ -1085,7 +1022,7 @@ Hacl_Poly1305_128_poly1305_update(
uint8_t *t1 = text + len0;
uint32_t nb = len1 / (uint32_t)16U;
uint32_t rem1 = len1 % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = t1 + i * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)

@@ -1250,57 +1187,43 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
tmp0 =
Lib_IntVector_Intrinsics_vec128_and(l,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c0);
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
tmp1 =
Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp2 =
Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp3 =
Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
l4 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
tmp01 =
Lib_IntVector_Intrinsics_vec128_and(l4,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
Lib_IntVector_Intrinsics_vec128 o2 = x21;
Lib_IntVector_Intrinsics_vec128 o3 = x32;
Lib_IntVector_Intrinsics_vec128 o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;

@@ -1313,7 +1236,7 @@ Hacl_Poly1305_128_poly1305_update(
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem1 * sizeof last1[0U]);
memcpy(tmp, last1, rem1 * sizeof(last1[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);

@@ -1474,57 +1397,43 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
tmp0 =
Lib_IntVector_Intrinsics_vec128_and(l,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c0);
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
tmp1 =
Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp2 =
Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
tmp3 =
Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
l4 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
tmp01 =
Lib_IntVector_Intrinsics_vec128_and(l4,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
Lib_IntVector_Intrinsics_vec128 o2 = x21;
Lib_IntVector_Intrinsics_vec128 o3 = x32;
Lib_IntVector_Intrinsics_vec128 o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;

@@ -1543,89 +1452,126 @@ Hacl_Poly1305_128_poly1305_finish(
Lib_IntVector_Intrinsics_vec128 *acc = ctx;
uint8_t *ks = key + (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 f0 = acc[0U];
Lib_IntVector_Intrinsics_vec128 f12 = acc[1U];
Lib_IntVector_Intrinsics_vec128 f22 = acc[2U];
Lib_IntVector_Intrinsics_vec128 f32 = acc[3U];
Lib_IntVector_Intrinsics_vec128 f13 = acc[1U];
Lib_IntVector_Intrinsics_vec128 f23 = acc[2U];
Lib_IntVector_Intrinsics_vec128 f33 = acc[3U];
Lib_IntVector_Intrinsics_vec128 f40 = acc[4U];
Lib_IntVector_Intrinsics_vec128
l = Lib_IntVector_Intrinsics_vec128_add64(f0, Lib_IntVector_Intrinsics_vec128_zero);
l0 = Lib_IntVector_Intrinsics_vec128_add64(f0, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
tmp00 =
Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f13, c00);
Lib_IntVector_Intrinsics_vec128
tmp10 =
Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f23, c10);
Lib_IntVector_Intrinsics_vec128
tmp20 =
Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f33, c20);
Lib_IntVector_Intrinsics_vec128
tmp30 =
Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(f40, c30);
Lib_IntVector_Intrinsics_vec128
tmp40 =
Lib_IntVector_Intrinsics_vec128_and(l4,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
f010 =
Lib_IntVector_Intrinsics_vec128_add64(tmp00,
Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128 f110 = tmp10;
Lib_IntVector_Intrinsics_vec128 f210 = tmp20;
Lib_IntVector_Intrinsics_vec128 f310 = tmp30;
Lib_IntVector_Intrinsics_vec128 f410 = tmp40;
Lib_IntVector_Intrinsics_vec128
l = Lib_IntVector_Intrinsics_vec128_add64(f010, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
tmp0 =
Lib_IntVector_Intrinsics_vec128_and(l,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(f12, c0);
Lib_IntVector_Intrinsics_vec128 l5 = Lib_IntVector_Intrinsics_vec128_add64(f110, c0);
Lib_IntVector_Intrinsics_vec128
tmp1 =
Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_and(l5,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f22, c1);
c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(f210, c1);
Lib_IntVector_Intrinsics_vec128
tmp2 =
Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_and(l6,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f32, c2);
c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(f310, c2);
Lib_IntVector_Intrinsics_vec128
tmp3 =
Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_and(l7,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f40, c3);
c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(f410, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_and(l8,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
l4 =
f02 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
tmp0_ =
Lib_IntVector_Intrinsics_vec128_and(l4,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 f010 = tmp0_;
Lib_IntVector_Intrinsics_vec128 f110 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
Lib_IntVector_Intrinsics_vec128 f210 = tmp2;
Lib_IntVector_Intrinsics_vec128 f310 = tmp3;
Lib_IntVector_Intrinsics_vec128 f410 = tmp4;
Lib_IntVector_Intrinsics_vec128 f12 = tmp1;
Lib_IntVector_Intrinsics_vec128 f22 = tmp2;
Lib_IntVector_Intrinsics_vec128 f32 = tmp3;
Lib_IntVector_Intrinsics_vec128 f42 = tmp4;
Lib_IntVector_Intrinsics_vec128
mh = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
ml = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffffbU);
Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f410, mh);
Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f42, mh);
Lib_IntVector_Intrinsics_vec128
mask1 =
Lib_IntVector_Intrinsics_vec128_and(mask,
Lib_IntVector_Intrinsics_vec128_eq64(f310, mh));
Lib_IntVector_Intrinsics_vec128_eq64(f32, mh));
Lib_IntVector_Intrinsics_vec128
mask2 =
Lib_IntVector_Intrinsics_vec128_and(mask1,
Lib_IntVector_Intrinsics_vec128_eq64(f210, mh));
Lib_IntVector_Intrinsics_vec128_eq64(f22, mh));
Lib_IntVector_Intrinsics_vec128
mask3 =
Lib_IntVector_Intrinsics_vec128_and(mask2,
Lib_IntVector_Intrinsics_vec128_eq64(f110, mh));
Lib_IntVector_Intrinsics_vec128_eq64(f12, mh));
Lib_IntVector_Intrinsics_vec128
mask4 =
Lib_IntVector_Intrinsics_vec128_and(mask3,
Lib_IntVector_Intrinsics_vec128_lognot(Lib_IntVector_Intrinsics_vec128_gt64(ml, f010)));
Lib_IntVector_Intrinsics_vec128_lognot(Lib_IntVector_Intrinsics_vec128_gt64(ml, f02)));
Lib_IntVector_Intrinsics_vec128 ph = Lib_IntVector_Intrinsics_vec128_and(mask4, mh);
Lib_IntVector_Intrinsics_vec128 pl = Lib_IntVector_Intrinsics_vec128_and(mask4, ml);
Lib_IntVector_Intrinsics_vec128 o0 = Lib_IntVector_Intrinsics_vec128_sub64(f010, pl);
Lib_IntVector_Intrinsics_vec128 o1 = Lib_IntVector_Intrinsics_vec128_sub64(f110, ph);
Lib_IntVector_Intrinsics_vec128 o2 = Lib_IntVector_Intrinsics_vec128_sub64(f210, ph);
Lib_IntVector_Intrinsics_vec128 o3 = Lib_IntVector_Intrinsics_vec128_sub64(f310, ph);
Lib_IntVector_Intrinsics_vec128 o4 = Lib_IntVector_Intrinsics_vec128_sub64(f410, ph);
Lib_IntVector_Intrinsics_vec128 o0 = Lib_IntVector_Intrinsics_vec128_sub64(f02, pl);
Lib_IntVector_Intrinsics_vec128 o1 = Lib_IntVector_Intrinsics_vec128_sub64(f12, ph);
Lib_IntVector_Intrinsics_vec128 o2 = Lib_IntVector_Intrinsics_vec128_sub64(f22, ph);
Lib_IntVector_Intrinsics_vec128 o3 = Lib_IntVector_Intrinsics_vec128_sub64(f32, ph);
Lib_IntVector_Intrinsics_vec128 o4 = Lib_IntVector_Intrinsics_vec128_sub64(f42, ph);
Lib_IntVector_Intrinsics_vec128 f011 = o0;
Lib_IntVector_Intrinsics_vec128 f111 = o1;
Lib_IntVector_Intrinsics_vec128 f211 = o2;
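The finish path above runs the carry pass twice and then conditionally subtracts p = 2^130 - 5 without branching: the chained eq64/gt64 masks are all-ones exactly when the fully-carried accumulator is greater than or equal to p, so ph/pl subtract either p or 0. A scalar sketch of the idea follows, with illustrative names; it uses ordinary C comparisons for readability, whereas the generated code builds the same mask with the eq/gt mask primitives to stay branch-free.

#include <stdint.h>

/* Sketch: subtract p = 2^130 - 5 exactly when the fully-carried
   accumulator h (five 26-bit limbs) is >= p. */
static void reduce_final_sketch(uint64_t h[5])
{
    const uint64_t mh = 0x3ffffffU; /* top four limbs of p: 2^26 - 1 */
    const uint64_t ml = 0x3fffffbU; /* low limb of p: 2^26 - 5 */
    /* mask is all-ones iff h >= p, i.e. limbs 1..4 equal mh and h[0] >= ml. */
    uint64_t ge_p = (uint64_t)(h[4] == mh && h[3] == mh && h[2] == mh &&
                               h[1] == mh && h[0] >= ml);
    uint64_t mask = (uint64_t)0U - ge_p;
    h[0] -= mask & ml;
    h[1] -= mask & mh;
    h[2] -= mask & mh;
    h[3] -= mask & mh;
    h[4] -= mask & mh;
}

The tag is then the low 128 bits of the reduced accumulator plus s (the second key half at ks), computed mod 2^128.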
(The diff for one file is not shown here because of its large size.)
@@ -0,0 +1,66 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/

#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>

#ifndef __Hacl_Poly1305_256_H
#define __Hacl_Poly1305_256_H

#include "Hacl_Kremlib.h"

void
Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b);

void
Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
Lib_IntVector_Intrinsics_vec256 *out,
Lib_IntVector_Intrinsics_vec256 *p);

extern uint32_t Hacl_Poly1305_256_blocklen;

typedef Lib_IntVector_Intrinsics_vec256 *Hacl_Poly1305_256_poly1305_ctx;

void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key);

void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text);

void
Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256 *ctx,
uint32_t len,
uint8_t *text);

void
Hacl_Poly1305_256_poly1305_finish(
uint8_t *tag,
uint8_t *key,
Lib_IntVector_Intrinsics_vec256 *ctx);

void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);

#define __Hacl_Poly1305_256_H_DEFINED
#endif
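The new Hacl_Poly1305_256.h mirrors the 32-bit and 128-bit variants, including the one-shot Hacl_Poly1305_256_poly1305_mac entry point declared above. A hedged usage sketch follows; the buffers are placeholders, the 32-byte key is the usual Poly1305 r||s pair, and the tag is 16 bytes.

#include <stdint.h>
#include "Hacl_Poly1305_256.h"

/* One-shot MAC with the AVX2 variant; the prototype is taken from the
   header above. Real callers must supply a uniformly random one-time key. */
void mac_example(void)
{
    uint8_t key[32] = { 0 }; /* 16-byte r followed by 16-byte s */
    uint8_t msg[34] = { 0 }; /* message to authenticate */
    uint8_t tag[16];
    Hacl_Poly1305_256_poly1305_mac(tag, (uint32_t)sizeof(msg), msg, key);
}

The init/update/finish functions expose the same computation incrementally for callers that stream data, with ctx pointing at vec256-aligned state.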
@@ -174,30 +174,35 @@ Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t l = t0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c0 = l >> (uint32_t)26U;
uint64_t l0 = t1 + c0;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c1 = l0 >> (uint32_t)26U;
uint64_t l1 = t2 + c1;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c2 = l1 >> (uint32_t)26U;
uint64_t l2 = t3 + c2;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c3 = l2 >> (uint32_t)26U;
uint64_t l3 = t4 + c3;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t tmp11 = tmp1 + c5;
uint64_t o0 = tmp01;
uint64_t o1 = tmp11;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
uint64_t o2 = x21;
uint64_t o3 = x32;
uint64_t o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
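Reading this hunk, the rewrite appears to replace the single sequential carry chain (each ci feeding the next li) with two chains that start at limbs 0 and 3 and advance together (z0/z1, z01/z11, and so on), which shortens the dependency path; the limb-4 overflow re-enters limb 0 as z12 = z11 + (z11 << 2), i.e. 5 * z11. A one-line sanity check of that identity, purely illustrative:

#include <assert.h>
#include <stdint.h>

/* 5*z == z + 4*z, with the multiply-by-4 done as a shift. */
void check_times5(void)
{
    uint64_t z11 = 0x1234U;
    assert(z11 + (z11 << 2U) == 5U * z11);
}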
@@ -212,7 +217,7 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t *acc = ctx;
uint32_t nb = len / (uint32_t)16U;
uint32_t rem1 = len % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = text + i * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint64_t u0 = load64_le(block);

@@ -296,30 +301,35 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t l = t0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c0 = l >> (uint32_t)26U;
uint64_t l0 = t1 + c0;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c1 = l0 >> (uint32_t)26U;
uint64_t l1 = t2 + c1;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c2 = l1 >> (uint32_t)26U;
uint64_t l2 = t3 + c2;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c3 = l2 >> (uint32_t)26U;
uint64_t l3 = t4 + c3;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t tmp11 = tmp1 + c5;
uint64_t o0 = tmp01;
uint64_t o1 = tmp11;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
uint64_t o2 = x21;
uint64_t o3 = x32;
uint64_t o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;

@@ -330,7 +340,7 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint8_t *last1 = text + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem1 * sizeof last1[0U]);
memcpy(tmp, last1, rem1 * sizeof(last1[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);

@@ -412,30 +422,35 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t l = t0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c0 = l >> (uint32_t)26U;
uint64_t l0 = t1 + c0;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c1 = l0 >> (uint32_t)26U;
uint64_t l1 = t2 + c1;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c2 = l1 >> (uint32_t)26U;
uint64_t l2 = t3 + c2;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c3 = l2 >> (uint32_t)26U;
uint64_t l3 = t4 + c3;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t tmp11 = tmp1 + c5;
uint64_t o0 = tmp01;
uint64_t o1 = tmp11;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
uint64_t o2 = x21;
uint64_t o3 = x32;
uint64_t o4 = x42;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;

@@ -451,47 +466,64 @@ Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
uint64_t *acc = ctx;
uint8_t *ks = key + (uint32_t)16U;
uint64_t f0 = acc[0U];
uint64_t f12 = acc[1U];
uint64_t f22 = acc[2U];
uint64_t f32 = acc[3U];
uint64_t f13 = acc[1U];
uint64_t f23 = acc[2U];
uint64_t f33 = acc[3U];
uint64_t f40 = acc[4U];
uint64_t l = f0 + (uint64_t)0U;
uint64_t l0 = f0 + (uint64_t)0U;
uint64_t tmp00 = l0 & (uint64_t)0x3ffffffU;
uint64_t c00 = l0 >> (uint32_t)26U;
uint64_t l1 = f13 + c00;
uint64_t tmp10 = l1 & (uint64_t)0x3ffffffU;
uint64_t c10 = l1 >> (uint32_t)26U;
uint64_t l2 = f23 + c10;
uint64_t tmp20 = l2 & (uint64_t)0x3ffffffU;
uint64_t c20 = l2 >> (uint32_t)26U;
uint64_t l3 = f33 + c20;
uint64_t tmp30 = l3 & (uint64_t)0x3ffffffU;
uint64_t c30 = l3 >> (uint32_t)26U;
uint64_t l4 = f40 + c30;
uint64_t tmp40 = l4 & (uint64_t)0x3ffffffU;
uint64_t c40 = l4 >> (uint32_t)26U;
uint64_t f010 = tmp00 + c40 * (uint64_t)5U;
uint64_t f110 = tmp10;
uint64_t f210 = tmp20;
uint64_t f310 = tmp30;
uint64_t f410 = tmp40;
uint64_t l = f010 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c0 = l >> (uint32_t)26U;
uint64_t l0 = f12 + c0;
uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
uint64_t c1 = l0 >> (uint32_t)26U;
uint64_t l1 = f22 + c1;
uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
uint64_t c2 = l1 >> (uint32_t)26U;
uint64_t l2 = f32 + c2;
uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
uint64_t c3 = l2 >> (uint32_t)26U;
uint64_t l3 = f40 + c3;
uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
uint64_t c4 = l3 >> (uint32_t)26U;
uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
uint64_t tmp0_ = l4 & (uint64_t)0x3ffffffU;
uint64_t c5 = l4 >> (uint32_t)26U;
uint64_t f010 = tmp0_;
uint64_t f110 = tmp1 + c5;
uint64_t f210 = tmp2;
uint64_t f310 = tmp3;
uint64_t f410 = tmp4;
uint64_t l5 = f110 + c0;
uint64_t tmp1 = l5 & (uint64_t)0x3ffffffU;
uint64_t c1 = l5 >> (uint32_t)26U;
uint64_t l6 = f210 + c1;
uint64_t tmp2 = l6 & (uint64_t)0x3ffffffU;
uint64_t c2 = l6 >> (uint32_t)26U;
uint64_t l7 = f310 + c2;
uint64_t tmp3 = l7 & (uint64_t)0x3ffffffU;
uint64_t c3 = l7 >> (uint32_t)26U;
uint64_t l8 = f410 + c3;
uint64_t tmp4 = l8 & (uint64_t)0x3ffffffU;
uint64_t c4 = l8 >> (uint32_t)26U;
uint64_t f02 = tmp0 + c4 * (uint64_t)5U;
uint64_t f12 = tmp1;
uint64_t f22 = tmp2;
uint64_t f32 = tmp3;
uint64_t f42 = tmp4;
uint64_t mh = (uint64_t)0x3ffffffU;
uint64_t ml = (uint64_t)0x3fffffbU;
uint64_t mask = FStar_UInt64_eq_mask(f410, mh);
uint64_t mask1 = mask & FStar_UInt64_eq_mask(f310, mh);
uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f210, mh);
uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f110, mh);
uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f010, ml);
uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f12, mh);
uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f02, ml);
uint64_t ph = mask4 & mh;
uint64_t pl = mask4 & ml;
uint64_t o0 = f010 - pl;
uint64_t o1 = f110 - ph;
uint64_t o2 = f210 - ph;
uint64_t o3 = f310 - ph;
uint64_t o4 = f410 - ph;
uint64_t o0 = f02 - pl;
uint64_t o1 = f12 - ph;
uint64_t o2 = f22 - ph;
uint64_t o3 = f32 - ph;
uint64_t o4 = f42 - ph;
uint64_t f011 = o0;
uint64_t f111 = o1;
uint64_t f211 = o2;
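FStar_UInt64_eq_mask and FStar_UInt64_gte_mask return all-ones or all-zero without data-dependent branches, which is what keeps the final subtraction constant-time. A generic branch-free equality mask in the same spirit follows; this is a sketch for illustration, and kremlib ships its own definition.

#include <stdint.h>

/* Returns 0xFFFFFFFFFFFFFFFF when a == b, 0 otherwise, with no branches. */
static uint64_t eq_mask_sketch(uint64_t a, uint64_t b)
{
    uint64_t x = a ^ b;                   /* zero iff a == b */
    uint64_t minus_x = ~x + (uint64_t)1U; /* two's-complement negation */
    uint64_t x_or_minus_x = x | minus_x;  /* bit 63 set iff x != 0 */
    uint64_t xnx = x_or_minus_x >> 63U;   /* 1 iff a != b, else 0 */
    return xnx - (uint64_t)1U;            /* 0 -> all ones, 1 -> 0 */
}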
@@ -52,12 +52,16 @@ typedef const char *Prims_string;
/* The uint128 type is a special case since we offer several implementations of
* it, depending on the compiler and whether the user wants the verified
* implementation or not. */
#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(_M_X64)
#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(_M_X64) && \
!defined(__clang__)
#include <emmintrin.h>
typedef __m128i FStar_UInt128_uint128;
#elif !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER) && \
(defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__))
(defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__) || \
(defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)))
typedef unsigned __int128 FStar_UInt128_uint128;
#elif !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(__clang__)
typedef __uint128_t FStar_UInt128_uint128;
#else
typedef struct FStar_UInt128_uint128_s {
uint64_t low;

@@ -72,11 +76,11 @@ typedef FStar_UInt128_uint128 FStar_UInt128_t, uint128_t;
#include "kremlin/lowstar_endianness.h"

/* This one is always included, because it defines C.Endianness functions too. */
#if !defined(_MSC_VER)
#if !defined(_MSC_VER) || defined(__clang__)
#include "fstar_uint128_gcc64.h"
#endif

#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER)
#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && !defined(__clang__)
#include "fstar_uint128_msvc.h"
#elif defined(KRML_VERIFIED_UINT128)
#include "FStar_UInt128_Verified.h"
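The widened #if chain now routes clang-cl (_MSC_VER together with __clang__) to __uint128_t and the GCC-style support header, adds little-endian 64-bit POWER to the unsigned __int128 path, and keeps the verified two-limb struct as the fallback. A small stand-alone probe that mirrors those conditions, illustrative only and not part of the tree:

#include <stdio.h>

/* Reports which FStar_UInt128_uint128 backend the conditions above
   would pick for the current toolchain. */
int main(void)
{
#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(_M_X64) && \
    !defined(__clang__)
    puts("backend: __m128i (MSVC intrinsics)");
#elif !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER) && \
    (defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__) || \
     (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)))
    puts("backend: unsigned __int128");
#elif !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(__clang__)
    puts("backend: __uint128_t (clang-cl)");
#else
    puts("backend: verified two-limb struct");
#endif
    return 0;
}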
@ -1,11 +1,6 @@
|
|||
/*
|
||||
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License.
|
||||
|
||||
This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
|
||||
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/minimal -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -library FStar.UInt128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle LowStar.Endianness= -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml 
.extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
|
||||
F* version: b0467796
|
||||
KreMLin version: ab4c97c6
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
|
@@ -18,70 +13,70 @@

#ifndef __FStar_UInt128_H
#define __FStar_UInt128_H

-inline static uint64_t
+static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee);

-inline static uint64_t
+static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);
+static inline FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);

-inline static bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+static inline bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+static inline bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+static inline bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+static inline bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+static inline bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

-inline static FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
+static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);

-inline static uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
+static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);

-inline static FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y);
+static inline FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y);

-inline static FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
+static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);

#define __FStar_UInt128_H_DEFINED
#endif
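
Note: the sweeping `inline static` -> `static inline` churn in these generated headers is purely stylistic. C99 permits declaration specifiers in any order, so both spellings declare identical functions; the reordering simply matches clang-format's canonical specifier order. A minimal illustration (hypothetical function names):

/* Equivalent declarations; only the specifier order differs. */
inline static uint64_t id_old(uint64_t x) { return x; }
static inline uint64_t id_new(uint64_t x) { return x; }
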
@@ -1,11 +1,6 @@
/*
  Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
  Licensed under the Apache 2.0 License.

  This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/uint128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle FStar.UInt64[rename=FStar_UInt_8_16_32_64] -bundle FStar.UInt128=[rename=FStar_UInt128_Verified] -fc89 -bundle *,WindowsWorkaroundSigh -static-header FStar.UInt128,FStar.UInt64 -ccopt -DKRML_VERIFIED_UINT128 -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml .extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml 
.extract/FStar_Int_Cast_Full.krml
  F* version: b0467796
  KreMLin version: ab4c97c6
*/

#include <inttypes.h>
@@ -18,31 +13,31 @@

#include "FStar_UInt_8_16_32_64.h"

-inline static uint64_t
+static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee)
{
    return projectee.low;
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee)
{
    return projectee.high;
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
{
    return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_carry(uint64_t a, uint64_t b)
{
    return FStar_UInt128_constant_time_carry(a, b);
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;
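
The expression in FStar_UInt128_constant_time_carry computes the borrow of a - b without branching: bit 63 of a ^ ((a ^ b) | ((a - b) ^ b)) is set exactly when a < b. A standalone spot-check (the test harness below is illustrative, not part of the tree):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint64_t
ct_carry(uint64_t a, uint64_t b)
{
    /* Bit 63 is 1 iff a < b, i.e. iff a - b would borrow. */
    return (a ^ ((a ^ b) | ((a - b) ^ b))) >> 63U;
}

int
main(void)
{
    uint64_t cases[][2] = {
        { 0, 0 }, { 1, 2 }, { 2, 1 }, { UINT64_MAX, 1 }, { 1, UINT64_MAX }
    };
    for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
        uint64_t a = cases[i][0], b = cases[i][1];
        assert(ct_carry(a, b) == (a < b ? 1U : 0U)); /* branching oracle */
    }
    return 0;
}
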
@@ -51,7 +46,7 @@ FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -60,7 +55,7 @@ FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -69,7 +64,7 @@ FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -78,7 +73,7 @@ FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -87,7 +82,7 @@ FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -96,13 +91,13 @@ FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    return FStar_UInt128_sub_mod_impl(a, b);
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -111,7 +106,7 @@ FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -120,7 +115,7 @@ FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -129,7 +124,7 @@ FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_lognot(FStar_UInt128_uint128 a)
{
    FStar_UInt128_uint128 lit;

@@ -140,19 +135,19 @@ FStar_UInt128_lognot(FStar_UInt128_uint128 a)

static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
{
    return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
{
    return FStar_UInt128_add_u64_shift_left(hi, lo, s);
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
{
    if (s == (uint32_t)0U) {

@@ -165,7 +160,7 @@ FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
    }
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
{
    FStar_UInt128_uint128 lit;

@@ -174,7 +169,7 @@ FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
{
    if (s < FStar_UInt128_u32_64) {

@@ -184,19 +179,19 @@ FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
    }
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
{
    return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
{
    return FStar_UInt128_add_u64_shift_right(hi, lo, s);
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
{
    if (s == (uint32_t)0U) {

@@ -209,7 +204,7 @@ FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
    }
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
{
    FStar_UInt128_uint128 lit;

@@ -218,7 +213,7 @@ FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
{
    if (s < FStar_UInt128_u32_64) {

@@ -228,37 +223,37 @@ FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
    }
}

-inline static bool
+static inline bool
FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    return a.low == b.low && a.high == b.high;
}

-inline static bool
+static inline bool
FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    return a.high > b.high || (a.high == b.high && a.low > b.low);
}

-inline static bool
+static inline bool
FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    return a.high < b.high || (a.high == b.high && a.low < b.low);
}

-inline static bool
+static inline bool
FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    return a.high > b.high || (a.high == b.high && a.low >= b.low);
}

-inline static bool
+static inline bool
FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    return a.high < b.high || (a.high == b.high && a.low <= b.low);
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -267,7 +262,7 @@ FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
    FStar_UInt128_uint128 lit;

@@ -278,7 +273,7 @@ FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
    return lit;
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_uint64_to_uint128(uint64_t a)
{
    FStar_UInt128_uint128 lit;

@@ -287,13 +282,13 @@ FStar_UInt128_uint64_to_uint128(uint64_t a)
    return lit;
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
{
    return a.low;
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_u64_mod_32(uint64_t a)
{
    return a & (uint64_t)0xffffffffU;

@@ -301,13 +296,13 @@ FStar_UInt128_u64_mod_32(uint64_t a)

static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo)
{
    return lo + (hi << FStar_UInt128_u32_32);
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_mul32(uint64_t x, uint32_t y)
{
    FStar_UInt128_uint128 lit;

@@ -319,13 +314,13 @@ FStar_UInt128_mul32(uint64_t x, uint32_t y)
    return lit;
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
{
    return lo + (hi << FStar_UInt128_u32_32);
}

-inline static FStar_UInt128_uint128
+static inline FStar_UInt128_uint128
FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
{
    FStar_UInt128_uint128 lit;
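
The hunks above elide the bodies of FStar_UInt128_mul32 and FStar_UInt128_mul_wide; they build a 64x64 -> 128 product from 32-bit limbs (u64_mod_32 and u32_combine are the visible helpers). For orientation, a portable schoolbook version looks roughly like this sketch; the struct and function names here are illustrative, not the verified KreMLin output:

#include <stdint.h>

typedef struct {
    uint64_t low;
    uint64_t high;
} u128_sketch;

static u128_sketch
mul_wide_sketch(uint64_t x, uint64_t y)
{
    uint64_t xlo = x & 0xffffffffU, xhi = x >> 32;
    uint64_t ylo = y & 0xffffffffU, yhi = y >> 32;
    uint64_t ll = xlo * ylo; /* partial products of the four limb pairs */
    uint64_t lh = xlo * yhi;
    uint64_t hl = xhi * ylo;
    /* Middle column plus the carry out of the low 32 bits of ll;
     * the sum of three 32-bit values cannot overflow 64 bits. */
    uint64_t mid = (ll >> 32) + (lh & 0xffffffffU) + (hl & 0xffffffffU);
    u128_sketch r;
    r.low = (mid << 32) | (ll & 0xffffffffU);
    r.high = xhi * yhi + (lh >> 32) + (hl >> 32) + (mid >> 32);
    return r;
}
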
@@ -1,11 +1,6 @@
/*
  Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
  Licensed under the Apache 2.0 License.

  This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/minimal -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -library FStar.UInt128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle LowStar.Endianness= -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml 
.extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
  F* version: b0467796
  KreMLin version: ab4c97c6
*/

#include <inttypes.h>
@@ -28,7 +23,7 @@ extern uint64_t FStar_UInt64_minus(uint64_t a);

extern uint32_t FStar_UInt64_n_minus_one;

-inline static uint64_t
+static inline uint64_t
FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
{
    uint64_t x = a ^ b;

@@ -38,7 +33,7 @@ FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
    return xnx - (uint64_t)1U;
}

-inline static uint64_t
+static inline uint64_t
FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
{
    uint64_t x = a;

@@ -66,7 +61,7 @@ extern uint32_t FStar_UInt32_minus(uint32_t a);

extern uint32_t FStar_UInt32_n_minus_one;

-inline static uint32_t
+static inline uint32_t
FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
{
    uint32_t x = a ^ b;

@@ -76,7 +71,7 @@ FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
    return xnx - (uint32_t)1U;
}

-inline static uint32_t
+static inline uint32_t
FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
{
    uint32_t x = a;

@@ -104,7 +99,7 @@ extern uint16_t FStar_UInt16_minus(uint16_t a);

extern uint32_t FStar_UInt16_n_minus_one;

-inline static uint16_t
+static inline uint16_t
FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
{
    uint16_t x = a ^ b;

@@ -114,7 +109,7 @@ FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
    return xnx - (uint16_t)1U;
}

-inline static uint16_t
+static inline uint16_t
FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
{
    uint16_t x = a;

@@ -142,7 +137,7 @@ extern uint8_t FStar_UInt8_minus(uint8_t a);

extern uint32_t FStar_UInt8_n_minus_one;

-inline static uint8_t
+static inline uint8_t
FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
{
    uint8_t x = a ^ b;

@@ -152,7 +147,7 @@ FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
    return xnx - (uint8_t)1U;
}

-inline static uint8_t
+static inline uint8_t
FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
{
    uint8_t x = a;
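
The eq_mask/gte_mask hunks above elide the middle of each function, but the visible first and last lines (x = a ^ b; ... return xnx - 1U;) match the standard branch-free idiom: x | -x has its top bit set exactly when x is nonzero. A 64-bit sketch of that construction (a reconstruction, not a verbatim copy of the elided lines):

#include <stdint.h>

static uint64_t
eq_mask64(uint64_t a, uint64_t b)
{
    uint64_t x = a ^ b;                 /* zero iff a == b           */
    uint64_t minus_x = ~x + 1U;         /* two's-complement negation */
    uint64_t x_or_minus_x = x | minus_x;
    uint64_t xnx = x_or_minus_x >> 63U; /* 0 if equal, 1 otherwise   */
    return xnx - 1U;                    /* all-ones if equal, else 0 */
}
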
@@ -1,11 +1,6 @@
/*
  Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
  Licensed under the Apache 2.0 License.

  This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/minimal -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -library FStar.UInt128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle LowStar.Endianness= -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml 
.extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
  F* version: b0467796
  KreMLin version: ab4c97c6
*/

#include <inttypes.h>
@@ -20,13 +15,13 @@

#include "FStar_UInt128.h"

-inline static void store128_le(uint8_t *x0, FStar_UInt128_uint128 x1);
+static inline void store128_le(uint8_t *x0, FStar_UInt128_uint128 x1);

-inline static FStar_UInt128_uint128 load128_le(uint8_t *x0);
+static inline FStar_UInt128_uint128 load128_le(uint8_t *x0);

-inline static void store128_be(uint8_t *x0, FStar_UInt128_uint128 x1);
+static inline void store128_be(uint8_t *x0, FStar_UInt128_uint128 x1);

-inline static FStar_UInt128_uint128 load128_be(uint8_t *x0);
+static inline FStar_UInt128_uint128 load128_be(uint8_t *x0);

#define __LowStar_Endianness_H_DEFINED
#endif
@@ -24,8 +24,9 @@
#include "FStar_UInt_8_16_32_64.h"
#include "LowStar_Endianness.h"

-#if !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER) && \
-    (defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__))
+#if !defined(KRML_VERIFIED_UINT128) && (!defined(_MSC_VER) || defined(__clang__)) && \
+    (defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__) || \
+     (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)))

/* GCC + using native unsigned __int128 support */
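
The widened guard admits clang-cl on Windows and little-endian 64-bit POWER to the fast path, where the 128-bit type is the compiler's native unsigned __int128 rather than the two-limb verified fallback. Roughly (a simplified sketch using the generic __SIZEOF_INT128__ probe; the vendored fstar_uint128.c wraps this in the FStar_UInt128 API):

#if defined(__SIZEOF_INT128__)
typedef unsigned __int128 uint128_t;

/* One widening multiply instruction on x86-64 and aarch64. */
static inline uint128_t
mul_wide_native(uint64_t x, uint64_t y)
{
    return (uint128_t)x * y;
}
#endif
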
@@ -2,9 +2,16 @@
#define __Vec_Intrin_H

#include <sys/types.h>
#include <wmmintrin.h>

+#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x)&1)
+
+#if defined(__x86_64__) || defined(_M_X64)
+
+// The following functions are only available on machines that support Intel AVX
+
#include <emmintrin.h>
#include <tmmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>

typedef __m128i Lib_IntVector_Intrinsics_vec128;
@@ -69,13 +76,13 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
    (_mm_shuffle_epi8(x0, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2)))

#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \
-    ((x1 == 8 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : _mm_xor_si128(_mm_slli_epi32(x0, x1), _mm_srli_epi32(x0, 32 - x1)))))
+    ((x1 == 8 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : _mm_xor_si128(_mm_slli_epi32(x0, x1), _mm_srli_epi32(x0, 32 - (x1))))))

#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \
-    (Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, 32 - x1))
+    (Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, 32 - (x1)))

#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \
-    (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x1, x2, x3, x4)))
+    (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x4, x3, x2, x1)))

#define Lib_IntVector_Intrinsics_vec128_shuffle64(x0, x1, x2) \
    (_mm_shuffle_epi32(x0, _MM_SHUFFLE(2 * x1 + 1, 2 * x1, 2 * x2 + 1, 2 * x2)))
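
The added parentheses in 32 - (x1) matter whenever the rotation count is an expression rather than a literal, because the macro body otherwise splices the caller's tokens straight into the subtraction. A hypothetical illustration of the failure mode:

#define ROT_L(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
#define ROT_R_BAD(x, n) ROT_L(x, 32 - n)   /* n unparenthesized */
#define ROT_R_OK(x, n) ROT_L(x, 32 - (n))

/* With n written as 8 + k:
 *   ROT_R_BAD(v, 8 + k) expands to ROT_L(v, 32 - 8 + k) == ROT_L(v, 24 + k)    -- wrong
 *   ROT_R_OK(v, 8 + k)  expands to ROT_L(v, 32 - (8 + k)) == ROT_L(v, 24 - k)  -- intended */
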
@@ -125,8 +132,6 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_zero \
    (_mm_set1_epi16((uint16_t)0))

-#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x)&1)
-
#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \
    (_mm_add_epi64(x0, x1))
@@ -157,13 +162,13 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_load64(x) \
    (_mm_set1_epi64x(x)) /* hi lo */

-#define Lib_IntVector_Intrinsics_vec128_load64s(x1, x2) \
-    (_mm_set_epi64x(x1, x2)) /* hi lo */
+#define Lib_IntVector_Intrinsics_vec128_load64s(x0, x1) \
+    (_mm_set_epi64x(x1, x0)) /* hi lo */

#define Lib_IntVector_Intrinsics_vec128_load32(x) \
    (_mm_set1_epi32(x))

-#define Lib_IntVector_Intrinsics_vec128_load32s(x3, x2, x1, x0) \
+#define Lib_IntVector_Intrinsics_vec128_load32s(x0, x1, x2, x3) \
    (_mm_set_epi32(x3, x2, x1, x0)) /* hi lo */

#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x1, x2) \
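
The load64s/load32s renames flip the parameter convention to memory order (lowest lane first), while the underlying _mm_set_* intrinsics still take their arguments highest lane first, hence the reversed argument lists in the bodies. A small check of the _mm_set_epi64x convention:

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    __m128i v = _mm_set_epi64x(2, 1); /* arguments are (hi, lo) */
    uint64_t lanes[2];
    _mm_storeu_si128((__m128i *)lanes, v);
    /* Prints lane0=1 lane1=2: lane 0 received the *last* argument. */
    printf("lane0=%llu lane1=%llu\n",
           (unsigned long long)lanes[0], (unsigned long long)lanes[1]);
    return 0;
}
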
@@ -178,6 +183,11 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x1, x2) \
    (_mm_unpackhi_epi64(x1, x2))

+// The following functions are only available on machines that support Intel AVX2
+
+#include <immintrin.h>
+#include <wmmintrin.h>
+
typedef __m256i Lib_IntVector_Intrinsics_vec256;

#define Lib_IntVector_Intrinsics_vec256_eq64(x0, x1) \
@@ -229,19 +239,31 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
    (_mm256_shuffle_epi8(x0, _mm256_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2, 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2)))

#define Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, x1) \
-    ((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) : _mm256_or_si256(_mm256_slli_epi32(x0, x1), _mm256_srli_epi32(x0, 32 - x1)))))
+    ((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) : _mm256_or_si256(_mm256_slli_epi32(x0, x1), _mm256_srli_epi32(x0, 32 - (x1))))))

#define Lib_IntVector_Intrinsics_vec256_rotate_right32(x0, x1) \
-    (Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, 32 - x1))
+    (Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, 32 - (x1)))

-#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \
-    (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x1, x2, x3, x4)))
+#define Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) \
+    (_mm256_shuffle_epi8(x0, _mm256_set_epi8(8, 15, 14, 13, 12, 11, 10, 9, 0, 7, 6, 5, 4, 3, 2, 1, 8, 15, 14, 13, 12, 11, 10, 9, 0, 7, 6, 5, 4, 3, 2, 1)))
+
+#define Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) \
+    (_mm256_shuffle_epi8(x0, _mm256_set_epi8(9, 8, 15, 14, 13, 12, 11, 10, 1, 0, 7, 6, 5, 4, 3, 2, 9, 8, 15, 14, 13, 12, 11, 10, 1, 0, 7, 6, 5, 4, 3, 2)))
+
+#define Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) \
+    (_mm256_shuffle_epi8(x0, _mm256_set_epi8(10, 9, 8, 15, 14, 13, 12, 11, 2, 1, 0, 7, 6, 5, 4, 3, 10, 9, 8, 15, 14, 13, 12, 11, 2, 1, 0, 7, 6, 5, 4, 3)))
+
+#define Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) \
+    (_mm256_shuffle_epi8(x0, _mm256_set_epi8(11, 10, 9, 8, 15, 14, 13, 12, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, 3, 2, 1, 0, 7, 6, 5, 4)))
+
+#define Lib_IntVector_Intrinsics_vec256_rotate_right64(x0, x1) \
+    ((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) : (x1 == 24 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) : (x1 == 32 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) : _mm256_xor_si256(_mm256_srli_epi64((x0), (x1)), _mm256_slli_epi64((x0), (64 - (x1)))))))))

#define Lib_IntVector_Intrinsics_vec256_shuffle64(x0, x1, x2, x3, x4) \
-    (_mm256_permute4x64_epi64(x0, _MM_SHUFFLE(x1, x2, x3, x4)))
+    (_mm256_permute4x64_epi64(x0, _MM_SHUFFLE(x4, x3, x2, x1)))

#define Lib_IntVector_Intrinsics_vec256_shuffle32(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
-    (_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32(x1, x2, x3, x4, x5, x6, x7, x8)))
+    (_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32(x8, x7, x6, x5, x4, x3, x2, x1)))

#define Lib_IntVector_Intrinsics_vec256_load_le(x0) \
    (_mm256_loadu_si256((__m256i*)(x0)))
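
The new rotate_right64_{8,16,24,32} variants replace the generic shift-xor sequence with a single byte permute whenever the rotation is a multiple of 8; the _mm256_set_epi8 tables above are just per-64-bit-lane byte rotations. The same idea at 128-bit width (a sketch, not a macro from this header):

#include <tmmintrin.h>

/* Rotate each 64-bit lane right by 8 bits with one SSSE3 byte shuffle.
 * _mm_set_epi8 lists bytes 15..0; entry i selects source byte i. */
static inline __m128i
rotate_right64_8_sketch(__m128i x)
{
    return _mm_shuffle_epi8(
        x, _mm_set_epi8(8, 15, 14, 13, 12, 11, 10, 9, 0, 7, 6, 5, 4, 3, 2, 1));
}
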
@@ -297,19 +319,19 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_load64(x1) \
    (_mm256_set1_epi64x(x1)) /* hi lo */

-#define Lib_IntVector_Intrinsics_vec256_load64s(x1, x2, x3, x4) \
-    (_mm256_set_epi64x(x1, x2, x3, x4)) /* hi lo */
+#define Lib_IntVector_Intrinsics_vec256_load64s(x0, x1, x2, x3) \
+    (_mm256_set_epi64x(x3, x2, x1, x0)) /* hi lo */

#define Lib_IntVector_Intrinsics_vec256_load32(x) \
    (_mm256_set1_epi32(x))

-#define Lib_IntVector_Intrinsics_vec256_load32s(x7, x6, x5, x4, x3, x2, x1, x0) \
+#define Lib_IntVector_Intrinsics_vec256_load32s(x0, x1, x2, x3, x4, x5, x6, x7) \
    (_mm256_set_epi32(x7, x6, x5, x4, x3, x2, x1, x0)) /* hi lo */

#define Lib_IntVector_Intrinsics_vec256_load128(x) \
    (_mm256_set_m128i((__m128i)x))

-#define Lib_IntVector_Intrinsics_vec256_load128s(x1, x0) \
+#define Lib_IntVector_Intrinsics_vec256_load128s(x0, x1) \
    (_mm256_set_m128i((__m128i)x1, (__m128i)x0))

#define Lib_IntVector_Intrinsics_vec256_interleave_low32(x1, x2) \
@@ -330,6 +352,172 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_interleave_high128(x1, x2) \
    (_mm256_permute2x128_si256(x1, x2, 0x31))

-#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x)&1)
+#elif defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
+#include <arm_neon.h>
+
+typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
+
+#define Lib_IntVector_Intrinsics_vec128_xor(x0, x1) \
+    (veorq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_eq64(x0, x1) \
+    (vceqq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_eq32(x0, x1) \
+    (vceqq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_gt32(x0, x1) \
+    (vcgtq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_or(x0, x1) \
+    (voorq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_and(x0, x1) \
+    (vandq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_lognot(x0) \
+    (vmvnq_u32(x0))
+
+#define Lib_IntVector_Intrinsics_vec128_shift_left(x0, x1) \
+    (vextq_u32(x0, vdupq_n_u8(0), 16 - (x1) / 8))
+
+#define Lib_IntVector_Intrinsics_vec128_shift_right(x0, x1) \
+    (vextq_u32(x0, vdupq_n_u8(0), (x1) / 8))
+
+#define Lib_IntVector_Intrinsics_vec128_shift_left64(x0, x1) \
+    (vreinterpretq_u32_u64(vshlq_n_u64(vreinterpretq_u64_u32(x0), x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_shift_right64(x0, x1) \
+    (vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0), x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_shift_left32(x0, x1) \
+    (vshlq_n_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_shift_right32(x0, x1) \
+    (vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0), x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \
+    (vsriq_n_u32(vshlq_n_u32((x0), (x1)), (x0), 32 - (x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \
+    (vsriq_n_u32(vshlq_n_u32((x0), 32 - (x1)), (x0), (x1)))
+
+/*
+#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \
+    (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x1,x2,x3,x4)))
+
+#define Lib_IntVector_Intrinsics_vec128_shuffle64(x0, x1, x2) \
+    (_mm_shuffle_epi32(x0, _MM_SHUFFLE(2*x1+1,2*x1,2*x2+1,2*x2)))
+*/
+
+#define Lib_IntVector_Intrinsics_vec128_load_le(x0) \
+    (vld1q_u32((const uint32_t*)(x0)))
+
+#define Lib_IntVector_Intrinsics_vec128_store_le(x0, x1) \
+    (vst1q_u32((uint32_t*)(x0), (x1)))
+
+/*
+#define Lib_IntVector_Intrinsics_vec128_load_be(x0) \
+    ( Lib_IntVector_Intrinsics_vec128 l = vrev64q_u8(vld1q_u32((uint32_t*)(x0)));
+*/
+
+#define Lib_IntVector_Intrinsics_vec128_load32_be(x0) \
+    (vrev32q_u8(vld1q_u32((const uint32_t*)(x0))))
+
+#define Lib_IntVector_Intrinsics_vec128_load64_be(x0) \
+    (vreinterpretq_u32_u64(vrev64q_u8(vld1q_u32((const uint32_t*)(x0)))))
+
+/*
+#define Lib_IntVector_Intrinsics_vec128_store_be(x0, x1) \
+    (_mm_storeu_si128((__m128i*)(x0), _mm_shuffle_epi8(x1, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))))
+*/
+
+#define Lib_IntVector_Intrinsics_vec128_store32_be(x0, x1) \
+    (vst1q_u32((uint32_t*)(x0), (vrev32q_u8(x1))))
+
+#define Lib_IntVector_Intrinsics_vec128_store64_be(x0, x1) \
+    (vst1q_u32((uint32_t*)(x0), (vrev64q_u8(x1))))
+
+#define Lib_IntVector_Intrinsics_vec128_insert8(x0, x1, x2) \
+    (vsetq_lane_u8(x1, x0, x2))
+
+#define Lib_IntVector_Intrinsics_vec128_insert32(x0, x1, x2) \
+    (vsetq_lane_u32(x1, x0, x2))
+
+#define Lib_IntVector_Intrinsics_vec128_insert64(x0, x1, x2) \
+    (vreinterpretq_u32_u64(vsetq_lane_u64(x1, vreinterpretq_u64_u32(x0), x2)))
+
+#define Lib_IntVector_Intrinsics_vec128_extract8(x0, x1) \
+    (vgetq_lane_u8(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_extract32(x0, x1) \
+    (vgetq_lane_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_extract64(x0, x1) \
+    (vreinterpretq_u32_u64(vgetq_lane_u64(vreinterpretq_u64_u32(x0), x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_zero \
+    (vdup_n_u8(0))
+
+#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \
+    (vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1))))
+
+#define Lib_IntVector_Intrinsics_vec128_sub64(x0, x1) \
+    (vreinterpretq_u32_u64(vsubq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1))))
+
+#define Lib_IntVector_Intrinsics_vec128_mul64(x0, x1) \
+    (vmull_u32(vmovn_u64(x0), vmovn_u64(x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_smul64(x0, x1) \
+    (vmull_u32(vmovn_u64(x0), vdupq_n_u64(x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_add32(x0, x1) \
+    (vaddq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_sub32(x0, x1) \
+    (vsubq_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_mul32(x0, x1) \
+    (vmulq_lane_u32(x0, x1))
+
+#define Lib_IntVector_Intrinsics_vec128_smul32(x0, x1) \
+    (vmulq_lane_u32(x0, vdupq_n_u32(x1)))
+
+#define Lib_IntVector_Intrinsics_vec128_load128(x) \
+    ((uint32x4_t)(x))
+
+#define Lib_IntVector_Intrinsics_vec128_load64(x) \
+    (vreinterpretq_u32_u64(vdupq_n_u64(x))) /* hi lo */
+
+#define Lib_IntVector_Intrinsics_vec128_load32(x) \
+    (vdupq_n_u32(x)) /* hi lo */
+
+static inline Lib_IntVector_Intrinsics_vec128
+Lib_IntVector_Intrinsics_vec128_load64s(uint64_t x1, uint64_t x2)
+{
+    const uint64_t a[2] = { x1, x2 };
+    return vreinterpretq_u32_u64(vld1q_u64(a));
+}
+
+static inline Lib_IntVector_Intrinsics_vec128
+Lib_IntVector_Intrinsics_vec128_load32s(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4)
+{
+    const uint32_t a[4] = { x1, x2, x3, x4 };
+    return vld1q_u32(a);
+}
+
+#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x1, x2) \
+    (vzip1q_u32(x1, x2))
+
+#define Lib_IntVector_Intrinsics_vec128_interleave_high32(x1, x2) \
+    (vzip2q_u32(x1, x2))
+
+#define Lib_IntVector_Intrinsics_vec128_interleave_low64(x1, x2) \
+    (vreinterpretq_u32_u64(vzip1q_u64(vreinterpretq_u64_u32(x1), vreinterpretq_u64_u32(x2))))
+
+#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x1, x2) \
+    (vreinterpretq_u32_u64(vzip2q_u64(vreinterpretq_u64_u32(x1), vreinterpretq_u64_u32(x2))))
+
+#endif
#endif
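
On the NEON side the 32-bit rotates lean on vsriq_n_u32 (shift right and insert), which merges the wrapped-around bits into the shifted value without a separate OR. Standalone, for a fixed count (NEON shift immediates must be compile-time constants, which is why the header keeps these as macros):

#include <arm_neon.h>

/* Rotate each 32-bit lane left by 8: shift left, then vsriq shifts the
 * original lanes right by 32 - 8 and inserts them into the low bits. */
static inline uint32x4_t
rotate_left32_by8(uint32x4_t x)
{
    return vsriq_n_u32(vshlq_n_u32(x, 8), x, 32 - 8);
}
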
@@ -22,12 +22,12 @@
 * The format of the version string should be
 * "<major version>.<minor version>[.<patch level>[.<build number>]][ <ECC>][ <Beta>]"
 */
-#define NSS_VERSION "3.51" _NSS_CUSTOMIZED
+#define NSS_VERSION "3.52" _NSS_CUSTOMIZED " Beta"
#define NSS_VMAJOR 3
-#define NSS_VMINOR 51
+#define NSS_VMINOR 52
#define NSS_VPATCH 0
#define NSS_VBUILD 0
-#define NSS_BETA PR_FALSE
+#define NSS_BETA PR_TRUE

#ifndef RC_INVOKED
@@ -17,11 +17,11 @@
 * The format of the version string should be
 * "<major version>.<minor version>[.<patch level>[.<build number>]][ <ECC>][ <Beta>]"
 */
-#define SOFTOKEN_VERSION "3.51" SOFTOKEN_ECC_STRING
+#define SOFTOKEN_VERSION "3.52" SOFTOKEN_ECC_STRING " Beta"
#define SOFTOKEN_VMAJOR 3
-#define SOFTOKEN_VMINOR 51
+#define SOFTOKEN_VMINOR 52
#define SOFTOKEN_VPATCH 0
#define SOFTOKEN_VBUILD 0
-#define SOFTOKEN_BETA PR_FALSE
+#define SOFTOKEN_BETA PR_TRUE

#endif /* _SOFTKVER_H_ */
@@ -1034,7 +1034,9 @@ ssl_ParseSessionTicket(sslSocket *ss, const SECItem *decryptedTicket,
        PORT_SetError(SEC_ERROR_LIBRARY_FAILURE);
        return SECFailure;
    }
-    parsedTicket->timestamp = (PRTime)temp << 32;
+
+    /* Cast to avoid undefined behavior if the top bit is set. */
+    parsedTicket->timestamp = (PRTime)((PRUint64)temp << 32);
    rv = ssl3_ExtConsumeHandshakeNumber(ss, &temp, 4, &buffer, &len);
    if (rv != SECSuccess) {
        PORT_SetError(SEC_ERROR_LIBRARY_FAILURE);
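
For context on the fix: left-shifting a value into the sign bit of a signed type is undefined behavior, which is exactly what UBSAN flagged when the 32-bit ticket timestamp had its top bit set. Doing the shift in unsigned arithmetic first is well-defined (illustration; PRTime is a signed 64-bit type):

#include <stdint.h>

int64_t
widen_timestamp(uint32_t temp)
{
    /* (int64_t)temp << 32 overflows the signed type when temp >= 2^31,
     * which is undefined behavior; the unsigned shift below is not.   */
    return (int64_t)((uint64_t)temp << 32);
}
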
@@ -1056,8 +1058,11 @@ ssl_ParseSessionTicket(sslSocket *ss, const SECItem *decryptedTicket,
        PORT_SetError(SEC_ERROR_LIBRARY_FAILURE);
        return SECFailure;
    }
+#ifndef UNSAFE_FUZZER_MODE
+    /* A well-behaving server should only write 0 or 1. */
+    PORT_Assert(temp == PR_TRUE || temp == PR_FALSE);
-    parsedTicket->extendedMasterSecretUsed = (PRBool)temp;
+#endif
+    parsedTicket->extendedMasterSecretUsed = temp ? PR_TRUE : PR_FALSE;

    rv = ssl3_ExtConsumeHandshake(ss, &temp, 4, &buffer, &len);
    if (rv != SECSuccess) {
@@ -943,6 +943,10 @@ typedef struct SSLMaskingContextStr {
                         unsigned int _maskLen), \
                        (ctx, sample, sampleLen, mask, maskLen))

+#define SSL_SetDtls13VersionWorkaround(fd, enabled) \
+    SSL_EXPERIMENTAL_API("SSL_SetDtls13VersionWorkaround", \
+                         (PRFileDesc * _fd, PRBool _enabled), (fd, enabled))
+
/* Deprecated experimental APIs */
#define SSL_UseAltServerHelloType(fd, enable) SSL_DEPRECATED_EXPERIMENTAL_API
#define SSL_SetupAntiReplay(a, b, c) SSL_DEPRECATED_EXPERIMENTAL_API
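
Consumers reach the new entry point through the SSL_EXPERIMENTAL_API dispatch macro above. A hedged usage sketch (error handling trimmed):

#include "prio.h"
#include "ssl.h"
#include "sslexp.h"

/* Opt a DTLS client socket into the supported_versions compatibility
 * workaround before starting the handshake. SECFailure typically means
 * this NSS build does not expose the experimental function. */
static SECStatus
enable_dtls13_workaround(PRFileDesc *fd)
{
    return SSL_SetDtls13VersionWorkaround(fd, PR_TRUE);
}
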
@@ -281,6 +281,7 @@ typedef struct sslOptionsStr {
    unsigned int enableV2CompatibleHello : 1;
    unsigned int enablePostHandshakeAuth : 1;
    unsigned int enableDelegatedCredentials : 1;
+    unsigned int enableDtls13VersionCompat : 1;
} sslOptions;

typedef enum { sslHandshakingUndetermined = 0,

@@ -1861,6 +1862,8 @@ SSLExp_HkdfVariantExpandLabelWithMech(PRUint16 version, PRUint16 cipherSuite, PK
                                      CK_MECHANISM_TYPE mech, unsigned int keySize,
                                      SSLProtocolVariant variant, PK11SymKey **keyp);

+SECStatus SSLExp_SetDtls13VersionWorkaround(PRFileDesc *fd, PRBool enabled);
+
SECStatus SSLExp_SetTimeFunc(PRFileDesc *fd, SSLTimeFunc f, void *arg);

extern SECStatus ssl_CreateMaskingContextInner(PRUint16 version, PRUint16 cipherSuite,
@@ -86,6 +86,7 @@ static sslOptions ssl_defaults = {
    .requireDHENamedGroups = PR_FALSE,
    .enable0RttData = PR_FALSE,
    .enableTls13CompatMode = PR_FALSE,
+    .enableDtls13VersionCompat = PR_FALSE,
    .enableDtlsShortHeader = PR_FALSE,
    .enableHelloDowngradeCheck = PR_FALSE,
    .enableV2CompatibleHello = PR_FALSE,

@@ -4249,6 +4250,7 @@ struct {
    EXP(SendCertificateRequest),
    EXP(SendSessionTicket),
    EXP(SetAntiReplayContext),
+    EXP(SetDtls13VersionWorkaround),
    EXP(SetESNIKeyPair),
    EXP(SetMaxEarlyDataSize),
    EXP(SetResumptionTokenCallback),

@@ -4289,6 +4291,17 @@ ssl_ClearPRCList(PRCList *list, void (*f)(void *))
    }
}

+SECStatus
+SSLExp_SetDtls13VersionWorkaround(PRFileDesc *fd, PRBool enabled)
+{
+    sslSocket *ss = ssl_FindSocket(fd);
+    if (!ss) {
+        return SECFailure;
+    }
+    ss->opt.enableDtls13VersionCompat = enabled;
+    return SECSuccess;
+}
+
SECStatus
SSLExp_SetTimeFunc(PRFileDesc *fd, SSLTimeFunc f, void *arg)
{
@@ -795,6 +795,21 @@ tls13_ClientSendSupportedVersionsXtn(const sslSocket *ss, TLSExtensionData *xtnD
        if (rv != SECSuccess) {
            return SECFailure;
        }
+
+        if (ss->opt.enableDtls13VersionCompat &&
+            ss->protocolVariant == ssl_variant_datagram) {
+            switch (version) {
+                case SSL_LIBRARY_VERSION_TLS_1_2:
+                case SSL_LIBRARY_VERSION_TLS_1_1:
+                    rv = sslBuffer_AppendNumber(buf, (PRUint16)version, 2);
+                    break;
+                default:
+                    continue;
+            }
+            if (rv != SECSuccess) {
+                return SECFailure;
+            }
+        }
    }

    rv = sslBuffer_InsertLength(buf, lengthOffset, 1);
@@ -19,12 +19,12 @@
 * The format of the version string should be
 * "<major version>.<minor version>[.<patch level>[.<build number>]][ <Beta>]"
 */
-#define NSSUTIL_VERSION "3.51"
+#define NSSUTIL_VERSION "3.52 Beta"
#define NSSUTIL_VMAJOR 3
-#define NSSUTIL_VMINOR 51
+#define NSSUTIL_VMINOR 52
#define NSSUTIL_VPATCH 0
#define NSSUTIL_VBUILD 0
-#define NSSUTIL_BETA PR_FALSE
+#define NSSUTIL_BETA PR_TRUE

SEC_BEGIN_PROTOS
@@ -23,6 +23,7 @@ int main(int argc, char const *argv[]) {
    printf("\tAES-NI \t%s supported\n", aesni_support() ? "" : "not");
    printf("\tPCLMUL \t%s supported\n", clmul_support() ? "" : "not");
    printf("\tAVX \t%s supported\n", avx_support() ? "" : "not");
+    printf("\tAVX2 \t%s supported\n", avx2_support() ? "" : "not");
    printf("\tSSSE3 \t%s supported\n", ssse3_support() ? "" : "not");
    printf("\tSSE4.1 \t%s supported\n", sse4_1_support() ? "" : "not");
    printf("\tSSE4.2 \t%s supported\n", sse4_2_support() ? "" : "not");
@@ -2,7 +2,7 @@

Network Security Services (NSS) is a set of libraries designed to support
cross-platform development of security-enabled client and server
-applications. NSS supports SSL v3-TLS 1.2 (experimental TLS 1.3), PKCS #5, PKCS#7,
+applications. NSS supports TLS 1.2, TLS 1.3, PKCS #5, PKCS#7,
PKCS #11, PKCS #12, S/MIME, X.509 v3 certificates, and other security
standards.