Bug 1636656 - land NSS e3444f4cc638 UPGRADE_NSS_RELEASE,

Differential Revision: https://phabricator.services.mozilla.com/D74716
J.C. Jones 2020-05-11 18:20:52 +00:00
Parent bf91ff2f3f
Commit 638a597baa
52 changed files: 875 additions and 695 deletions

View file

@@ -1504,7 +1504,7 @@ MOZ_ARG_WITH_BOOL(system-nss,
_USE_SYSTEM_NSS=1 )
if test -n "$_USE_SYSTEM_NSS"; then
AM_PATH_NSS(3.52, [MOZ_SYSTEM_NSS=1], [AC_MSG_ERROR([you don't have NSS installed or your version is too old])])
AM_PATH_NSS(3.53, [MOZ_SYSTEM_NSS=1], [AC_MSG_ERROR([you don't have NSS installed or your version is too old])])
fi
NSS_CFLAGS="$NSS_CFLAGS -I${DIST}/include/nss"
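With this change, --with-system-nss builds require NSS 3.53 or newer. A quick preflight check on the build host (assuming NSS pkg-config metadata is installed) is pkg-config --modversion nss, which must print 3.53 or later before configure will accept the system copy.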

View file

@@ -1 +1 @@
NSS_3_52_RTM
e3444f4cc638

View file

@@ -1,22 +1,5 @@
7 Added functions:
[A] 'function SECStatus PK11_AEADOp(PK11Context*, CK_GENERATOR_FUNCTION, int, unsigned char*, int, const unsigned char*, int, unsigned char*, int*, int, unsigned char*, int, const unsigned char*, int)' {PK11_AEADOp@@NSS_3.52}
[A] 'function SECStatus PK11_AEADRawOp(PK11Context*, void*, int, const unsigned char*, int, unsigned char*, int*, int, const unsigned char*, int)' {PK11_AEADRawOp@@NSS_3.52}
[A] 'function CK_OBJECT_HANDLE PK11_GetObjectHandle(PK11ObjectType, void*, PK11SlotInfo**)' {PK11_GetObjectHandle@@NSS_3.52}
[A] 'function SECStatus PK11_ReadRawAttributes(PLArenaPool*, PK11ObjectType, void*, CK_ATTRIBUTE*, unsigned int)' {PK11_ReadRawAttributes@@NSS_3.52}
[A] 'function SECStatus PK11_SymKeysToSameSlot(CK_MECHANISM_TYPE, CK_ATTRIBUTE_TYPE, CK_ATTRIBUTE_TYPE, PK11SymKey*, PK11SymKey*, PK11SymKey**, PK11SymKey**)' {PK11_SymKeysToSameSlot@@NSS_3.52}
[A] 'function PRBool _PK11_ContextGetAEADSimulation(PK11Context*)' {_PK11_ContextGetAEADSimulation@@NSS_3.52}
[A] 'function SECStatus _PK11_ContextSetAEADSimulation(PK11Context*)' {_PK11_ContextSetAEADSimulation@@NSS_3.52}
1 function with some indirect sub-type change:
[C] 'function SECStatus PK11_GetModInfo(SECMODModule*, CK_INFO*)' at pk11util.c:613:1 has some indirect sub-type changes:
parameter 1 of type 'SECMODModule*' has sub-type changes:
in pointed to type 'typedef SECMODModule' at secmodt.h:29:1:
underlying type 'struct SECMODModuleStr' at secmodt.h:44:1 changed:
type size changed from 1600 to 1664 (in bits)
1 data member insertion:
'CK_FLAGS SECMODModuleStr::flags', at offset 1600 (in bits) at secmodt.h:76:1
no data member change (1 filtered);
1 Added function:
[A] 'function PRBool SECMOD_GetSystemFIPSEnabled()' {SECMOD_GetSystemFIPSEnabled@@NSS_3.53}
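Two points worth noting in this ABI report. First, SECMODModuleStr grows from 1600 to 1664 bits: the delta is exactly 64 bits, one CK_FLAGS member (a CK_ULONG on this 64-bit target) appended at offset 1600, so all existing member offsets are preserved. Second, NSS 3.53 exports one new function, SECMOD_GetSystemFIPSEnabled. A minimal sketch of calling it, assuming the declaration is exported through secmod.h (the signature is taken from the report above):

/* Hedged sketch, not part of the diff: query the new system-FIPS flag. */
#include <stdio.h>
#include "nss.h"
#include "secmod.h"

int main(void)
{
    if (NSS_NoDB_Init(NULL) != SECSuccess) {
        fprintf(stderr, "NSS_NoDB_Init failed\n");
        return 1;
    }
    PRBool fips = SECMOD_GetSystemFIPSEnabled();
    printf("system FIPS mode: %s\n", fips ? "enabled" : "disabled");
    NSS_Shutdown();
    return 0;
}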

View file

@@ -1,10 +0,0 @@
6 Added functions:
[A] 'function CK_RV C_GetInterface(CK_UTF8CHAR_PTR, CK_VERSION_PTR, CK_INTERFACE_PTR_PTR, CK_FLAGS)' {C_GetInterface@@NSS_3.52}
[A] 'function CK_RV C_GetInterfaceList(CK_INTERFACE_PTR, CK_ULONG_PTR)' {C_GetInterfaceList@@NSS_3.52}
[A] 'function CK_RV FC_GetInterface(CK_UTF8CHAR_PTR, CK_VERSION_PTR, CK_INTERFACE_PTR_PTR, CK_FLAGS)' {FC_GetInterface@@NSS_3.52}
[A] 'function CK_RV FC_GetInterfaceList(CK_INTERFACE_PTR, CK_ULONG_PTR)' {FC_GetInterfaceList@@NSS_3.52}
[A] 'function CK_RV NSC_GetInterface(CK_UTF8CHAR_PTR, CK_VERSION_PTR, CK_INTERFACE_PTR_PTR, CK_FLAGS)' {NSC_GetInterface@@NSS_3.52}
[A] 'function CK_RV NSC_GetInterfaceList(CK_INTERFACE_PTR, CK_ULONG_PTR)' {NSC_GetInterfaceList@@NSS_3.52}
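These six entry points are the PKCS #11 v3.0 interface-discovery functions (C_GetInterfaceList/C_GetInterface, plus softoken's FC_ FIPS and NSC_ non-FIPS variants). They landed in 3.52, so this softoken ABI report is presumably being reset now that 3.52 is the new comparison baseline.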

View file

@@ -1 +1 @@
NSS_3_51_BRANCH
NSS_3_52_BRANCH

View file

@@ -13,7 +13,7 @@ set -e -x -v
# HACL CI.
# When bug 1593647 is resolved, extract the code on CI again.
git clone -q "https://github.com/project-everest/hacl-star" ~/hacl-star
git -C ~/hacl-star checkout -q 079854e0072041d60859b6d8af2743bc6a37dc05
git -C ~/hacl-star checkout -q e4311991b1526734f99f4e3a0058895a46c63e5c
# Format the C snapshot.
cd ~/hacl-star/dist/mozilla

View file

@@ -622,21 +622,23 @@ typedef enum {
bltestAES_GCM, /* . */
bltestCAMELLIA_ECB, /* . */
bltestCAMELLIA_CBC, /* . */
bltestSEED_ECB, /* SEED algorithm */
bltestSEED_CBC, /* SEED algorithm */
bltestCHACHA20, /* ChaCha20 + Poly1305 */
bltestRSA, /* Public Key Ciphers */
bltestRSA_OAEP, /* . (Public Key Enc.) */
bltestRSA_PSS, /* . (Public Key Sig.) */
bltestECDSA, /* . (Public Key Sig.) */
bltestDSA, /* . (Public Key Sig.) */
bltestMD2, /* Hash algorithms */
bltestMD5, /* . */
bltestSHA1, /* . */
bltestSHA224, /* . */
bltestSHA256, /* . */
bltestSHA384, /* . */
bltestSHA512, /* . */
#ifndef NSS_DISABLE_DEPRECATED_SEED
bltestSEED_ECB, /* SEED algorithm */
bltestSEED_CBC, /* SEED algorithm */
#endif
bltestCHACHA20, /* ChaCha20 + Poly1305 */
bltestRSA, /* Public Key Ciphers */
bltestRSA_OAEP, /* . (Public Key Enc.) */
bltestRSA_PSS, /* . (Public Key Sig.) */
bltestECDSA, /* . (Public Key Sig.) */
bltestDSA, /* . (Public Key Sig.) */
bltestMD2, /* Hash algorithms */
bltestMD5, /* . */
bltestSHA1, /* . */
bltestSHA224, /* . */
bltestSHA256, /* . */
bltestSHA384, /* . */
bltestSHA512, /* . */
NUMMODES
} bltestCipherMode;
@@ -660,8 +662,10 @@ static char *mode_strings[] =
"aes_gcm",
"camellia_ecb",
"camellia_cbc",
#ifndef NSS_DISABLE_DEPRECATED_SEED
"seed_ecb",
"seed_cbc",
#endif
"chacha20_poly1305",
"rsa",
"rsa_oaep",
@@ -792,8 +796,12 @@ struct bltestCipherInfoStr {
PRBool
is_symmkeyCipher(bltestCipherMode mode)
{
/* change as needed! */
/* change as needed! */
#ifndef NSS_DISABLE_DEPRECATED_SEED
if (mode >= bltestDES_ECB && mode <= bltestSEED_CBC)
#else
if (mode >= bltestDES_ECB && mode <= bltestCAMELLIA_CBC)
#endif
return PR_TRUE;
return PR_FALSE;
}
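This range check only works because the bltestCipherMode enumerators keep all symmetric-key modes contiguous: with SEED compiled in, bltestSEED_CBC is the last symmetric mode; with SEED compiled out, the values shift and bltestCAMELLIA_CBC becomes the last one, so the upper bound of the comparison has to move with the #ifdef.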
@@ -880,7 +888,9 @@ cipher_requires_IV(bltestCipherMode mode)
case bltestAES_CTR:
case bltestAES_GCM:
case bltestCAMELLIA_CBC:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case bltestSEED_CBC:
#endif
case bltestCHACHA20:
return PR_TRUE;
default:
@@ -1176,6 +1186,7 @@ camellia_Decrypt(void *cx, unsigned char *output, unsigned int *outputLen,
input, inputLen);
}
#ifndef NSS_DISABLE_DEPRECATED_SEED
SECStatus
seed_Encrypt(void *cx, unsigned char *output, unsigned int *outputLen,
unsigned int maxOutputLen, const unsigned char *input,
@@ -1193,6 +1204,7 @@ seed_Decrypt(void *cx, unsigned char *output, unsigned int *outputLen,
return SEED_Decrypt((SEEDContext *)cx, output, outputLen, maxOutputLen,
input, inputLen);
}
#endif /* NSS_DISABLE_DEPRECATED_SEED */
SECStatus
rsa_PublicKeyOp(void *cx, SECItem *output, const SECItem *input)
@@ -1587,6 +1599,7 @@ bltest_camellia_init(bltestCipherInfo *cipherInfo, PRBool encrypt)
return SECSuccess;
}
#ifndef NSS_DISABLE_DEPRECATED_SEED
SECStatus
bltest_seed_init(bltestCipherInfo *cipherInfo, PRBool encrypt)
{
@@ -1630,6 +1643,7 @@ bltest_seed_init(bltestCipherInfo *cipherInfo, PRBool encrypt)
return SECSuccess;
}
#endif /* NSS_DISABLE_DEPRECATED_SEED */
SECStatus
bltest_chacha20_init(bltestCipherInfo *cipherInfo, PRBool encrypt)
@@ -2282,12 +2296,14 @@ cipherInit(bltestCipherInfo *cipherInfo, PRBool encrypt)
cipherInfo->input.pBuf.len);
return bltest_camellia_init(cipherInfo, encrypt);
break;
#ifndef NSS_DISABLE_DEPRECATED_SEED
case bltestSEED_ECB:
case bltestSEED_CBC:
SECITEM_AllocItem(cipherInfo->arena, &cipherInfo->output.buf,
cipherInfo->input.pBuf.len);
return bltest_seed_init(cipherInfo, encrypt);
break;
#endif /* NSS_DISABLE_DEPRECATED_SEED */
case bltestCHACHA20:
outlen = cipherInfo->input.pBuf.len + (encrypt ? 16 : 0);
SECITEM_AllocItem(cipherInfo->arena, &cipherInfo->output.buf, outlen);
@@ -2586,10 +2602,12 @@ cipherFinish(bltestCipherInfo *cipherInfo)
case bltestCAMELLIA_CBC:
Camellia_DestroyContext((CamelliaContext *)cipherInfo->cx, PR_TRUE);
break;
#ifndef NSS_DISABLE_DEPRECATED_SEED
case bltestSEED_ECB:
case bltestSEED_CBC:
SEED_DestroyContext((SEEDContext *)cipherInfo->cx, PR_TRUE);
break;
#endif /* NSS_DISABLE_DEPRECATED_SEED */
case bltestCHACHA20:
ChaCha20Poly1305_DestroyContext((ChaCha20Poly1305Context *)
cipherInfo->cx,
@@ -2747,8 +2765,10 @@ print_td:
case bltestAES_GCM:
case bltestCAMELLIA_ECB:
case bltestCAMELLIA_CBC:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case bltestSEED_ECB:
case bltestSEED_CBC:
#endif
case bltestRC2_ECB:
case bltestRC2_CBC:
case bltestRC4:
@@ -2939,19 +2959,23 @@ get_params(PLArenaPool *arena, bltestParams *params,
case bltestAES_CTS:
case bltestAES_CTR:
case bltestCAMELLIA_CBC:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case bltestSEED_CBC:
#endif
    sprintf(filename, "%s/tests/%s/%s%d", testdir, modestr, "iv", j);
    load_file_data(arena, &params->sk.iv, filename, bltestBinary);
case bltestDES_ECB:
case bltestDES_EDE_ECB:
case bltestRC2_ECB:
case bltestRC4:
case bltestAES_ECB:
case bltestCAMELLIA_ECB:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case bltestSEED_ECB:
#endif
    sprintf(filename, "%s/tests/%s/%s%d", testdir, modestr, "key", j);
    load_file_data(arena, &params->sk.key, filename, bltestBinary);
    break;
#ifdef NSS_SOFTOKEN_DOES_RC5
case bltestRC5_ECB:
case bltestRC5_CBC:

View file

@@ -99,6 +99,7 @@
'disable_arm_hw_aes%': 0,
'disable_tests%': 0,
'disable_chachapoly%': 0,
'disable_deprecated_seed%': 0,
'disable_dbm%': 1,
'disable_libpkix%': 1,
'disable_werror%': 0,
@@ -569,6 +570,11 @@
'NSS_DISABLE_LIBPKIX',
],
}],
[ 'disable_deprecated_seed==1', {
'defines': [
'NSS_DISABLE_DEPRECATED_SEED',
],
}],
],
},
# Common settings for debug should go here.
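For gyp-driven builds, SEED can now be compiled out at generation time by setting the new variable, e.g. passing -Ddisable_deprecated_seed=1 to whatever script drives gyp in your tree (the define spelling comes from the condition above; the wrapper plumbing is an assumption).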

View file

@@ -170,6 +170,10 @@ ifdef NSS_DISABLE_CHACHAPOLY
DEFINES += -DNSS_DISABLE_CHACHAPOLY
endif
ifdef NSS_DISABLE_DEPRECATED_SEED
DEFINES += -DNSS_DISABLE_DEPRECATED_SEED
endif
ifdef NSS_PKIX_NO_LDAP
DEFINES += -DNSS_PKIX_NO_LDAP
endif
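The legacy make build picks the switch up from the environment or the command line, e.g. make NSS_DISABLE_DEPRECATED_SEED=1 nss_build_all (the variable name comes from the ifdef above; the target name is illustrative).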

View file

@@ -10,4 +10,3 @@
*/
#error "Do not include this header file."

View file

@@ -72,10 +72,12 @@ class EncryptDeriveTest
return CKM_CAMELLIA_ECB_ENCRYPT_DATA;
case CKM_CAMELLIA_CBC:
return CKM_CAMELLIA_CBC_ENCRYPT_DATA;
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_ECB:
return CKM_SEED_ECB_ENCRYPT_DATA;
case CKM_SEED_CBC:
return CKM_SEED_CBC_ENCRYPT_DATA;
#endif
default:
ADD_FAILURE() << "Unknown mechanism";
break;
@@ -93,7 +95,9 @@ class EncryptDeriveTest
case CKM_DES3_ECB:
case CKM_AES_ECB:
case CKM_CAMELLIA_ECB:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_ECB:
#endif
string_data.pData = toUcharPtr(kInput);
string_data.ulLen = keysize();
param.data = reinterpret_cast<uint8_t*>(&string_data);
@@ -110,7 +114,9 @@ class EncryptDeriveTest
case CKM_AES_CBC:
case CKM_CAMELLIA_CBC:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_CBC:
#endif
aes_data.pData = toUcharPtr(kInput);
aes_data.length = keysize();
PORT_Memcpy(aes_data.iv, kIv, keysize());
@@ -132,14 +138,18 @@ class EncryptDeriveTest
case CKM_DES3_ECB:
case CKM_AES_ECB:
case CKM_CAMELLIA_ECB:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_ECB:
#endif
// No parameter needed here.
break;
case CKM_DES3_CBC:
case CKM_AES_CBC:
case CKM_CAMELLIA_CBC:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_CBC:
#endif
param.data = toUcharPtr(kIv);
param.len = keysize();
break;
@@ -186,8 +196,13 @@ class EncryptDeriveTest
TEST_P(EncryptDeriveTest, Test) { TestEncryptDerive(); }
static const CK_MECHANISM_TYPE kEncryptDeriveMechanisms[] = {
CKM_DES3_ECB, CKM_DES3_CBC, CKM_AES_ECB, CKM_AES_ECB, CKM_AES_CBC,
CKM_CAMELLIA_ECB, CKM_CAMELLIA_CBC, CKM_SEED_ECB, CKM_SEED_CBC};
CKM_DES3_ECB, CKM_DES3_CBC, CKM_AES_ECB, CKM_AES_ECB, CKM_AES_CBC,
CKM_CAMELLIA_ECB, CKM_CAMELLIA_CBC
#ifndef NSS_DISABLE_DEPRECATED_SEED
,
CKM_SEED_ECB, CKM_SEED_CBC
#endif
};
INSTANTIATE_TEST_CASE_P(EncryptDeriveTests, EncryptDeriveTest,
::testing::ValuesIn(kEncryptDeriveMechanisms));

View file

@@ -50,6 +50,7 @@ class Pkcs11SeedTest : public ::testing::Test {
}
};
#ifndef NSS_DISABLE_DEPRECATED_SEED
// The intention here is to test the arguments of these functions.
// The resulting content is already tested in EncryptDeriveTests.
// SEED_CBC needs an IV of 16 bytes.
@@ -76,5 +77,6 @@ TEST_F(Pkcs11SeedTest, ECB_Singleblock) {
TEST_F(Pkcs11SeedTest, ECB_Multiblock) {
EncryptDecryptSeed(SECSuccess, 64, 64, CKM_SEED_ECB);
}
#endif
} // namespace nss_test
} // namespace nss_test

View file

@@ -545,6 +545,10 @@ ifndef HAVE_INT128_SUPPORT
DEFINES += -DKRML_VERIFIED_UINT128
endif
ifndef NSS_DISABLE_DEPRECATED_SEED
CSRCS += seed.c
endif
ifndef NSS_DISABLE_CHACHAPOLY
ifeq ($(CPU_ARCH),x86_64)
ifndef NSS_DISABLE_AVX2

View file

@@ -55,7 +55,7 @@
'rijndael.c',
'rsa.c',
'rsapkcs.c',
'seed.c',
'sha512.c',
'sha_fast.c',
'shvfy.c',
'sysrand.c',
@@ -162,6 +162,11 @@
'verified/Hacl_Poly1305_32.c',
],
}],
[ 'disable_deprecated_seed==0', {
'sources': [
'seed.c',
],
}],
[ 'fuzz==1', {
'sources!': [ 'drbg.c' ],
'sources': [ 'det_rng.c' ],

View file

@@ -6,6 +6,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef FREEBL_NO_DEPEND
#include "stubs.h"
extern int FREEBL_InitStubs(void);
#endif
@@ -14,6 +15,15 @@ extern int FREEBL_InitStubs(void);
#include "alghmac.h"
#include "hmacct.h"
#include "blapii.h"
#include "secerr.h"
SECStatus
FREEBL_Deprecated(void)
{
PORT_SetError(SEC_ERROR_UNSUPPORTED_KEYALG);
return SECFailure;
}
static const struct FREEBLVectorStr vector =
{
@@ -210,14 +220,23 @@ static const struct FREEBLVectorStr vector =
PQG_DestroyParams,
PQG_DestroyVerify,
/* End of Version 3.010. */
/* End of Version 3.010. */
#ifndef NSS_DISABLE_DEPRECATED_SEED
SEED_InitContext,
SEED_AllocateContext,
SEED_CreateContext,
SEED_DestroyContext,
SEED_Encrypt,
SEED_Decrypt,
#else
(F_SEED_InitContext)FREEBL_Deprecated,
(F_SEED_AllocateContext)FREEBL_Deprecated,
(F_SEED_CreateContext)FREEBL_Deprecated,
(F_SEED_DestroyContext)FREEBL_Deprecated,
(F_SEED_Encrypt)FREEBL_Deprecated,
(F_SEED_Decrypt)FREEBL_Deprecated,
#endif /* NSS_DISABLE_DEPRECATED_SEED */
BL_Init,
BL_SetForkState,
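When SEED is compiled out, the vector keeps its exact size and slot order by routing every SEED slot through the single FREEBL_Deprecated stub via the F_SEED_* casts, so the freebl ABI is unchanged. A self-contained sketch of the same pattern with hypothetical names (here the stub matches each slot's signature, which keeps the calls well-defined):

/* Hypothetical illustration: one catch-all stub fills every slot of a
 * function-pointer table so the table layout (and the ABI) never changes. */
#include <stdio.h>

typedef int (*F_Encrypt)(const unsigned char *in, unsigned char *out);
typedef int (*F_Decrypt)(const unsigned char *in, unsigned char *out);

static int Deprecated(const unsigned char *in, unsigned char *out)
{
    (void)in;
    (void)out;
    return -1; /* algorithm compiled out */
}

static const struct {
    F_Encrypt encrypt;
    F_Decrypt decrypt;
} vector = {
    (F_Encrypt)Deprecated,
    (F_Decrypt)Deprecated,
};

int main(void)
{
    unsigned char b[1] = { 0 };
    printf("%d\n", vector.encrypt(b, b)); /* prints -1 */
    return 0;
}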

View file

@@ -396,7 +396,11 @@ SEED_CreateContext(const unsigned char *key, const unsigned char *iv,
{
if (!vector && PR_SUCCESS != freebl_RunLoaderOnce())
return NULL;
#ifndef NSS_DISABLE_DEPRECATED_SEED
return (vector->p_SEED_CreateContext)(key, iv, mode, encrypt);
#else
return NULL;
#endif
}
void
@@ -404,7 +408,11 @@ SEED_DestroyContext(SEEDContext *cx, PRBool freeit)
{
if (!vector && PR_SUCCESS != freebl_RunLoaderOnce())
return;
#ifndef NSS_DISABLE_DEPRECATED_SEED
(vector->p_SEED_DestroyContext)(cx, freeit);
#else
return;
#endif
}
SECStatus
@@ -414,8 +422,12 @@ SEED_Encrypt(SEEDContext *cx, unsigned char *output, unsigned int *outputLen,
{
if (!vector && PR_SUCCESS != freebl_RunLoaderOnce())
return SECFailure;
#ifndef NSS_DISABLE_DEPRECATED_SEED
return (vector->p_SEED_Encrypt)(cx, output, outputLen, maxOutputLen, input,
inputLen);
#else
return SECFailure;
#endif
}
SECStatus
@@ -425,8 +437,12 @@ SEED_Decrypt(SEEDContext *cx, unsigned char *output, unsigned int *outputLen,
{
if (!vector && PR_SUCCESS != freebl_RunLoaderOnce())
return SECFailure;
#ifndef NSS_DISABLE_DEPRECATED_SEED
return (vector->p_SEED_Decrypt)(cx, output, outputLen, maxOutputLen, input,
inputLen);
#else
return SECFailure;
#endif
}
AESContext *
@@ -1341,7 +1357,11 @@ SEED_InitContext(SEEDContext *cx, const unsigned char *key,
{
if (!vector && PR_SUCCESS != freebl_RunLoaderOnce())
return SECFailure;
#ifndef NSS_DISABLE_DEPRECATED_SEED
return (vector->p_SEED_InitContext)(cx, key, keylen, iv, mode, encrypt, xtra);
#else
return SECFailure;
#endif
}
SECStatus
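Note the asymmetry: when SEED is compiled out, these loader wrappers return NULL or SECFailure directly without consulting the vector and without setting an error code, whereas the vector-level stubs above go through FREEBL_Deprecated and therefore also report SEC_ERROR_UNSUPPORTED_KEYALG.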

View file

@@ -862,3 +862,29 @@ extern FREEBLGetVectorFn FREEBL_GetVector;
SEC_END_PROTOS
#endif
#ifdef NSS_DISABLE_DEPRECATED_SEED
typedef SECStatus (*F_SEED_InitContext)(SEEDContext *cx,
const unsigned char *key,
unsigned int keylen,
const unsigned char *iv,
int mode,
unsigned int encrypt,
unsigned int);
typedef SEEDContext *(*F_SEED_AllocateContext)(void);
typedef SEEDContext *(*F_SEED_CreateContext)(const unsigned char *key,
const unsigned char *iv,
int mode, PRBool encrypt);
typedef void (*F_SEED_DestroyContext)(SEEDContext *cx, PRBool freeit);
typedef SECStatus (*F_SEED_Encrypt)(SEEDContext *cx, unsigned char *output,
unsigned int *outputLen, unsigned int maxOutputLen,
const unsigned char *input, unsigned int inputLen);
typedef SECStatus (*F_SEED_Decrypt)(SEEDContext *cx, unsigned char *output,
unsigned int *outputLen, unsigned int maxOutputLen,
const unsigned char *input, unsigned int inputLen);
#endif

View file

@@ -126,7 +126,7 @@ CSRCS = \
alg2268.c \
arcfour.c \
arcfive.c \
crypto_primitives.c \
crypto_primitives.c \
blake2b.c \
desblapi.c \
des.c \
@@ -150,7 +150,6 @@ CSRCS = \
rsapkcs.c \
shvfy.c \
tlsprfalg.c \
seed.c \
jpake.c \
$(MPI_SRCS) \
$(MPCPU_SRCS) \

View file

@@ -23,7 +23,7 @@
#include "Hacl_Chacha20.h"
uint32_t
const uint32_t
Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] =
{ (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
@@ -107,12 +107,12 @@ chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
k[12U] = k[12U] + ctr_u32;
}
static uint32_t
static const uint32_t
chacha20_constants[4U] =
{ (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
static inline void
chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr)
{
uint32_t *uu____0 = ctx;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
@@ -133,7 +133,7 @@ chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
uint32_t *uu____2 = ctx + (uint32_t)13U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i++) {
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint8_t *bj = n + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
@@ -142,10 +142,10 @@ chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
}
static inline void
chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr1, uint8_t *text)
chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr, uint8_t *text)
{
uint32_t k[16U] = { 0U };
chacha20_core(k, ctx, incr1);
chacha20_core(k, ctx, incr);
uint32_t bl[16U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
uint32_t *os = bl;
@@ -166,25 +166,25 @@ chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr1, uint8_t *tex
}
static inline void
chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr1, uint8_t *text)
chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr, uint8_t *text)
{
uint8_t plain[64U] = { 0U };
memcpy(plain, text, len * sizeof(text[0U]));
chacha20_encrypt_block(ctx, plain, incr1, plain);
chacha20_encrypt_block(ctx, plain, incr, plain);
memcpy(out, plain, len * sizeof(plain[0U]));
}
static inline void
chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
{
uint32_t rem1 = len % (uint32_t)64U;
uint32_t rem = len % (uint32_t)64U;
uint32_t nb = len / (uint32_t)64U;
uint32_t rem2 = len % (uint32_t)64U;
uint32_t rem1 = len % (uint32_t)64U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U);
}
if (rem2 > (uint32_t)0U) {
chacha20_encrypt_last(ctx, rem1, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
if (rem1 > (uint32_t)0U) {
chacha20_encrypt_last(ctx, rem, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
}
}
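A worked check of the split with hypothetical numbers: for len = 150, nb = 150 / 64 = 2 full blocks and rem = 150 % 64 = 22, so the loop encrypts two 64-byte blocks and chacha20_encrypt_last pads and encrypts the 22-byte tail. The old rem1/rem2 pair was the same len % 64 computed twice; the rename collapses the aliases.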
@@ -194,11 +194,11 @@ Hacl_Chacha20_chacha20_encrypt(
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr)
{
uint32_t ctx[16U] = { 0U };
chacha20_init(ctx, key, n1, ctr);
chacha20_init(ctx, key, n, ctr);
chacha20_update(ctx, len, out, text);
}
@@ -208,10 +208,10 @@ Hacl_Chacha20_chacha20_decrypt(
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr)
{
uint32_t ctx[16U] = { 0U };
chacha20_init(ctx, key, n1, ctr);
chacha20_init(ctx, key, n, ctr);
chacha20_update(ctx, len, out, cipher);
}

View file

@@ -31,7 +31,7 @@
#include "Hacl_Kremlib.h"
extern uint32_t Hacl_Impl_Chacha20_Vec_chacha20_constants[4U];
extern const uint32_t Hacl_Impl_Chacha20_Vec_chacha20_constants[4U];
void
Hacl_Chacha20_chacha20_encrypt(
@@ -39,7 +39,7 @@ Hacl_Chacha20_chacha20_encrypt(
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr);
void
@@ -48,7 +48,7 @@ Hacl_Chacha20_chacha20_decrypt(
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr);
#define __Hacl_Chacha20_H_DEFINED
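A minimal round-trip sketch against the renamed declarations, with hypothetical test values (32-byte key, 12-byte nonce, 32-bit initial counter, per the usual HACL* ChaCha20 contract):

#include <stdint.h>
#include <stdio.h>
#include "Hacl_Chacha20.h"

int main(void)
{
    uint8_t key[32] = { 0 };
    uint8_t n[12] = { 0 }; /* the nonce, formerly named n1 */
    uint8_t msg[5] = { 'h', 'e', 'l', 'l', 'o' };
    uint8_t ct[5], pt[5];

    Hacl_Chacha20_chacha20_encrypt(5U, ct, msg, key, n, 1U);
    Hacl_Chacha20_chacha20_decrypt(5U, pt, ct, key, n, 1U);
    printf("%.5s\n", (const char *)pt); /* prints "hello" */
    return 0;
}

The Poly1305 sources below get the same de-aliasing pass: mask261 becomes mask26 (the 0x3ffffff mask that isolates one 26-bit limb of the radix-2^26 representation), the rem1/rem2 pairs collapse into rem/rem1, and n1 becomes n.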

View file

@@ -26,14 +26,14 @@
static inline void
poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
{
uint32_t n1 = len / (uint32_t)16U;
uint32_t n = len / (uint32_t)16U;
uint32_t r = len % (uint32_t)16U;
uint8_t *blocks = text;
uint8_t *rem1 = text + n1 * (uint32_t)16U;
uint8_t *rem = text + n * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec128 *acc0 = ctx;
uint32_t sz_block = (uint32_t)32U;
uint32_t len0 = n1 * (uint32_t)16U / sz_block * sz_block;
uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
uint8_t *t00 = blocks;
if (len0 > (uint32_t)0U) {
uint32_t bs = (uint32_t)32U;
@@ -197,13 +197,13 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@@ -213,21 +213,21 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o00 = x02;
Lib_IntVector_Intrinsics_vec128 o10 = x12;
@@ -262,10 +262,10 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
}
Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc0, pre0);
}
uint32_t len1 = n1 * (uint32_t)16U - len0;
uint32_t len1 = n * (uint32_t)16U - len0;
uint8_t *t10 = blocks + len0;
uint32_t nb = len1 / (uint32_t)16U;
uint32_t rem2 = len1 % (uint32_t)16U;
uint32_t rem1 = len1 % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = t10 + i * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 e[5U];
@@ -431,13 +431,13 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@@ -447,21 +447,21 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
@@ -474,13 +474,13 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
acc0[3U] = o3;
acc0[4U] = o4;
}
if (rem2 > (uint32_t)0U) {
uint8_t *last1 = t10 + nb * (uint32_t)16U;
if (rem1 > (uint32_t)0U) {
uint8_t *last = t10 + nb * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem2 * sizeof(last1[0U]));
memcpy(tmp, last, rem1 * sizeof(last[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -520,10 +520,10 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
e[2U] = f2;
e[3U] = f3;
e[4U] = f4;
uint64_t b = (uint64_t)1U << rem2 * (uint32_t)8U % (uint32_t)26U;
uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
Lib_IntVector_Intrinsics_vec128 fi = e[rem2 * (uint32_t)8U / (uint32_t)26U];
e[rem2 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
@@ -641,13 +641,13 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@@ -657,21 +657,21 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
@@ -685,7 +685,7 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
memcpy(tmp, rem1, r * sizeof(rem1[0U]));
memcpy(tmp, rem, r * sizeof(rem[0U]));
if (r > (uint32_t)0U) {
Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec128 *acc = ctx;
@@ -852,13 +852,13 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@@ -868,21 +868,21 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
@@ -1081,13 +1081,13 @@ poly1305_do_128(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@@ -1097,21 +1097,21 @@ poly1305_do_128(
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
@@ -1129,7 +1129,7 @@ poly1305_do_128(
void
Hacl_Chacha20Poly1305_128_aead_encrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@@ -1137,9 +1137,9 @@ Hacl_Chacha20Poly1305_128_aead_encrypt(
uint8_t *cipher,
uint8_t *mac)
{
Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n1, (uint32_t)1U);
Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, (uint32_t)1U);
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
uint8_t *key = tmp;
poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
}
@@ -1147,7 +1147,7 @@ Hacl_Chacha20Poly1305_128_aead_encrypt(
uint32_t
Hacl_Chacha20Poly1305_128_aead_decrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@@ -1157,7 +1157,7 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(
{
uint8_t computed_mac[16U] = { 0U };
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
uint8_t *key = tmp;
poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
uint8_t res = (uint8_t)255U;
@@ -1167,7 +1167,7 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(
}
uint8_t z = res;
if (z == (uint8_t)255U) {
Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n1, (uint32_t)1U);
Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, (uint32_t)1U);
return (uint32_t)0U;
}
return (uint32_t)1U;

View file

@@ -37,7 +37,7 @@
void
Hacl_Chacha20Poly1305_128_aead_encrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@@ -48,7 +48,7 @@ Hacl_Chacha20Poly1305_128_aead_encrypt(
uint32_t
Hacl_Chacha20Poly1305_128_aead_decrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,

View file

@@ -26,14 +26,14 @@
static inline void
poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
{
uint32_t n1 = len / (uint32_t)16U;
uint32_t n = len / (uint32_t)16U;
uint32_t r = len % (uint32_t)16U;
uint8_t *blocks = text;
uint8_t *rem1 = text + n1 * (uint32_t)16U;
uint8_t *rem = text + n * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec256 *acc0 = ctx;
uint32_t sz_block = (uint32_t)64U;
uint32_t len0 = n1 * (uint32_t)16U / sz_block * sz_block;
uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
uint8_t *t00 = blocks;
if (len0 > (uint32_t)0U) {
uint32_t bs = (uint32_t)64U;
@@ -51,7 +51,7 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256
hi = Lib_IntVector_Intrinsics_vec256_load_le(block + (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256
mask2610 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
Lib_IntVector_Intrinsics_vec256
@@ -68,14 +68,14 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
Lib_IntVector_Intrinsics_vec256
t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask2610);
Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
Lib_IntVector_Intrinsics_vec256
t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask2610);
Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask2610);
Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
Lib_IntVector_Intrinsics_vec256
t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask2610);
Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
Lib_IntVector_Intrinsics_vec256
o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
Lib_IntVector_Intrinsics_vec256 o00 = o5;
@@ -199,13 +199,13 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256 t3 = a34;
Lib_IntVector_Intrinsics_vec256 t4 = a44;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@@ -215,21 +215,21 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o01 = x02;
Lib_IntVector_Intrinsics_vec256 o12 = x12;
@@ -264,10 +264,10 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
}
Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc0, pre0);
}
uint32_t len1 = n1 * (uint32_t)16U - len0;
uint32_t len1 = n * (uint32_t)16U - len0;
uint8_t *t10 = blocks + len0;
uint32_t nb = len1 / (uint32_t)16U;
uint32_t rem2 = len1 % (uint32_t)16U;
uint32_t rem1 = len1 % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = t10 + i * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec256 e[5U];
@@ -433,13 +433,13 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256 t3 = a36;
Lib_IntVector_Intrinsics_vec256 t4 = a46;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@@ -449,21 +449,21 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
@@ -476,13 +476,13 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
acc0[3U] = o3;
acc0[4U] = o4;
}
if (rem2 > (uint32_t)0U) {
uint8_t *last1 = t10 + nb * (uint32_t)16U;
if (rem1 > (uint32_t)0U) {
uint8_t *last = t10 + nb * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec256 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem2 * sizeof(last1[0U]));
memcpy(tmp, last, rem1 * sizeof(last[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -522,10 +522,10 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
e[2U] = f2;
e[3U] = f3;
e[4U] = f4;
uint64_t b = (uint64_t)1U << rem2 * (uint32_t)8U % (uint32_t)26U;
uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
Lib_IntVector_Intrinsics_vec256 fi = e[rem2 * (uint32_t)8U / (uint32_t)26U];
e[rem2 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
@@ -643,13 +643,13 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256 t3 = a36;
Lib_IntVector_Intrinsics_vec256 t4 = a46;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@@ -659,21 +659,21 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
@@ -687,7 +687,7 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
memcpy(tmp, rem1, r * sizeof(rem1[0U]));
memcpy(tmp, rem, r * sizeof(rem[0U]));
if (r > (uint32_t)0U) {
Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec256 *acc = ctx;
@@ -854,13 +854,13 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256 t3 = a36;
Lib_IntVector_Intrinsics_vec256 t4 = a46;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@@ -870,21 +870,21 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
@ -1083,13 +1083,13 @@ poly1305_do_256(
Lib_IntVector_Intrinsics_vec256 t3 = a36;
Lib_IntVector_Intrinsics_vec256 t4 = a46;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@ -1099,21 +1099,21 @@ poly1305_do_256(
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
@ -1131,7 +1131,7 @@ poly1305_do_256(
void
Hacl_Chacha20Poly1305_256_aead_encrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@ -1139,9 +1139,9 @@ Hacl_Chacha20Poly1305_256_aead_encrypt(
uint8_t *cipher,
uint8_t *mac)
{
Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n1, (uint32_t)1U);
Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, (uint32_t)1U);
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
uint8_t *key = tmp;
poly1305_do_256(key, aadlen, aad, mlen, cipher, mac);
}
@ -1149,7 +1149,7 @@ Hacl_Chacha20Poly1305_256_aead_encrypt(
uint32_t
Hacl_Chacha20Poly1305_256_aead_decrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@ -1159,7 +1159,7 @@ Hacl_Chacha20Poly1305_256_aead_decrypt(
{
uint8_t computed_mac[16U] = { 0U };
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
uint8_t *key = tmp;
poly1305_do_256(key, aadlen, aad, mlen, cipher, computed_mac);
uint8_t res = (uint8_t)255U;
@ -1169,7 +1169,7 @@ Hacl_Chacha20Poly1305_256_aead_decrypt(
}
uint8_t z = res;
if (z == (uint8_t)255U) {
Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n1, (uint32_t)1U);
Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, (uint32_t)1U);
return (uint32_t)0U;
}
return (uint32_t)1U;
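
Note: the decrypt path above verifies the tag without branching on secret data. It folds per-byte FStar_UInt8_eq_mask results together with '&' and releases plaintext only when every byte matched. A minimal sketch of that comparison, assuming plain C99 (eq_mask_u8 and tag_matches are illustrative names, not HACL* API):

#include <stdint.h>

/* 0xFF when a == b, 0x00 otherwise, with no data-dependent branch. */
static uint8_t
eq_mask_u8(uint8_t a, uint8_t b)
{
    uint32_t x = (uint32_t)(a ^ b);     /* 0 iff the bytes are equal */
    uint32_t nz = (x | (0U - x)) >> 31; /* 1 iff x != 0 */
    return (uint8_t)(nz - 1U);          /* 0 -> 0xFF, 1 -> 0x00 */
}

/* Mirrors the loop above: accumulate the masks, then one final test. */
static int
tag_matches(const uint8_t *computed_mac, const uint8_t *mac)
{
    uint8_t res = (uint8_t)255U;
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
        res = (uint8_t)(eq_mask_u8(computed_mac[i], mac[i]) & res);
    }
    return res == (uint8_t)255U;
}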

View file

@ -37,7 +37,7 @@
void
Hacl_Chacha20Poly1305_256_aead_encrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@ -48,7 +48,7 @@ Hacl_Chacha20Poly1305_256_aead_encrypt(
uint32_t
Hacl_Chacha20Poly1305_256_aead_decrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,

View file

@ -26,14 +26,14 @@
static inline void
poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
{
uint32_t n1 = len / (uint32_t)16U;
uint32_t n = len / (uint32_t)16U;
uint32_t r = len % (uint32_t)16U;
uint8_t *blocks = text;
uint8_t *rem1 = text + n1 * (uint32_t)16U;
uint8_t *rem = text + n * (uint32_t)16U;
uint64_t *pre0 = ctx + (uint32_t)5U;
uint64_t *acc0 = ctx;
uint32_t nb = n1 * (uint32_t)16U / (uint32_t)16U;
uint32_t rem2 = n1 * (uint32_t)16U % (uint32_t)16U;
uint32_t nb = n * (uint32_t)16U / (uint32_t)16U;
uint32_t rem1 = n * (uint32_t)16U % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = blocks + i * (uint32_t)16U;
uint64_t e[5U] = { 0U };
@ -118,29 +118,29 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t mask26 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x0 = t0 & mask26;
uint64_t x3 = t3 & mask26;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x11 = x1 & mask26;
uint64_t x41 = x4 & mask26;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x21 = x2 & mask26;
uint64_t x02 = x01 & mask26;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x32 = x31 & mask26;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
@ -153,11 +153,11 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
acc0[3U] = o3;
acc0[4U] = o4;
}
if (rem2 > (uint32_t)0U) {
uint8_t *last1 = blocks + nb * (uint32_t)16U;
if (rem1 > (uint32_t)0U) {
uint8_t *last = blocks + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem2 * sizeof(last1[0U]));
memcpy(tmp, last, rem1 * sizeof(last[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@ -179,10 +179,10 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
e[2U] = f2;
e[3U] = f3;
e[4U] = f4;
uint64_t b = (uint64_t)1U << rem2 * (uint32_t)8U % (uint32_t)26U;
uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
uint64_t mask = b;
uint64_t fi = e[rem2 * (uint32_t)8U / (uint32_t)26U];
e[rem2 * (uint32_t)8U / (uint32_t)26U] = fi | mask;
uint64_t fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
e[rem1 * (uint32_t)8U / (uint32_t)26U] = fi | mask;
uint64_t *r1 = pre0;
uint64_t *r5 = pre0 + (uint32_t)5U;
uint64_t r0 = r1[0U];
@ -239,29 +239,29 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t mask26 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x0 = t0 & mask26;
uint64_t x3 = t3 & mask26;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x11 = x1 & mask26;
uint64_t x41 = x4 & mask26;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x21 = x2 & mask26;
uint64_t x02 = x01 & mask26;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x32 = x31 & mask26;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
@ -275,7 +275,7 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
memcpy(tmp, rem1, r * sizeof(rem1[0U]));
memcpy(tmp, rem, r * sizeof(rem[0U]));
if (r > (uint32_t)0U) {
uint64_t *pre = ctx + (uint32_t)5U;
uint64_t *acc = ctx;
@ -361,29 +361,29 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t mask26 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x0 = t0 & mask26;
uint64_t x3 = t3 & mask26;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x11 = x1 & mask26;
uint64_t x41 = x4 & mask26;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x21 = x2 & mask26;
uint64_t x02 = x01 & mask26;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x32 = x31 & mask26;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
@ -499,29 +499,29 @@ poly1305_do_32(
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t mask26 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x0 = t0 & mask26;
uint64_t x3 = t3 & mask26;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x11 = x1 & mask26;
uint64_t x41 = x4 & mask26;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x21 = x2 & mask26;
uint64_t x02 = x01 & mask26;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x32 = x31 & mask26;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
@ -539,7 +539,7 @@ poly1305_do_32(
void
Hacl_Chacha20Poly1305_32_aead_encrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@ -547,9 +547,9 @@ Hacl_Chacha20Poly1305_32_aead_encrypt(
uint8_t *cipher,
uint8_t *mac)
{
Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n1, (uint32_t)1U);
Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, (uint32_t)1U);
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
uint8_t *key = tmp;
poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
}
@ -557,7 +557,7 @@ Hacl_Chacha20Poly1305_32_aead_encrypt(
uint32_t
Hacl_Chacha20Poly1305_32_aead_decrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@ -567,7 +567,7 @@ Hacl_Chacha20Poly1305_32_aead_decrypt(
{
uint8_t computed_mac[16U] = { 0U };
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
uint8_t *key = tmp;
poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
uint8_t res = (uint8_t)255U;
@ -577,7 +577,7 @@ Hacl_Chacha20Poly1305_32_aead_decrypt(
}
uint8_t z = res;
if (z == (uint8_t)255U) {
Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n1, (uint32_t)1U);
Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, (uint32_t)1U);
return (uint32_t)0U;
}
return (uint32_t)1U;
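
The mask26 sequences above are one fully unrolled carry round over the five 26-bit limbs of a GF(2^130 - 5) field element, interleaved as two chains (t0->t1 and t3->t4) for instruction-level parallelism. A simpler sequential sketch of the same reduction, assuming 64-bit limbs (carry_round is an illustrative helper; the generated code inlines the round instead):

#include <stdint.h>

/* After a multiply, each limb may exceed 26 bits. Ripple the carries
 * upward; the carry out of limb 4 re-enters limb 0 multiplied by 5,
 * because 2^130 = 5 (mod 2^130 - 5). */
static void
carry_round(uint64_t f[5U])
{
    uint64_t mask26 = (uint64_t)0x3ffffffU;
    uint64_t c;
    c = f[0U] >> (uint32_t)26U; f[0U] &= mask26; f[1U] += c;
    c = f[1U] >> (uint32_t)26U; f[1U] &= mask26; f[2U] += c;
    c = f[2U] >> (uint32_t)26U; f[2U] &= mask26; f[3U] += c;
    c = f[3U] >> (uint32_t)26U; f[3U] &= mask26; f[4U] += c;
    c = f[4U] >> (uint32_t)26U; f[4U] &= mask26;
    f[0U] += c * (uint64_t)5U;
}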

View file

@ -36,7 +36,7 @@
void
Hacl_Chacha20Poly1305_32_aead_encrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,
@ -47,7 +47,7 @@ Hacl_Chacha20Poly1305_32_aead_encrypt(
uint32_t
Hacl_Chacha20Poly1305_32_aead_decrypt(
uint8_t *k,
uint8_t *n1,
uint8_t *n,
uint32_t aadlen,
uint8_t *aad,
uint32_t mlen,

View file

@ -153,7 +153,7 @@ chacha20_core_128(
}
static inline void
chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n, uint32_t ctr)
{
uint32_t ctx1[16U] = { 0U };
uint32_t *uu____0 = ctx1;
@ -175,7 +175,7 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n1,
uint32_t *uu____2 = ctx1 + (uint32_t)13U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i++) {
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint8_t *bj = n + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
@ -203,16 +203,16 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr)
{
Lib_IntVector_Intrinsics_vec128 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
chacha20_init_128(ctx, key, n1, ctr);
uint32_t rem1 = len % (uint32_t)256U;
chacha20_init_128(ctx, key, n, ctr);
uint32_t rem = len % (uint32_t)256U;
uint32_t nb = len / (uint32_t)256U;
uint32_t rem2 = len % (uint32_t)256U;
uint32_t rem1 = len % (uint32_t)256U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *uu____0 = out + i * (uint32_t)256U;
uint8_t *uu____1 = text + i * (uint32_t)256U;
@ -339,11 +339,11 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
}
}
if (rem2 > (uint32_t)0U) {
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)256U;
uint8_t *uu____3 = text + nb * (uint32_t)256U;
uint8_t plain[256U] = { 0U };
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
@ -466,7 +466,7 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
}
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
}
}
@ -476,16 +476,16 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr)
{
Lib_IntVector_Intrinsics_vec128 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
chacha20_init_128(ctx, key, n1, ctr);
uint32_t rem1 = len % (uint32_t)256U;
chacha20_init_128(ctx, key, n, ctr);
uint32_t rem = len % (uint32_t)256U;
uint32_t nb = len / (uint32_t)256U;
uint32_t rem2 = len % (uint32_t)256U;
uint32_t rem1 = len % (uint32_t)256U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *uu____0 = out + i * (uint32_t)256U;
uint8_t *uu____1 = cipher + i * (uint32_t)256U;
@ -612,11 +612,11 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
}
}
if (rem2 > (uint32_t)0U) {
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)256U;
uint8_t *uu____3 = cipher + nb * (uint32_t)256U;
uint8_t plain[256U] = { 0U };
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
@ -739,6 +739,6 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
}
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
}
}

View file

@ -39,7 +39,7 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr);
void
@ -48,7 +48,7 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr);
#define __Hacl_Chacha20_Vec128_H_DEFINED

View file

@ -153,7 +153,7 @@ chacha20_core_256(
}
static inline void
chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n, uint32_t ctr)
{
uint32_t ctx1[16U] = { 0U };
uint32_t *uu____0 = ctx1;
@ -175,7 +175,7 @@ chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n1,
uint32_t *uu____2 = ctx1 + (uint32_t)13U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i++) {
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint8_t *bj = n + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
@ -207,16 +207,16 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr)
{
Lib_IntVector_Intrinsics_vec256 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec256_zero;
chacha20_init_256(ctx, key, n1, ctr);
uint32_t rem1 = len % (uint32_t)512U;
chacha20_init_256(ctx, key, n, ctr);
uint32_t rem = len % (uint32_t)512U;
uint32_t nb = len / (uint32_t)512U;
uint32_t rem2 = len % (uint32_t)512U;
uint32_t rem1 = len % (uint32_t)512U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *uu____0 = out + i * (uint32_t)512U;
uint8_t *uu____1 = text + i * (uint32_t)512U;
@ -375,11 +375,11 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
Lib_IntVector_Intrinsics_vec256_store_le(uu____0 + i0 * (uint32_t)32U, y);
}
}
if (rem2 > (uint32_t)0U) {
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)512U;
uint8_t *uu____3 = text + nb * (uint32_t)512U;
uint8_t plain[512U] = { 0U };
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
Lib_IntVector_Intrinsics_vec256 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
@ -534,7 +534,7 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec256_store_le(plain + i * (uint32_t)32U, y);
}
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
}
}
@ -544,16 +544,16 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr)
{
Lib_IntVector_Intrinsics_vec256 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec256_zero;
chacha20_init_256(ctx, key, n1, ctr);
uint32_t rem1 = len % (uint32_t)512U;
chacha20_init_256(ctx, key, n, ctr);
uint32_t rem = len % (uint32_t)512U;
uint32_t nb = len / (uint32_t)512U;
uint32_t rem2 = len % (uint32_t)512U;
uint32_t rem1 = len % (uint32_t)512U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *uu____0 = out + i * (uint32_t)512U;
uint8_t *uu____1 = cipher + i * (uint32_t)512U;
@ -712,11 +712,11 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
Lib_IntVector_Intrinsics_vec256_store_le(uu____0 + i0 * (uint32_t)32U, y);
}
}
if (rem2 > (uint32_t)0U) {
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)512U;
uint8_t *uu____3 = cipher + nb * (uint32_t)512U;
uint8_t plain[512U] = { 0U };
memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
Lib_IntVector_Intrinsics_vec256 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
@ -871,6 +871,6 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec256_store_le(plain + i * (uint32_t)32U, y);
}
memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
}
}
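
Both directions above split len into nb full 512-byte blocks plus rem trailing bytes, then stage the tail through a zeroed on-stack buffer so the vectorized block routine never reads or writes past the caller's data. A minimal sketch of the pattern; process_block is a hypothetical stand-in for the keystream/XOR body, not a HACL* function:

#include <stdint.h>
#include <string.h>

#define BLOCK 512U

static void
stream_blocks(uint8_t *out, const uint8_t *in, uint32_t len,
              void (*process_block)(uint8_t *dst, const uint8_t *src))
{
    uint32_t nb = len / BLOCK;
    uint32_t rem = len % BLOCK;
    for (uint32_t i = (uint32_t)0U; i < nb; i++) {
        process_block(out + i * BLOCK, in + i * BLOCK);
    }
    if (rem > (uint32_t)0U) {
        uint8_t plain[BLOCK] = { 0U };        /* zero padding */
        memcpy(plain, in + nb * BLOCK, rem);
        process_block(plain, plain);          /* in place on the copy */
        memcpy(out + nb * BLOCK, plain, rem); /* write back only rem bytes */
    }
}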

View file

@ -39,7 +39,7 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr);
void
@ -48,7 +48,7 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint8_t *n,
uint32_t ctr);
#define __Hacl_Chacha20_Vec256_H_DEFINED

View file

@ -648,7 +648,7 @@ cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2)
}
}
static uint8_t g25519[32U] = { (uint8_t)9U };
static const uint8_t g25519[32U] = { (uint8_t)9U };
static void
point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2)
@ -721,7 +721,7 @@ point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
}
static void
montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
{
FStar_UInt128_uint128 tmp2[10U];
for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
@ -731,7 +731,7 @@ montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
uint64_t *p01 = p01_tmp1_swap;
uint64_t *p03 = p01;
uint64_t *p11 = p01 + (uint32_t)10U;
memcpy(p11, init1, (uint32_t)10U * sizeof(init1[0U]));
memcpy(p11, init, (uint32_t)10U * sizeof(init[0U]));
uint64_t *x0 = p03;
uint64_t *z0 = p03 + (uint32_t)5U;
x0[0U] = (uint64_t)1U;
@ -748,24 +748,24 @@ montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
uint64_t *p01_tmp11 = p01_tmp1_swap;
uint64_t *nq1 = p01_tmp1_swap;
uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
uint64_t *swap = p01_tmp1_swap + (uint32_t)40U;
cswap20((uint64_t)1U, nq1, nq_p11);
point_add_and_double(init1, p01_tmp11, tmp2);
swap1[0U] = (uint64_t)1U;
point_add_and_double(init, p01_tmp11, tmp2);
swap[0U] = (uint64_t)1U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++) {
uint64_t *p01_tmp12 = p01_tmp1_swap;
uint64_t *swap2 = p01_tmp1_swap + (uint32_t)40U;
uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
uint64_t *nq2 = p01_tmp12;
uint64_t *nq_p12 = p01_tmp12 + (uint32_t)10U;
uint64_t
bit =
(uint64_t)(key[((uint32_t)253U - i) / (uint32_t)8U] >> ((uint32_t)253U - i) % (uint32_t)8U & (uint8_t)1U);
uint64_t sw = swap2[0U] ^ bit;
uint64_t sw = swap1[0U] ^ bit;
cswap20(sw, nq2, nq_p12);
point_add_and_double(init1, p01_tmp12, tmp2);
swap2[0U] = bit;
point_add_and_double(init, p01_tmp12, tmp2);
swap1[0U] = bit;
}
uint64_t sw = swap1[0U];
uint64_t sw = swap[0U];
cswap20(sw, nq1, nq_p11);
uint64_t *nq10 = p01_tmp1;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
@ -776,10 +776,10 @@ montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
}
static void
fsquare_times(uint64_t *o, uint64_t *inp, FStar_UInt128_uint128 *tmp, uint32_t n1)
fsquare_times(uint64_t *o, uint64_t *inp, FStar_UInt128_uint128 *tmp, uint32_t n)
{
fsqr0(o, inp);
for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i++) {
for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++) {
fsqr0(o, o);
}
}
@ -840,7 +840,7 @@ encode_point(uint8_t *o, uint64_t *i)
void
Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint64_t init1[10U] = { 0U };
uint64_t init[10U] = { 0U };
uint64_t tmp[4U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
uint64_t *os = tmp;
@ -852,8 +852,8 @@ Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
}
uint64_t tmp3 = tmp[3U];
tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
uint64_t *x = init1;
uint64_t *z = init1 + (uint32_t)5U;
uint64_t *x = init;
uint64_t *z = init + (uint32_t)5U;
z[0U] = (uint64_t)1U;
z[1U] = (uint64_t)0U;
z[2U] = (uint64_t)0U;
@ -872,8 +872,8 @@ Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
x[2U] = f1h | f2l;
x[3U] = f2h | f3l;
x[4U] = f3h;
montgomery_ladder(init1, priv, init1);
encode_point(out, init1);
montgomery_ladder(init, priv, init);
encode_point(out, init);
}
void
@ -891,11 +891,11 @@ Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
bool
Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint8_t zeros1[32U] = { 0U };
uint8_t zeros[32U] = { 0U };
Hacl_Curve25519_51_scalarmult(out, priv, pub);
uint8_t res = (uint8_t)255U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) {
uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros1[i]);
uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
res = uu____0 & res;
}
uint8_t z = res;
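
The ladder above never branches on key bits: each bit is XORed against the previous swap state and passed to cswap20, which exchanges (or leaves) two limb arrays through an all-ones/all-zeros mask. A minimal sketch of the primitive, generalized over the array length (the cswap20 above is the fixed-width version of this):

#include <stdint.h>

/* bit must be 0 or 1. Stretch it into a mask and XOR-swap under the
 * mask, so timing and memory accesses are identical in both cases. */
static void
cswap(uint64_t bit, uint64_t *p1, uint64_t *p2, uint32_t len)
{
    uint64_t mask = (uint64_t)0U - bit;
    for (uint32_t i = (uint32_t)0U; i < len; i++) {
        uint64_t dummy = mask & (p1[i] ^ p2[i]);
        p1[i] = p1[i] ^ dummy;
        p2[i] = p2[i] ^ dummy;
    }
}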

View file

@ -239,13 +239,13 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
Lib_IntVector_Intrinsics_vec128 t3 = a35;
Lib_IntVector_Intrinsics_vec128 t4 = a45;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@ -255,21 +255,21 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o10 = x12;
@ -514,13 +514,13 @@ Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@ -530,21 +530,21 @@ Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
@ -736,13 +736,13 @@ Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@ -752,21 +752,21 @@ Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
@ -953,13 +953,13 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@ -969,21 +969,21 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o00 = x02;
Lib_IntVector_Intrinsics_vec128 o10 = x12;
@ -1021,7 +1021,7 @@ Hacl_Poly1305_128_poly1305_update(
uint32_t len1 = len - len0;
uint8_t *t1 = text + len0;
uint32_t nb = len1 / (uint32_t)16U;
uint32_t rem1 = len1 % (uint32_t)16U;
uint32_t rem = len1 % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = t1 + i * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 e[5U];
@ -1187,13 +1187,13 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@ -1203,21 +1203,21 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;
@ -1230,13 +1230,13 @@ Hacl_Poly1305_128_poly1305_update(
acc[3U] = o3;
acc[4U] = o4;
}
if (rem1 > (uint32_t)0U) {
uint8_t *last1 = t1 + nb * (uint32_t)16U;
if (rem > (uint32_t)0U) {
uint8_t *last = t1 + nb * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem1 * sizeof(last1[0U]));
memcpy(tmp, last, rem * sizeof(last[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@ -1276,10 +1276,10 @@ Hacl_Poly1305_128_poly1305_update(
e[2U] = f2;
e[3U] = f3;
e[4U] = f4;
uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
Lib_IntVector_Intrinsics_vec128 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
Lib_IntVector_Intrinsics_vec128 *r = pre;
Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
@ -1397,13 +1397,13 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
Lib_IntVector_Intrinsics_vec128
@ -1413,21 +1413,21 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128
t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
Lib_IntVector_Intrinsics_vec128
z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
Lib_IntVector_Intrinsics_vec128
z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
Lib_IntVector_Intrinsics_vec128 o0 = x02;
Lib_IntVector_Intrinsics_vec128 o1 = x12;

View file

@ -33,7 +33,7 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
Lib_IntVector_Intrinsics_vec256
hi = Lib_IntVector_Intrinsics_vec256_load_le(b + (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256 m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
Lib_IntVector_Intrinsics_vec256
m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
@ -46,14 +46,14 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
Lib_IntVector_Intrinsics_vec256 t3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
Lib_IntVector_Intrinsics_vec256
t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)4U);
Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t2, mask261);
Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t2, mask26);
Lib_IntVector_Intrinsics_vec256
t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t1, mask261);
Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t0, mask261);
Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t1, mask26);
Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
Lib_IntVector_Intrinsics_vec256
t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)30U);
Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask261);
Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask26);
Lib_IntVector_Intrinsics_vec256
o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
Lib_IntVector_Intrinsics_vec256 o0 = o5;
@ -245,13 +245,13 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
Lib_IntVector_Intrinsics_vec256 t30 = a350;
Lib_IntVector_Intrinsics_vec256 t40 = a450;
Lib_IntVector_Intrinsics_vec256
mask2610 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask2610);
Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask2610);
Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
Lib_IntVector_Intrinsics_vec256
@ -261,21 +261,21 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
Lib_IntVector_Intrinsics_vec256
t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask2610);
Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask2610);
Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
Lib_IntVector_Intrinsics_vec256
z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask2610);
Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask2610);
Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
Lib_IntVector_Intrinsics_vec256
z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask2610);
Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
Lib_IntVector_Intrinsics_vec256 r20 = x020;
Lib_IntVector_Intrinsics_vec256 r21 = x120;
@ -373,13 +373,13 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
Lib_IntVector_Intrinsics_vec256 t31 = a351;
Lib_IntVector_Intrinsics_vec256 t41 = a451;
Lib_IntVector_Intrinsics_vec256
mask2611 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x03 = Lib_IntVector_Intrinsics_vec256_and(t01, mask2611);
Lib_IntVector_Intrinsics_vec256 x33 = Lib_IntVector_Intrinsics_vec256_and(t31, mask2611);
Lib_IntVector_Intrinsics_vec256 x03 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
Lib_IntVector_Intrinsics_vec256 x33 = Lib_IntVector_Intrinsics_vec256_and(t31, mask261);
Lib_IntVector_Intrinsics_vec256 x13 = Lib_IntVector_Intrinsics_vec256_add64(t11, z04);
Lib_IntVector_Intrinsics_vec256 x43 = Lib_IntVector_Intrinsics_vec256_add64(t41, z14);
Lib_IntVector_Intrinsics_vec256
@ -389,21 +389,21 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
Lib_IntVector_Intrinsics_vec256
t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z111, t6);
Lib_IntVector_Intrinsics_vec256 x111 = Lib_IntVector_Intrinsics_vec256_and(x13, mask2611);
Lib_IntVector_Intrinsics_vec256 x411 = Lib_IntVector_Intrinsics_vec256_and(x43, mask2611);
Lib_IntVector_Intrinsics_vec256 x111 = Lib_IntVector_Intrinsics_vec256_and(x13, mask261);
Lib_IntVector_Intrinsics_vec256 x411 = Lib_IntVector_Intrinsics_vec256_and(x43, mask261);
Lib_IntVector_Intrinsics_vec256 x22 = Lib_IntVector_Intrinsics_vec256_add64(t21, z011);
Lib_IntVector_Intrinsics_vec256 x011 = Lib_IntVector_Intrinsics_vec256_add64(x03, z120);
Lib_IntVector_Intrinsics_vec256
z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x211 = Lib_IntVector_Intrinsics_vec256_and(x22, mask2611);
Lib_IntVector_Intrinsics_vec256 x021 = Lib_IntVector_Intrinsics_vec256_and(x011, mask2611);
Lib_IntVector_Intrinsics_vec256 x211 = Lib_IntVector_Intrinsics_vec256_and(x22, mask261);
Lib_IntVector_Intrinsics_vec256 x021 = Lib_IntVector_Intrinsics_vec256_and(x011, mask261);
Lib_IntVector_Intrinsics_vec256 x311 = Lib_IntVector_Intrinsics_vec256_add64(x33, z021);
Lib_IntVector_Intrinsics_vec256 x121 = Lib_IntVector_Intrinsics_vec256_add64(x111, z131);
Lib_IntVector_Intrinsics_vec256
z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x321 = Lib_IntVector_Intrinsics_vec256_and(x311, mask2611);
Lib_IntVector_Intrinsics_vec256 x321 = Lib_IntVector_Intrinsics_vec256_and(x311, mask261);
Lib_IntVector_Intrinsics_vec256 x421 = Lib_IntVector_Intrinsics_vec256_add64(x411, z031);
Lib_IntVector_Intrinsics_vec256 r30 = x021;
Lib_IntVector_Intrinsics_vec256 r31 = x121;
@ -539,13 +539,13 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
Lib_IntVector_Intrinsics_vec256 t3 = a35;
Lib_IntVector_Intrinsics_vec256 t4 = a45;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@ -555,21 +555,21 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z121 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z121);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o10 = x12;
@ -580,57 +580,32 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
v00 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o0, o0);
Lib_IntVector_Intrinsics_vec256 v10 = Lib_IntVector_Intrinsics_vec256_add64(o0, v00);
Lib_IntVector_Intrinsics_vec256
v20 =
Lib_IntVector_Intrinsics_vec256_add64(v10,
Lib_IntVector_Intrinsics_vec256_shuffle64(v10,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U));
v10h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v10, v10);
Lib_IntVector_Intrinsics_vec256 v20 = Lib_IntVector_Intrinsics_vec256_add64(v10, v10h);
Lib_IntVector_Intrinsics_vec256
v01 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o10, o10);
Lib_IntVector_Intrinsics_vec256 v11 = Lib_IntVector_Intrinsics_vec256_add64(o10, v01);
Lib_IntVector_Intrinsics_vec256
v21 =
Lib_IntVector_Intrinsics_vec256_add64(v11,
Lib_IntVector_Intrinsics_vec256_shuffle64(v11,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U));
v11h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v11, v11);
Lib_IntVector_Intrinsics_vec256 v21 = Lib_IntVector_Intrinsics_vec256_add64(v11, v11h);
Lib_IntVector_Intrinsics_vec256
v02 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o20, o20);
Lib_IntVector_Intrinsics_vec256 v12 = Lib_IntVector_Intrinsics_vec256_add64(o20, v02);
Lib_IntVector_Intrinsics_vec256
v22 =
Lib_IntVector_Intrinsics_vec256_add64(v12,
Lib_IntVector_Intrinsics_vec256_shuffle64(v12,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U));
v12h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v12, v12);
Lib_IntVector_Intrinsics_vec256 v22 = Lib_IntVector_Intrinsics_vec256_add64(v12, v12h);
Lib_IntVector_Intrinsics_vec256
v03 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o30, o30);
Lib_IntVector_Intrinsics_vec256 v13 = Lib_IntVector_Intrinsics_vec256_add64(o30, v03);
Lib_IntVector_Intrinsics_vec256
v23 =
Lib_IntVector_Intrinsics_vec256_add64(v13,
Lib_IntVector_Intrinsics_vec256_shuffle64(v13,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U));
v13h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v13, v13);
Lib_IntVector_Intrinsics_vec256 v23 = Lib_IntVector_Intrinsics_vec256_add64(v13, v13h);
Lib_IntVector_Intrinsics_vec256
v04 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o40, o40);
Lib_IntVector_Intrinsics_vec256 v14 = Lib_IntVector_Intrinsics_vec256_add64(o40, v04);
Lib_IntVector_Intrinsics_vec256
v24 =
Lib_IntVector_Intrinsics_vec256_add64(v14,
Lib_IntVector_Intrinsics_vec256_shuffle64(v14,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U,
(uint32_t)1U));
v14h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v14, v14);
Lib_IntVector_Intrinsics_vec256 v24 = Lib_IntVector_Intrinsics_vec256_add64(v14, v14h);
Lib_IntVector_Intrinsics_vec256
l = Lib_IntVector_Intrinsics_vec256_add64(v20, Lib_IntVector_Intrinsics_vec256_zero);
Lib_IntVector_Intrinsics_vec256
@ -857,13 +832,13 @@ Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec256 t30 = a340;
Lib_IntVector_Intrinsics_vec256 t40 = a440;
Lib_IntVector_Intrinsics_vec256
mask2610 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask2610);
Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask2610);
Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
Lib_IntVector_Intrinsics_vec256
@ -873,21 +848,21 @@ Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec256
t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask2610);
Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask2610);
Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
Lib_IntVector_Intrinsics_vec256
z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask2610);
Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask2610);
Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
Lib_IntVector_Intrinsics_vec256
z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask2610);
Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
Lib_IntVector_Intrinsics_vec256 o00 = x020;
Lib_IntVector_Intrinsics_vec256 o10 = x120;
@ -1008,13 +983,13 @@ Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec256 t3 = a34;
Lib_IntVector_Intrinsics_vec256 t4 = a44;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@ -1024,21 +999,21 @@ Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z120);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
@ -1230,13 +1205,13 @@ Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t
Lib_IntVector_Intrinsics_vec256 t3 = a36;
Lib_IntVector_Intrinsics_vec256 t4 = a46;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@ -1246,21 +1221,21 @@ Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
@ -1301,7 +1276,7 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256
hi = Lib_IntVector_Intrinsics_vec256_load_le(block + (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256
mask2610 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
Lib_IntVector_Intrinsics_vec256
@ -1318,14 +1293,14 @@ Hacl_Poly1305_256_poly1305_update(
t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
Lib_IntVector_Intrinsics_vec256
t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask2610);
Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
Lib_IntVector_Intrinsics_vec256
t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask2610);
Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask2610);
Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
Lib_IntVector_Intrinsics_vec256
t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask2610);
Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
Lib_IntVector_Intrinsics_vec256
o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
Lib_IntVector_Intrinsics_vec256 o00 = o5;
@ -1449,13 +1424,13 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256 t3 = a34;
Lib_IntVector_Intrinsics_vec256 t4 = a44;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@ -1465,21 +1440,21 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o01 = x02;
Lib_IntVector_Intrinsics_vec256 o12 = x12;
@ -1517,7 +1492,7 @@ Hacl_Poly1305_256_poly1305_update(
uint32_t len1 = len - len0;
uint8_t *t1 = text + len0;
uint32_t nb = len1 / (uint32_t)16U;
uint32_t rem1 = len1 % (uint32_t)16U;
uint32_t rem = len1 % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = t1 + i * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec256 e[5U];
@ -1683,13 +1658,13 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256 t3 = a36;
Lib_IntVector_Intrinsics_vec256 t4 = a46;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@ -1699,21 +1674,21 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
@ -1726,13 +1701,13 @@ Hacl_Poly1305_256_poly1305_update(
acc[3U] = o3;
acc[4U] = o4;
}
if (rem1 > (uint32_t)0U) {
uint8_t *last1 = t1 + nb * (uint32_t)16U;
if (rem > (uint32_t)0U) {
uint8_t *last = t1 + nb * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec256 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem1 * sizeof(last1[0U]));
memcpy(tmp, last, rem * sizeof(last[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@ -1772,10 +1747,10 @@ Hacl_Poly1305_256_poly1305_update(
e[2U] = f2;
e[3U] = f3;
e[4U] = f4;
uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
Lib_IntVector_Intrinsics_vec256 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
Lib_IntVector_Intrinsics_vec256 *r = pre;
Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
@ -1893,13 +1868,13 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256 t3 = a36;
Lib_IntVector_Intrinsics_vec256 t4 = a46;
Lib_IntVector_Intrinsics_vec256
mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask261);
Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
Lib_IntVector_Intrinsics_vec256
@ -1909,21 +1884,21 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256
t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask261);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask261);
Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
Lib_IntVector_Intrinsics_vec256
z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256
z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask261);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask261);
Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
Lib_IntVector_Intrinsics_vec256
z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask261);
Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
Lib_IntVector_Intrinsics_vec256 o0 = x02;
Lib_IntVector_Intrinsics_vec256 o1 = x12;
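
Note: the change above swaps the vpermq-based shuffle64 horizontal adds for interleave_high64 (vpunpckhqdq), which stays within 128-bit lanes after the initial interleave_high128. A standalone sketch of the same lane-summing pattern, assuming AVX2 and using illustrative names (this is a reference for the reduction shape, not library code):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sum the four 64-bit lanes of an AVX2 vector, mirroring the
     * interleave_high128 + interleave_high64 pattern in the new code. */
    static inline uint64_t sum4_u64(__m256i v)
    {
        __m256i hi128 = _mm256_permute2x128_si256(v, v, 1); /* swap 128-bit halves */
        __m256i s1 = _mm256_add_epi64(v, hi128);            /* lanes: v0+v2, v1+v3, ... */
        __m256i hi64 = _mm256_unpackhi_epi64(s1, s1);       /* bring lane 1 into lane 0 */
        __m256i s2 = _mm256_add_epi64(s1, hi64);            /* lane 0: v0+v1+v2+v3 */
        return (uint64_t)_mm256_extract_epi64(s2, 0);
    }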

View file

@ -174,29 +174,29 @@ Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t mask26 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x0 = t0 & mask26;
uint64_t x3 = t3 & mask26;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x11 = x1 & mask26;
uint64_t x41 = x4 & mask26;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x21 = x2 & mask26;
uint64_t x02 = x01 & mask26;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x32 = x31 & mask26;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
@ -216,7 +216,7 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t *pre = ctx + (uint32_t)5U;
uint64_t *acc = ctx;
uint32_t nb = len / (uint32_t)16U;
uint32_t rem1 = len % (uint32_t)16U;
uint32_t rem = len % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = text + i * (uint32_t)16U;
uint64_t e[5U] = { 0U };
@ -301,29 +301,29 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t mask26 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x0 = t0 & mask26;
uint64_t x3 = t3 & mask26;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x11 = x1 & mask26;
uint64_t x41 = x4 & mask26;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x21 = x2 & mask26;
uint64_t x02 = x01 & mask26;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x32 = x31 & mask26;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
@ -336,11 +336,11 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
acc[3U] = o3;
acc[4U] = o4;
}
if (rem1 > (uint32_t)0U) {
uint8_t *last1 = text + nb * (uint32_t)16U;
if (rem > (uint32_t)0U) {
uint8_t *last = text + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last1, rem1 * sizeof(last1[0U]));
memcpy(tmp, last, rem * sizeof(last[0U]));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@ -362,10 +362,10 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
e[2U] = f2;
e[3U] = f3;
e[4U] = f4;
uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
uint64_t mask = b;
uint64_t fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
e[rem1 * (uint32_t)8U / (uint32_t)26U] = fi | mask;
uint64_t fi = e[rem * (uint32_t)8U / (uint32_t)26U];
e[rem * (uint32_t)8U / (uint32_t)26U] = fi | mask;
uint64_t *r = pre;
uint64_t *r5 = pre + (uint32_t)5U;
uint64_t r0 = r[0U];
@ -422,29 +422,29 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
uint64_t mask261 = (uint64_t)0x3ffffffU;
uint64_t mask26 = (uint64_t)0x3ffffffU;
uint64_t z0 = t0 >> (uint32_t)26U;
uint64_t z1 = t3 >> (uint32_t)26U;
uint64_t x0 = t0 & mask261;
uint64_t x3 = t3 & mask261;
uint64_t x0 = t0 & mask26;
uint64_t x3 = t3 & mask26;
uint64_t x1 = t1 + z0;
uint64_t x4 = t4 + z1;
uint64_t z01 = x1 >> (uint32_t)26U;
uint64_t z11 = x4 >> (uint32_t)26U;
uint64_t t = z11 << (uint32_t)2U;
uint64_t z12 = z11 + t;
uint64_t x11 = x1 & mask261;
uint64_t x41 = x4 & mask261;
uint64_t x11 = x1 & mask26;
uint64_t x41 = x4 & mask26;
uint64_t x2 = t2 + z01;
uint64_t x01 = x0 + z12;
uint64_t z02 = x2 >> (uint32_t)26U;
uint64_t z13 = x01 >> (uint32_t)26U;
uint64_t x21 = x2 & mask261;
uint64_t x02 = x01 & mask261;
uint64_t x21 = x2 & mask26;
uint64_t x02 = x01 & mask26;
uint64_t x31 = x3 + z02;
uint64_t x12 = x11 + z13;
uint64_t z03 = x31 >> (uint32_t)26U;
uint64_t x32 = x31 & mask261;
uint64_t x32 = x31 & mask26;
uint64_t x42 = x41 + z03;
uint64_t o0 = x02;
uint64_t o1 = x12;
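
Note: the mask261→mask26 and rem1/last1→rem/last changes in this file are pure renames from regenerating the HACL* code; the carry structure is unchanged. For reference, a compact scalar sketch (illustrative name, not part of the library) of the 26-bit limb carry chain the generated code unrolls:

    #include <stdint.h>

    /* One pass of carry propagation over five 26-bit limbs. Since
     * 2^130 = 5 (mod 2^130 - 5), the top carry folds back as c*5 --
     * the z12 = z11 + (z11 << 2) step in the generated code. */
    static void poly1305_carry_sketch(uint64_t h[5])
    {
        const uint64_t mask26 = 0x3ffffffULL;
        uint64_t c;
        c = h[0] >> 26; h[0] &= mask26; h[1] += c;
        c = h[1] >> 26; h[1] &= mask26; h[2] += c;
        c = h[2] >> 26; h[2] &= mask26; h[3] += c;
        c = h[3] >> 26; h[3] &= mask26; h[4] += c;
        c = h[4] >> 26; h[4] &= mask26;
        h[0] += c * 5; /* may leave h[0] slightly above 26 bits; fine mid-loop */
    }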

View file

@ -47,21 +47,33 @@ typedef uint64_t FStar_Date_dateTime, FStar_Date_timeSpan;
* definition into scope by default. */
typedef const char *Prims_string;
/* The great static header headache. */
#if (defined(_MSC_VER) && defined(_M_X64) && !defined(__clang__))
#define IS_MSVC64 1
#endif
/* This code makes a number of assumptions and should be refined. In particular,
* it assumes that: any non-MSVC amd64 compiler supports int128. Maybe it would
* be easier to just test for defined(__SIZEOF_INT128__) only? */
#if (defined(__x86_64__) || \
defined(__x86_64) || \
defined(__aarch64__) || \
(defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)) || \
defined(__s390x__) || \
(defined(_MSC_VER) && !defined(_M_X64) && defined(__clang__)) || \
(defined(__mips__) && defined(__LP64__)) || \
(defined(__riscv) && __riscv_xlen == 64) || \
defined(__SIZEOF_INT128__))
#define HAS_INT128 1
#endif
/* The uint128 type is a special case since we offer several implementations of
* it, depending on the compiler and whether the user wants the verified
* implementation or not. */
#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(_M_X64) && \
!defined(__clang__)
#if !defined(KRML_VERIFIED_UINT128) && defined(IS_MSVC64)
#include <emmintrin.h>
typedef __m128i FStar_UInt128_uint128;
#elif !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER) && \
(defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__) || \
(defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)))
#elif !defined(KRML_VERIFIED_UINT128) && defined(HAS_INT128)
typedef unsigned __int128 FStar_UInt128_uint128;
#elif !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(__clang__)
typedef __uint128_t FStar_UInt128_uint128;
#else
typedef struct FStar_UInt128_uint128_s {
uint64_t low;
@ -75,15 +87,13 @@ typedef FStar_UInt128_uint128 FStar_UInt128_t, uint128_t;
#include "kremlin/lowstar_endianness.h"
/* This one is always included, because it defines C.Endianness functions too. */
#if !defined(_MSC_VER) || defined(__clang__)
#include "fstar_uint128_gcc64.h"
#endif
#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && !defined(__clang__)
#if !defined(KRML_VERIFIED_UINT128) && defined(IS_MSVC64)
#include "fstar_uint128_msvc.h"
#elif defined(KRML_VERIFIED_UINT128)
#elif !defined(KRML_VERIFIED_UINT128) && defined(HAS_INT128)
#include "fstar_uint128_gcc64.h"
#else
#include "FStar_UInt128_Verified.h"
#include "fstar_uint128_struct_endianness.h"
#endif
#endif
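
Note: as the comment in this hunk concedes, the architecture list is an approximation. A hypothetical probe based only on __SIZEOF_INT128__, shown purely for comparison with the detection above (not what the header does):

    #include <stdint.h>

    #if defined(__SIZEOF_INT128__) && !defined(KRML_VERIFIED_UINT128)
    typedef unsigned __int128 example_uint128;              /* native 128-bit arithmetic */
    #else
    typedef struct { uint64_t low; uint64_t high; } example_uint128; /* verified fallback */
    #endif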

View file

@ -13,12 +13,6 @@
#ifndef __FStar_UInt128_H
#define __FStar_UInt128_H
static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee);
static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee);
static inline FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);

View file

@ -13,18 +13,6 @@
#include "FStar_UInt_8_16_32_64.h"
static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee)
{
return projectee.low;
}
static inline uint64_t
FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee)
{
return projectee.high;
}
static inline uint64_t
FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
{

View file

@ -15,6 +15,10 @@
extern Prims_int FStar_UInt64_n;
extern bool FStar_UInt64_uu___is_Mk(uint64_t projectee);
extern Prims_int FStar_UInt64___proj__Mk__item__v(uint64_t projectee);
extern Prims_int FStar_UInt64_v(uint64_t x);
extern uint64_t FStar_UInt64_uint_to_t(Prims_int x);
@ -47,12 +51,20 @@ FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
return x_xor_q_ - (uint64_t)1U;
}
extern Prims_string FStar_UInt64_to_string(uint64_t uu____716);
extern Prims_string FStar_UInt64_to_string(uint64_t uu____888);
extern uint64_t FStar_UInt64_of_string(Prims_string uu____728);
extern Prims_string FStar_UInt64_to_string_hex(uint64_t uu____899);
extern Prims_string FStar_UInt64_to_string_hex_pad(uint64_t uu____910);
extern uint64_t FStar_UInt64_of_string(Prims_string uu____921);
extern Prims_int FStar_UInt32_n;
extern bool FStar_UInt32_uu___is_Mk(uint32_t projectee);
extern Prims_int FStar_UInt32___proj__Mk__item__v(uint32_t projectee);
extern Prims_int FStar_UInt32_v(uint32_t x);
extern uint32_t FStar_UInt32_uint_to_t(Prims_int x);
@ -85,12 +97,20 @@ FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
return x_xor_q_ - (uint32_t)1U;
}
extern Prims_string FStar_UInt32_to_string(uint32_t uu____716);
extern Prims_string FStar_UInt32_to_string(uint32_t uu____888);
extern uint32_t FStar_UInt32_of_string(Prims_string uu____728);
extern Prims_string FStar_UInt32_to_string_hex(uint32_t uu____899);
extern Prims_string FStar_UInt32_to_string_hex_pad(uint32_t uu____910);
extern uint32_t FStar_UInt32_of_string(Prims_string uu____921);
extern Prims_int FStar_UInt16_n;
extern bool FStar_UInt16_uu___is_Mk(uint16_t projectee);
extern Prims_int FStar_UInt16___proj__Mk__item__v(uint16_t projectee);
extern Prims_int FStar_UInt16_v(uint16_t x);
extern uint16_t FStar_UInt16_uint_to_t(Prims_int x);
@ -123,12 +143,20 @@ FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
return x_xor_q_ - (uint16_t)1U;
}
extern Prims_string FStar_UInt16_to_string(uint16_t uu____716);
extern Prims_string FStar_UInt16_to_string(uint16_t uu____888);
extern uint16_t FStar_UInt16_of_string(Prims_string uu____728);
extern Prims_string FStar_UInt16_to_string_hex(uint16_t uu____899);
extern Prims_string FStar_UInt16_to_string_hex_pad(uint16_t uu____910);
extern uint16_t FStar_UInt16_of_string(Prims_string uu____921);
extern Prims_int FStar_UInt8_n;
extern bool FStar_UInt8_uu___is_Mk(uint8_t projectee);
extern Prims_int FStar_UInt8___proj__Mk__item__v(uint8_t projectee);
extern Prims_int FStar_UInt8_v(uint8_t x);
extern uint8_t FStar_UInt8_uint_to_t(Prims_int x);
@ -161,9 +189,13 @@ FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
return x_xor_q_ - (uint8_t)1U;
}
extern Prims_string FStar_UInt8_to_string(uint8_t uu____716);
extern Prims_string FStar_UInt8_to_string(uint8_t uu____888);
extern uint8_t FStar_UInt8_of_string(Prims_string uu____728);
extern Prims_string FStar_UInt8_to_string_hex(uint8_t uu____899);
extern Prims_string FStar_UInt8_to_string_hex_pad(uint8_t uu____910);
extern uint8_t FStar_UInt8_of_string(Prims_string uu____921);
typedef uint8_t FStar_UInt8_byte;

View file

@ -24,10 +24,6 @@
#include "FStar_UInt_8_16_32_64.h"
#include "LowStar_Endianness.h"
#if !defined(KRML_VERIFIED_UINT128) && (!defined(_MSC_VER) || defined(__clang__)) && \
(defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__) || \
(defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)))
/* GCC + using native unsigned __int128 support */
inline static uint128_t
@ -222,83 +218,3 @@ FStar_UInt128_mul32(uint64_t x, uint32_t y)
{
return (uint128_t)x * (uint128_t)y;
}
#elif !defined(_MSC_VER) && defined(KRML_VERIFIED_UINT128)
/* Verified uint128 implementation. */
/* Access 64-bit fields within the int128. */
#define HIGH64_OF(x) ((x)->high)
#define LOW64_OF(x) ((x)->low)
/* A series of definitions written using pointers. */
inline static void
load128_le_(uint8_t *b, uint128_t *r)
{
LOW64_OF(r) = load64_le(b);
HIGH64_OF(r) = load64_le(b + 8);
}
inline static void
store128_le_(uint8_t *b, uint128_t *n)
{
store64_le(b, LOW64_OF(n));
store64_le(b + 8, HIGH64_OF(n));
}
inline static void
load128_be_(uint8_t *b, uint128_t *r)
{
HIGH64_OF(r) = load64_be(b);
LOW64_OF(r) = load64_be(b + 8);
}
inline static void
store128_be_(uint8_t *b, uint128_t *n)
{
store64_be(b, HIGH64_OF(n));
store64_be(b + 8, LOW64_OF(n));
}
#ifndef KRML_NOSTRUCT_PASSING
inline static uint128_t
load128_le(uint8_t *b)
{
uint128_t r;
load128_le_(b, &r);
return r;
}
inline static void
store128_le(uint8_t *b, uint128_t n)
{
store128_le_(b, &n);
}
inline static uint128_t
load128_be(uint8_t *b)
{
uint128_t r;
load128_be_(b, &r);
return r;
}
inline static void
store128_be(uint8_t *b, uint128_t n)
{
store128_be_(b, &n);
}
#else /* !defined(KRML_STRUCT_PASSING) */
#define print128 print128_
#define load128_le load128_le_
#define store128_le store128_le_
#define load128_be load128_be_
#define store128_be store128_be_
#endif /* KRML_STRUCT_PASSING */
#endif

View file

@ -15,6 +15,10 @@
#error This file only works with the MSVC compiler
#endif
/* JP: need to rip out HAS_OPTIMIZED since the header guards in types.h are now
* done properly and only include this file when we know for sure we are on
* 64-bit MSVC. */
#if defined(_M_X64) && !defined(KRML_VERIFIED_UINT128)
#define HAS_OPTIMIZED 1
#else
@ -104,9 +108,7 @@ FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
#else
return ((FStar_UInt128_uint128){
.low = a.low + b.low,
.high = FStar_UInt64_add_underspec(
FStar_UInt64_add_underspec(a.high, b.high),
FStar_UInt128_carry(a.low + b.low, b.low)) });
.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) });
#endif
}
@ -146,9 +148,7 @@ FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
#else
return ((FStar_UInt128_uint128){
.low = a.low - b.low,
.high = FStar_UInt64_sub_underspec(
FStar_UInt64_sub_underspec(a.high, b.high),
FStar_UInt128_carry(a.low, a.low - b.low)) });
.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) });
#endif
}
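
Note: this simplification drops the nested add_underspec/sub_underspec wrappers in favor of plain wrapping arithmetic plus an explicit carry/borrow. A non-constant-time sketch of the underlying identity (illustrative names; the real FStar_UInt128_carry is branch-free):

    #include <stdint.h>

    /* With low = a + b computed mod 2^64, the addition wrapped iff low < b. */
    static inline uint64_t carry_out(uint64_t sum, uint64_t b)
    {
        return sum < b ? 1u : 0u;
    }

    /* 128-bit add over two 64-bit words, mirroring add_underspec above. */
    static inline void add128(uint64_t r[2], const uint64_t a[2], const uint64_t b[2])
    {
        uint64_t low = a[0] + b[0];
        r[1] = a[1] + b[1] + carry_out(low, b[0]);
        r[0] = low;
    }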

View file

@ -0,0 +1,84 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License. */
#ifndef FSTAR_UINT128_STRUCT_ENDIANNESS_H
#define FSTAR_UINT128_STRUCT_ENDIANNESS_H
/* Hand-written implementation of endianness-related uint128 functions
* for the extracted uint128 implementation */
/* Access 64-bit fields within the int128. */
#define HIGH64_OF(x) ((x)->high)
#define LOW64_OF(x) ((x)->low)
/* A series of definitions written using pointers. */
inline static void
load128_le_(uint8_t *b, uint128_t *r)
{
LOW64_OF(r) = load64_le(b);
HIGH64_OF(r) = load64_le(b + 8);
}
inline static void
store128_le_(uint8_t *b, uint128_t *n)
{
store64_le(b, LOW64_OF(n));
store64_le(b + 8, HIGH64_OF(n));
}
inline static void
load128_be_(uint8_t *b, uint128_t *r)
{
HIGH64_OF(r) = load64_be(b);
LOW64_OF(r) = load64_be(b + 8);
}
inline static void
store128_be_(uint8_t *b, uint128_t *n)
{
store64_be(b, HIGH64_OF(n));
store64_be(b + 8, LOW64_OF(n));
}
#ifndef KRML_NOSTRUCT_PASSING
inline static uint128_t
load128_le(uint8_t *b)
{
uint128_t r;
load128_le_(b, &r);
return r;
}
inline static void
store128_le(uint8_t *b, uint128_t n)
{
store128_le_(b, &n);
}
inline static uint128_t
load128_be(uint8_t *b)
{
uint128_t r;
load128_be_(b, &r);
return r;
}
inline static void
store128_be(uint8_t *b, uint128_t n)
{
store128_be_(b, &n);
}
#else /* !defined(KRML_STRUCT_PASSING) */
#define print128 print128_
#define load128_le load128_le_
#define store128_le store128_le_
#define load128_be load128_be_
#define store128_be store128_be_
#endif /* KRML_STRUCT_PASSING */
#endif
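
A small usage sketch for the struct-backed uint128_t above, assuming KRML_NOSTRUCT_PASSING is not defined so the by-value wrappers exist:

    static void roundtrip_example(void)
    {
        uint8_t buf[16] = { 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
        uint128_t x = load128_le(buf); /* x.low == 1, x.high == 2 */
        store128_be(buf, x);           /* buf now holds the big-endian encoding */
    }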

View file

@ -75,8 +75,11 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) \
(_mm_shuffle_epi8(x0, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2)))
#define Lib_IntVector_Intrinsics_vec128_rotate_left32_24(x0) \
(_mm_shuffle_epi8(x0, _mm_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1)))
#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \
((x1 == 8 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : _mm_xor_si128(_mm_slli_epi32(x0, x1), _mm_srli_epi32(x0, 32 - (x1))))))
(((x1) == 8 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) : ((x1) == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : ((x1) == 24 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_24(x0) : _mm_xor_si128(_mm_slli_epi32(x0, x1), _mm_srli_epi32(x0, 32 - (x1)))))))
#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \
(Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, 32 - (x1)))
@ -87,6 +90,12 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_shuffle64(x0, x1, x2) \
(_mm_shuffle_epi32(x0, _MM_SHUFFLE(2 * x1 + 1, 2 * x1, 2 * x2 + 1, 2 * x2)))
#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(x0, x1) \
(_mm_shuffle_epi32(x0, _MM_SHUFFLE((x1 + 3) % 4, (x1 + 2) % 4, (x1 + 1) % 4, x1 % 4)))
#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes64(x0, x1) \
(_mm_shuffle_epi32(x0, _MM_SHUFFLE((2 * x1 + 3) % 4, (2 * x1 + 2) % 4, (2 * x1 + 1) % 4, (2 * x1) % 4)))
#define Lib_IntVector_Intrinsics_vec128_load_le(x0) \
(_mm_loadu_si128((__m128i*)(x0)))
@ -130,7 +139,7 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
(_mm_extract_epi64(x0, x1))
#define Lib_IntVector_Intrinsics_vec128_zero \
(_mm_set1_epi16((uint16_t)0))
(_mm_setzero_si128())
#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \
(_mm_add_epi64(x0, x1))
@ -238,8 +247,11 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) \
(_mm256_shuffle_epi8(x0, _mm256_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2, 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2)))
#define Lib_IntVector_Intrinsics_vec256_rotate_left32_24(x0) \
(_mm256_shuffle_epi8(x0, _mm256_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1, 12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1)))
#define Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, x1) \
((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) : _mm256_or_si256(_mm256_slli_epi32(x0, x1), _mm256_srli_epi32(x0, 32 - (x1))))))
((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) : (x1 == 24 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_24(x0) : _mm256_or_si256(_mm256_slli_epi32(x0, x1), _mm256_srli_epi32(x0, 32 - (x1)))))))
#define Lib_IntVector_Intrinsics_vec256_rotate_right32(x0, x1) \
(Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, 32 - (x1)))
@ -256,8 +268,20 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) \
(_mm256_shuffle_epi8(x0, _mm256_set_epi8(11, 10, 9, 8, 15, 14, 13, 12, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, 3, 2, 1, 0, 7, 6, 5, 4)))
#define Lib_IntVector_Intrinsics_vec256_rotate_right64_40(x0) \
(_mm256_shuffle_epi8(x0, _mm256_set_epi8(12, 11, 10, 9, 8, 15, 14, 13, 4, 3, 2, 1, 0, 7, 6, 5, 12, 11, 10, 9, 8, 15, 14, 13, 4, 3, 2, 1, 0, 7, 6, 5)))
#define Lib_IntVector_Intrinsics_vec256_rotate_right64_48(x0) \
(_mm256_shuffle_epi8(x0, _mm256_set_epi8(13, 12, 11, 10, 9, 8, 15, 14, 5, 4, 3, 2, 1, 0, 7, 6, 13, 12, 11, 10, 9, 8, 15, 14, 5, 4, 3, 2, 1, 0, 7, 6)))
#define Lib_IntVector_Intrinsics_vec256_rotate_right64_56(x0) \
(_mm256_shuffle_epi8(x0, _mm256_set_epi8(14, 13, 12, 11, 10, 9, 8, 15, 6, 5, 4, 3, 2, 1, 0, 7, 14, 13, 12, 11, 10, 9, 8, 15, 6, 5, 4, 3, 2, 1, 0, 7)))
#define Lib_IntVector_Intrinsics_vec256_rotate_right64(x0, x1) \
((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) : (x1 == 24 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) : (x1 == 32 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) : _mm256_xor_si256(_mm256_srli_epi64((x0), (x1)), _mm256_slli_epi64((x0), (64 - (x1)))))))))
((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) : (x1 == 24 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) : (x1 == 32 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) : (x1 == 40 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_40(x0) : (x1 == 48 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_48(x0) : (x1 == 56 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_56(x0) : _mm256_xor_si256(_mm256_srli_epi64((x0), (x1)), _mm256_slli_epi64((x0), (64 - (x1))))))))))))
#define Lib_IntVector_Intrinsics_vec256_rotate_left64(x0, x1) \
(Lib_IntVector_Intrinsics_vec256_rotate_right64(x0, 64 - (x1)))
#define Lib_IntVector_Intrinsics_vec256_shuffle64(x0, x1, x2, x3, x4) \
(_mm256_permute4x64_epi64(x0, _MM_SHUFFLE(x4, x3, x2, x1)))
@ -265,12 +289,30 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_shuffle32(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
(_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32(x8, x7, x6, x5, x4, x3, x2, x1)))
#define Lib_IntVector_Intrinsics_vec256_rotate_right_lanes32(x0, x1) \
(_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32((x1 + 7) % 8, (x1 + 6) % 8, (x1 + 5) % 8, (x1 + 4) % 8, ((x1 + 3) % 8), (x1 + 2) % 8, (x1 + 1) % 8, x1 % 8)))
#define Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(x0, x1) \
(_mm256_permute4x64_epi64(x0, _MM_SHUFFLE((x1 + 3) % 4, (x1 + 2) % 4, (x1 + 1) % 4, x1 % 4)))
#define Lib_IntVector_Intrinsics_vec256_load_le(x0) \
(_mm256_loadu_si256((__m256i*)(x0)))
#define Lib_IntVector_Intrinsics_vec256_load32_be(x0) \
(_mm256_shuffle_epi8(_mm256_loadu_si256((__m256i*)(x0)), _mm256_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3)))
#define Lib_IntVector_Intrinsics_vec256_load64_be(x0) \
(_mm256_shuffle_epi8(_mm256_loadu_si256((__m256i*)(x0)), _mm256_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7)))
#define Lib_IntVector_Intrinsics_vec256_store_le(x0, x1) \
(_mm256_storeu_si256((__m256i*)(x0), x1))
#define Lib_IntVector_Intrinsics_vec256_store32_be(x0, x1) \
(_mm256_storeu_si256((__m256i*)(x0), _mm256_shuffle_epi8(x1, _mm256_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3))))
#define Lib_IntVector_Intrinsics_vec256_store64_be(x0, x1) \
(_mm256_storeu_si256((__m256i*)(x0), _mm256_shuffle_epi8(x1, _mm256_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7))))
#define Lib_IntVector_Intrinsics_vec256_insert8(x0, x1, x2) \
(_mm256_insert_epi8(x0, x1, x2))
@ -290,7 +332,7 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
(_mm256_extract_epi64(x0, x1))
#define Lib_IntVector_Intrinsics_vec256_zero \
(_mm256_set1_epi16((uint16_t)0))
(_mm256_setzero_si256())
#define Lib_IntVector_Intrinsics_vec256_add64(x0, x1) \
(_mm256_add_epi64(x0, x1))
@ -369,8 +411,17 @@ typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_gt32(x0, x1) \
(vcgtq_u32(x0, x1))
#define high32(x0) \
(vmovn_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0), 32)))
#define low32(x0) \
(vmovn_u64(vreinterpretq_u64_u32(x0)))
#define Lib_IntVector_Intrinsics_vec128_gt64(x0, x1) \
(vreinterpretq_u32_u64(vmovl_u32(vorr_u32(vcgt_u32(high32(x0), high32(x1)), vand_u32(vceq_u32(high32(x0), high32(x1)), vcgt_u32(low32(x0), low32(x1)))))))
#define Lib_IntVector_Intrinsics_vec128_or(x0, x1) \
(voorq_u32(x0, x1))
(vorrq_u32(x0, x1))
#define Lib_IntVector_Intrinsics_vec128_and(x0, x1) \
(vandq_u32(x0, x1))
@ -394,13 +445,25 @@ typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
(vshlq_n_u32(x0, x1))
#define Lib_IntVector_Intrinsics_vec128_shift_right32(x0, x1) \
(vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0), x1)))
(vshrq_n_u32(x0, x1))
#define Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x1) \
(vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x1))))
#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \
(vsriq_n_u32(vshlq_n_u32((x0), (x1)), (x0), 32 - (x1)))
(((x1) == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : vsriq_n_u32(vshlq_n_u32((x0), (x1)), (x0), 32 - (x1))))
#define Lib_IntVector_Intrinsics_vec128_rotate_right32_16(x1) \
(vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x1))))
#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \
(vsriq_n_u32(vshlq_n_u32((x0), 32 - (x1)), (x0), (x1)))
(((x1) == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_right32_16(x0) : vsriq_n_u32(vshlq_n_u32((x0), 32 - (x1)), (x0), (x1))))
#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(x0, x1) \
(vextq_u32(x0, x0, x1))
#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes64(x0, x1) \
(vextq_u64(x0, x0, x1))
/*
#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \
@ -423,10 +486,10 @@ typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
*/
#define Lib_IntVector_Intrinsics_vec128_load32_be(x0) \
(vrev32q_u8(vld1q_u32((const uint32_t*)(x0))))
(vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(vld1q_u32((const uint32_t*)(x0))))))
#define Lib_IntVector_Intrinsics_vec128_load64_be(x0) \
(vreinterpretq_u32_u64(vrev64q_u8(vld1q_u32((const uint32_t*)(x0)))))
(vreinterpretq_u32_u8(vrev64q_u8(vreinterpretq_u8_u32(vld1q_u32((const uint32_t*)(x0))))))
/*
#define Lib_IntVector_Intrinsics_vec128_store_be(x0, x1) \
@ -434,10 +497,10 @@ typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
*/
#define Lib_IntVector_Intrinsics_vec128_store32_be(x0, x1) \
(vst1q_u32((uint32_t*)(x0), (vrev32q_u8(x1))))
(vst1q_u32((uint32_t*)(x0), (vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(x1))))))
#define Lib_IntVector_Intrinsics_vec128_store64_be(x0, x1) \
(vst1q_u32((uint32_t*)(x0), (vrev64q_u8(x1))))
(vst1q_u32((uint32_t*)(x0), (vreinterpretq_u32_u8(vrev64q_u8(vreinterpretq_u8_u32(x1))))))
#define Lib_IntVector_Intrinsics_vec128_insert8(x0, x1, x2) \
(vsetq_lane_u8(x1, x0, x2))
@ -455,10 +518,10 @@ typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
(vgetq_lane_u32(x0, x1))
#define Lib_IntVector_Intrinsics_vec128_extract64(x0, x1) \
(vreinterpretq_u32_u64(vgetq_lane_u64(vreinterpretq_u64_u32(x0), x1)))
(vgetq_lane_u64(vreinterpretq_u64_u32(x0), x1))
#define Lib_IntVector_Intrinsics_vec128_zero \
(vdup_n_u8(0))
(vdupq_n_u32(0))
#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \
(vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1))))
@ -467,10 +530,10 @@ typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
(vreinterpretq_u32_u64(vsubq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1))))
#define Lib_IntVector_Intrinsics_vec128_mul64(x0, x1) \
(vmull_u32(vmovn_u64(x0), vmovn_u64(x1)))
(vreinterpretq_u32_u64(vmull_u32(vmovn_u64(vreinterpretq_u64_u32(x0)), vmovn_u64(vreinterpretq_u64_u32(x1)))))
#define Lib_IntVector_Intrinsics_vec128_smul64(x0, x1) \
(vmull_u32(vmovn_u64(x0), vdupq_n_u64(x1)))
(vreinterpretq_u32_u64(vmull_n_u32(vmovn_u64(vreinterpretq_u64_u32(x0)), (uint32_t)x1)))
#define Lib_IntVector_Intrinsics_vec128_add32(x0, x1) \
(vaddq_u32(x0, x1))

View file

@ -1175,3 +1175,9 @@ PK11_SymKeysToSameSlot;
;+ local:
;+ *;
;+};
;+NSS_3.53 { # NSS 3.53 release
;+ global:
SECMOD_GetSystemFIPSEnabled;
;+ local:
;+ *;
;+};

View file

@ -22,12 +22,12 @@
* The format of the version string should be
* "<major version>.<minor version>[.<patch level>[.<build number>]][ <ECC>][ <Beta>]"
*/
#define NSS_VERSION "3.52" _NSS_CUSTOMIZED
#define NSS_VERSION "3.53" _NSS_CUSTOMIZED " Beta"
#define NSS_VMAJOR 3
#define NSS_VMINOR 52
#define NSS_VMINOR 53
#define NSS_VPATCH 0
#define NSS_VBUILD 0
#define NSS_BETA PR_FALSE
#define NSS_BETA PR_TRUE
#ifndef RC_INVOKED

View file

@ -818,7 +818,7 @@ SECMOD_CreateModuleEx(const char *library, const char *moduleName,
mod->internal = NSSUTIL_ArgHasFlag("flags", "internal", nssc);
mod->isFIPS = NSSUTIL_ArgHasFlag("flags", "FIPS", nssc);
/* if the system FIPS mode is enabled, force FIPS to be on */
if (secmod_GetSystemFIPSEnabled()) {
if (SECMOD_GetSystemFIPSEnabled()) {
mod->isFIPS = PR_TRUE;
}
mod->isCritical = NSSUTIL_ArgHasFlag("flags", "critical", nssc);

View file

@ -939,6 +939,17 @@ PK11_GetLowLevelKeyIDForPrivateKey(SECKEYPrivateKey *key);
PRBool SECMOD_HasRootCerts(void);
/**********************************************************************
* Other Utilities
**********************************************************************/
/*
* Get the state of the system FIPS mode -
* NSS uses this to force FIPS mode if the system bit is on. This returns
* the system state independent of the database state and can be called
* before NSS initializes.
*/
int SECMOD_GetSystemFIPSEnabled();
SEC_END_PROTOS
#endif

View file

@ -95,8 +95,8 @@ SECMOD_Shutdown()
return SECSuccess;
}
int
secmod_GetSystemFIPSEnabled(void)
PRBool
SECMOD_GetSystemFIPSEnabled(void)
{
#ifdef LINUX
#ifndef NSS_FIPS_DISABLED
@ -106,20 +106,20 @@ secmod_GetSystemFIPSEnabled(void)
f = fopen("/proc/sys/crypto/fips_enabled", "r");
if (!f) {
return 0;
return PR_FALSE;
}
size = fread(&d, 1, sizeof(d), f);
fclose(f);
if (size != sizeof(d)) {
return 0;
return PR_FALSE;
}
if (d == '1') {
return 1;
return PR_TRUE;
}
#endif
#endif
return 0;
return PR_FALSE;
}
/*
@ -455,7 +455,7 @@ SECMOD_DeleteInternalModule(const char *name)
SECMODModuleList **mlpp;
SECStatus rv = SECFailure;
if (secmod_GetSystemFIPSEnabled() || pendingModule) {
if (SECMOD_GetSystemFIPSEnabled() || pendingModule) {
PORT_SetError(SEC_ERROR_MODULE_STUCK);
return rv;
}
@ -990,7 +990,7 @@ SECMOD_CanDeleteInternalModule(void)
#ifdef NSS_FIPS_DISABLED
return PR_FALSE;
#else
return (PRBool)((pendingModule == NULL) && !secmod_GetSystemFIPSEnabled());
return (PRBool)((pendingModule == NULL) && !SECMOD_GetSystemFIPSEnabled());
#endif
}
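
A usage sketch for the newly public call (hypothetical helper name): it can run before NSS initialization because it only inspects /proc/sys/crypto/fips_enabled on Linux and returns PR_FALSE everywhere else:

    #include "pk11pub.h"

    static PRBool must_run_fips(void)
    {
        /* When this returns PR_TRUE, switching the internal module out of
         * FIPS fails with SEC_ERROR_MODULE_STUCK, as in
         * SECMOD_DeleteInternalModule above. */
        return SECMOD_GetSystemFIPSEnabled();
    }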

View file

@ -115,13 +115,6 @@ PK11SymKey *pk11_TokenKeyGenWithFlagsAndKeyType(PK11SlotInfo *slot,
CK_MECHANISM_TYPE pk11_GetPBECryptoMechanism(SECAlgorithmID *algid,
SECItem **param, SECItem *pwd, PRBool faulty3DES);
/* Get the state of the system FIPS mode */
/* NSS uses this to force FIPS mode if the system bit is on. Applications which
* use the SECMOD_CanDeleteInteral() to check to see if they can switch to or
* from FIPS mode will automatically be told that they can't swith out of FIPS
* mode */
int secmod_GetSystemFIPSEnabled();
extern void pk11sdr_Init(void);
extern void pk11sdr_Shutdown(void);

View file

@ -392,15 +392,17 @@ static const struct mechanismList mechanisms[] = {
{ CKM_CAMELLIA_MAC, { 16, 32, CKF_SN_VR }, PR_TRUE },
{ CKM_CAMELLIA_MAC_GENERAL, { 16, 32, CKF_SN_VR }, PR_TRUE },
{ CKM_CAMELLIA_CBC_PAD, { 16, 32, CKF_EN_DE_WR_UN }, PR_TRUE },
/* ------------------------- SEED Operations --------------------------- */
/* ------------------------- SEED Operations --------------------------- */
#ifndef NSS_DISABLE_DEPRECATED_SEED
{ CKM_SEED_KEY_GEN, { 16, 16, CKF_GENERATE }, PR_TRUE },
{ CKM_SEED_ECB, { 16, 16, CKF_EN_DE_WR_UN }, PR_TRUE },
{ CKM_SEED_CBC, { 16, 16, CKF_EN_DE_WR_UN }, PR_TRUE },
{ CKM_SEED_MAC, { 16, 16, CKF_SN_VR }, PR_TRUE },
{ CKM_SEED_MAC_GENERAL, { 16, 16, CKF_SN_VR }, PR_TRUE },
{ CKM_SEED_CBC_PAD, { 16, 16, CKF_EN_DE_WR_UN }, PR_TRUE },
#endif
/* ------------------------- ChaCha20 Operations ---------------------- */
#ifndef NSS_DISABLE_CHACHAPOLY
/* ------------------------- ChaCha20 Operations ---------------------- */
{ CKM_NSS_CHACHA20_KEY_GEN, { 32, 32, CKF_GENERATE }, PR_TRUE },
{ CKM_NSS_CHACHA20_POLY1305, { 32, 32, CKF_EN_DE }, PR_TRUE },
{ CKM_NSS_CHACHA20_CTR, { 32, 32, CKF_EN_DE }, PR_TRUE },
@ -495,9 +497,10 @@ static const struct mechanismList mechanisms[] = {
{ CKM_AES_CBC_ENCRYPT_DATA, { 1, 32, CKF_DERIVE }, PR_FALSE },
{ CKM_CAMELLIA_ECB_ENCRYPT_DATA, { 1, 32, CKF_DERIVE }, PR_FALSE },
{ CKM_CAMELLIA_CBC_ENCRYPT_DATA, { 1, 32, CKF_DERIVE }, PR_FALSE },
#ifndef NSS_DISABLE_DEPRECATED_SEED
{ CKM_SEED_ECB_ENCRYPT_DATA, { 1, 32, CKF_DERIVE }, PR_FALSE },
{ CKM_SEED_CBC_ENCRYPT_DATA, { 1, 32, CKF_DERIVE }, PR_FALSE },
#endif
/* ---------------------- SSL Key Derivations ------------------------- */
{ CKM_SSL3_PRE_MASTER_KEY_GEN, { 48, 48, CKF_GENERATE }, PR_FALSE },
{ CKM_SSL3_MASTER_KEY_DERIVE, { 48, 48, CKF_DERIVE }, PR_FALSE },
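
Because the CKM_SEED_* rows above can now be compiled out with
NSS_DISABLE_DEPRECATED_SEED, callers should probe for the mechanism at
runtime rather than assume it exists. A minimal sketch (hypothetical usage;
PK11_TokenExists is an existing NSS API):

#include <stdio.h>
#include "nss.h"
#include "pk11pub.h"

int main(void)
{
    if (NSS_NoDB_Init(NULL) != SECSuccess) {
        return 1;
    }
    /* Returns PR_FALSE when the softoken no longer advertises SEED. */
    printf("CKM_SEED_CBC %s\n",
           PK11_TokenExists(CKM_SEED_CBC) ? "available" : "unavailable");
    NSS_Shutdown();
    return 0;
}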


@ -1044,6 +1044,7 @@ sftk_CryptInit(CK_SESSION_HANDLE hSession, CK_MECHANISM_PTR pMechanism,
context->update = (SFTKCipher)(isEncrypt ? DES_Encrypt : DES_Decrypt);
context->destroy = (SFTKDestroy)DES_DestroyContext;
break;
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_CBC_PAD:
context->doPad = PR_TRUE;
/* fall thru */
@ -1078,7 +1079,7 @@ sftk_CryptInit(CK_SESSION_HANDLE hSession, CK_MECHANISM_PTR pMechanism,
context->update = (SFTKCipher)(isEncrypt ? SEED_Encrypt : SEED_Decrypt);
context->destroy = (SFTKDestroy)SEED_DestroyContext;
break;
#endif /* NSS_DISABLE_DEPRECATED_SEED */
case CKM_CAMELLIA_CBC_PAD:
context->doPad = PR_TRUE;
/* fall thru */
@ -2325,6 +2326,7 @@ sftk_InitCBCMac(CK_SESSION_HANDLE hSession, CK_MECHANISM_PTR pMechanism,
cbc_mechanism.pParameter = &ivBlock;
cbc_mechanism.ulParameterLen = blockSize;
break;
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_MAC_GENERAL:
mac_bytes = *(CK_ULONG *)pMechanism->pParameter;
/* fall through */
@ -2335,6 +2337,7 @@ sftk_InitCBCMac(CK_SESSION_HANDLE hSession, CK_MECHANISM_PTR pMechanism,
cbc_mechanism.pParameter = &ivBlock;
cbc_mechanism.ulParameterLen = blockSize;
break;
#endif /* NSS_DISABLE_DEPRECATED_SEED */
case CKM_CAMELLIA_MAC_GENERAL:
mac_bytes = *(CK_ULONG *)pMechanism->pParameter;
/* fall through */
@ -4215,10 +4218,12 @@ nsc_SetupBulkKeyGen(CK_MECHANISM_TYPE mechanism, CK_KEY_TYPE *key_type,
*key_type = CKK_DES3;
*key_length = 24;
break;
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_KEY_GEN:
*key_type = CKK_SEED;
*key_length = 16;
break;
#endif /* NSS_DISABLE_DEPRECATED_SEED */
case CKM_CAMELLIA_KEY_GEN:
*key_type = CKK_CAMELLIA;
if (*key_length == 0)
@ -4530,7 +4535,9 @@ NSC_GenerateKey(CK_SESSION_HANDLE hSession,
case CKM_RC2_KEY_GEN:
case CKM_RC4_KEY_GEN:
case CKM_GENERIC_SECRET_KEY_GEN:
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_KEY_GEN:
#endif
case CKM_CAMELLIA_KEY_GEN:
case CKM_AES_KEY_GEN:
case CKM_NSS_CHACHA20_KEY_GEN:
@ -7823,6 +7830,7 @@ NSC_DeriveKey(CK_SESSION_HANDLE hSession,
break;
}
#ifndef NSS_DISABLE_DEPRECATED_SEED
case CKM_SEED_ECB_ENCRYPT_DATA:
case CKM_SEED_CBC_ENCRYPT_DATA: {
void *cipherInfo;
@ -7869,6 +7877,7 @@ NSC_DeriveKey(CK_SESSION_HANDLE hSession,
SEED_DestroyContext(cipherInfo, PR_TRUE);
break;
}
#endif /* NSS_DISABLE_DEPRECATED_SEED */
case CKM_CONCATENATE_BASE_AND_KEY: {
SFTKObject *newKey;


@ -17,11 +17,11 @@
* The format of the version string should be
* "<major version>.<minor version>[.<patch level>[.<build number>]][ <ECC>][ <Beta>]"
*/
#define SOFTOKEN_VERSION "3.52" SOFTOKEN_ECC_STRING
#define SOFTOKEN_VERSION "3.53" SOFTOKEN_ECC_STRING " Beta"
#define SOFTOKEN_VMAJOR 3
#define SOFTOKEN_VMINOR 52
#define SOFTOKEN_VMINOR 53
#define SOFTOKEN_VPATCH 0
#define SOFTOKEN_VBUILD 0
#define SOFTOKEN_BETA PR_FALSE
#define SOFTOKEN_BETA PR_TRUE
#endif /* _SOFTKVER_H_ */


@ -19,12 +19,12 @@
* The format of the version string should be
* "<major version>.<minor version>[.<patch level>[.<build number>]][ <Beta>]"
*/
#define NSSUTIL_VERSION "3.52"
#define NSSUTIL_VERSION "3.53 Beta"
#define NSSUTIL_VMAJOR 3
#define NSSUTIL_VMINOR 52
#define NSSUTIL_VMINOR 53
#define NSSUTIL_VPATCH 0
#define NSSUTIL_VBUILD 0
#define NSSUTIL_BETA PR_FALSE
#define NSSUTIL_BETA PR_TRUE
SEC_BEGIN_PROTOS