Upgrade evercrypt and switch to new official hacl-star checked-in extracted C (#654)

This commit is contained in:
Amaury Chamayou 2019-12-20 08:37:31 +00:00 committed by GitHub
Parent 573374590c
Commit 36fad90c7a
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
171 changed files with 55877 additions and 67468 deletions


2160
3rdparty/evercrypt-msr/evercrypt/Hacl_Ed25519.c vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

135
3rdparty/evercrypt-msr/evercrypt/Hacl_Hash.h vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

288
3rdparty/evercrypt-msr/evercrypt/Hacl_SHA3.c vendored

File diff suppressed because one or more lines are too long

87
3rdparty/evercrypt-msr/evercrypt/Hacl_SHA3.h vendored

File diff suppressed because one or more lines are too long

40
3rdparty/evercrypt-msr/evercrypt/Hacl_Spec.h vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@@ -1,83 +0,0 @@
#include "Lib_RandomBuffer.h"
#include <stdio.h>
#if (defined(_WIN32) || defined(_WIN64))
#include <inttypes.h>
#include <stdbool.h>
#include <malloc.h>
#include <windows.h>
bool read_random_bytes(uint32_t len, uint8_t *buf) {
HCRYPTPROV ctxt;
if (!(CryptAcquireContext(&ctxt, NULL, NULL, PROV_RSA_FULL,
CRYPT_VERIFYCONTEXT))) {
DWORD error = GetLastError();
printf("Cannot acquire crypto context: 0x%lx\n", error);
return false;
}
bool pass = true;
if (!(CryptGenRandom(ctxt, (uint64_t)len, buf))) {
printf("Cannot read random bytes\n");
pass = false;
}
CryptReleaseContext(ctxt, 0);
return pass;
}
void *hacl_aligned_malloc(size_t alignment, size_t size) {
void *res = _aligned_malloc(size, alignment);
if (res == NULL) {
printf("Cannot allocate %" PRIu64 " bytes aligned to %" PRIu64 "\n",
(uint64_t)size, (uint64_t)alignment);
}
return res;
}
void hacl_aligned_free(void *ptr) { _aligned_free(ptr); }
#else
/* assume POSIX here */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
bool read_random_bytes(uint32_t len, uint8_t *buf) {
int fd = open("/dev/urandom", O_RDONLY);
if (fd == -1) {
printf("Cannot open /dev/urandom\n");
return false;
}
bool pass = true;
uint64_t res = read(fd, buf, (uint64_t)len);
if (res != (uint64_t)len) {
printf("Error on reading, expected %" PRIu32 " bytes, got %" PRIu64
" bytes\n",
len, res);
pass = false;
}
close(fd);
return pass;
}
void *hacl_aligned_malloc(size_t alignment, size_t size) {
void *res = NULL;
if (posix_memalign(&res, alignment, size)) {
printf("Cannot allocate %" PRIu64 " bytes aligned to %" PRIu64 "\n",
(uint64_t)size, (uint64_t)alignment);
return NULL;
}
return res;
}
void hacl_aligned_free(void *ptr) { free(ptr); }
#endif
void randombytes(uint8_t *x, uint32_t len) {
if (!(read_random_bytes(len, x)))
exit(1);
}
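
For reference, the file removed above implemented randombytes() on top of CryptGenRandom (Windows) or /dev/urandom (POSIX), exiting the process if the platform RNG fails. A minimal caller sketch, not part of this commit, assuming only the declaration from the removed Lib_RandomBuffer.h:

#include <stdint.h>
#include <stdio.h>

/* Declaration from the (removed) Lib_RandomBuffer.h. */
void randombytes(uint8_t *x, uint32_t len);

int main(void)
{
  uint8_t key[32];               /* illustrative 256-bit buffer */
  randombytes(key, sizeof key);  /* exits the process if the RNG fails */
  printf("first byte: %02x\n", (unsigned)key[0]);
  return 0;
}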

File diff suppressed because one or more lines are too long

25
3rdparty/evercrypt-msr/evercrypt/LowStar.c vendored

File diff suppressed because one or more lines are too long

23
3rdparty/evercrypt-msr/evercrypt/LowStar.h vendored

File diff suppressed because one or more lines are too long

2102
3rdparty/evercrypt-msr/evercrypt/MerkleTree.c vendored

File diff suppressed because one or more lines are too long

200
3rdparty/evercrypt-msr/evercrypt/MerkleTree.h vendored

File diff suppressed because one or more lines are too long

65
3rdparty/evercrypt-msr/evercrypt/TestLib.h vendored

File diff suppressed because one or more lines are too long

11
3rdparty/evercrypt-msr/evercrypt/Vale.c vendored

File diff suppressed because one or more lines are too long

143
3rdparty/evercrypt-msr/evercrypt/Vale.h vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large. Load diff

File diff suppressed because it is too large. Load diff

File diff suppressed because it is too large. Load diff

View file

@@ -1,38 +0,0 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -tmpdir dist/minimal -skip-compilation -extract-uints -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -bundle C.Endianness= -library FStar.UInt128 -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_UInt8.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/FStar_Bytes.krml .extract/C_String.krml .extract/FStar_HyperStack_IO.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
F* version: 74c6d2a5
KreMLin version: 1bd260eb
*/
#include "C_Endianness.h"
uint32_t index_32_be(uint8_t *b, uint32_t i)
{
return load32_be(b + (uint32_t)4U * i);
}
uint32_t index_32_le(uint8_t *b, uint32_t i)
{
return load32_le(b + (uint32_t)4U * i);
}
uint64_t index_64_be(uint8_t *b, uint32_t i)
{
return load64_be(b + (uint32_t)8U * i);
}
uint64_t index_64_le(uint8_t *b, uint32_t i)
{
return load64_le(b + (uint32_t)8U * i);
}
void upd_32_be(uint8_t *b, uint32_t i, uint32_t v1)
{
store32_be(b + (uint32_t)4U * i, v1);
}
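
The removed helpers above are thin wrappers over the kremlib load32_be/load32_le/load64_* primitives: index_32_be(b, i) returns the i-th big-endian 32-bit word of b. A self-contained sketch of the same operation, written out by hand for illustration and not taken from this commit:

#include <stdint.h>
#include <stdio.h>

/* Hand-written equivalent of load32_be(b + 4*i), i.e. index_32_be(b, i). */
static uint32_t be32_at(const uint8_t *b, uint32_t i)
{
  const uint8_t *p = b + 4U * i;
  return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
       | ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}

int main(void)
{
  uint8_t buf[8] = { 0x00, 0x00, 0x00, 0x2A, 0xDE, 0xAD, 0xBE, 0xEF };
  printf("%u 0x%08x\n", be32_at(buf, 0U), be32_at(buf, 1U)); /* 42 0xdeadbeef */
  return 0;
}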

View file

@@ -1,42 +0,0 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -tmpdir dist/minimal -skip-compilation -extract-uints -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -bundle C.Endianness= -library FStar.UInt128 -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_UInt8.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/FStar_Bytes.krml .extract/C_String.krml .extract/FStar_HyperStack_IO.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
F* version: 74c6d2a5
KreMLin version: 1bd260eb
*/
#ifndef __C_Endianness_H
#define __C_Endianness_H
#include "FStar_UInt128.h"
#include <inttypes.h>
#include <stdbool.h>
#include "kremlin/internal/compat.h"
#include "kremlin/lowstar_endianness.h"
#include "kremlin/internal/types.h"
extern FStar_UInt128_uint128 load128_le(uint8_t *b);
extern void store128_le(uint8_t *b, FStar_UInt128_uint128 z);
extern FStar_UInt128_uint128 load128_be(uint8_t *b);
extern void store128_be(uint8_t *b, FStar_UInt128_uint128 z);
uint32_t index_32_be(uint8_t *b, uint32_t i);
uint32_t index_32_le(uint8_t *b, uint32_t i);
uint64_t index_64_be(uint8_t *b, uint32_t i);
uint64_t index_64_le(uint8_t *b, uint32_t i);
void upd_32_be(uint8_t *b, uint32_t i, uint32_t v1);
#define __C_Endianness_H_DEFINED
#endif

View file

@@ -1,132 +0,0 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -tmpdir dist/minimal -skip-compilation -extract-uints -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -bundle C.Endianness= -library FStar.UInt128 -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_UInt8.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/FStar_Bytes.krml .extract/C_String.krml .extract/FStar_HyperStack_IO.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
F* version: 74c6d2a5
KreMLin version: 1bd260eb
*/
#ifndef __FStar_UInt128_H
#define __FStar_UInt128_H
#include <inttypes.h>
#include <stdbool.h>
#include "kremlin/internal/compat.h"
#include "kremlin/lowstar_endianness.h"
#include "kremlin/internal/types.h"
extern uint64_t FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee);
extern uint64_t FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee);
typedef FStar_UInt128_uint128 FStar_UInt128_t;
extern FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);
extern FStar_UInt128_uint128 FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s);
extern FStar_UInt128_uint128 FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);
extern bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
extern FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
extern uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
extern FStar_UInt128_uint128
FStar_UInt128_op_Plus_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Plus_Question_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Plus_Percent_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Subtraction_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Subtraction_Question_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Subtraction_Percent_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Amp_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Hat_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Bar_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Less_Less_Hat(FStar_UInt128_uint128 x0, uint32_t x1);
extern FStar_UInt128_uint128
FStar_UInt128_op_Greater_Greater_Hat(FStar_UInt128_uint128 x0, uint32_t x1);
extern bool FStar_UInt128_op_Equals_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern bool FStar_UInt128_op_Greater_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern bool FStar_UInt128_op_Less_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern bool
FStar_UInt128_op_Greater_Equals_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern bool
FStar_UInt128_op_Less_Equals_Hat(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);
extern FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y);
extern FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
#define __FStar_UInt128_H_DEFINED
#endif

View file

@@ -1,101 +0,0 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -tmpdir dist/minimal -skip-compilation -extract-uints -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -bundle C.Endianness= -library FStar.UInt128 -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_UInt8.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/FStar_Bytes.krml .extract/C_String.krml .extract/FStar_HyperStack_IO.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
F* version: 74c6d2a5
KreMLin version: 1bd260eb
*/
#include "FStar_UInt_8_16_32_64.h"
uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
{
uint64_t x = a ^ b;
uint64_t minus_x = ~x + (uint64_t)1U;
uint64_t x_or_minus_x = x | minus_x;
uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
return xnx - (uint64_t)1U;
}
uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
{
uint64_t x = a;
uint64_t y = b;
uint64_t x_xor_y = x ^ y;
uint64_t x_sub_y = x - y;
uint64_t x_sub_y_xor_y = x_sub_y ^ y;
uint64_t q = x_xor_y | x_sub_y_xor_y;
uint64_t x_xor_q = x ^ q;
uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
return x_xor_q_ - (uint64_t)1U;
}
uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
{
uint32_t x = a ^ b;
uint32_t minus_x = ~x + (uint32_t)1U;
uint32_t x_or_minus_x = x | minus_x;
uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
return xnx - (uint32_t)1U;
}
uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
{
uint32_t x = a;
uint32_t y = b;
uint32_t x_xor_y = x ^ y;
uint32_t x_sub_y = x - y;
uint32_t x_sub_y_xor_y = x_sub_y ^ y;
uint32_t q = x_xor_y | x_sub_y_xor_y;
uint32_t x_xor_q = x ^ q;
uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
return x_xor_q_ - (uint32_t)1U;
}
uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
{
uint16_t x = a ^ b;
uint16_t minus_x = ~x + (uint16_t)1U;
uint16_t x_or_minus_x = x | minus_x;
uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
return xnx - (uint16_t)1U;
}
uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
{
uint16_t x = a;
uint16_t y = b;
uint16_t x_xor_y = x ^ y;
uint16_t x_sub_y = x - y;
uint16_t x_sub_y_xor_y = x_sub_y ^ y;
uint16_t q = x_xor_y | x_sub_y_xor_y;
uint16_t x_xor_q = x ^ q;
uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
return x_xor_q_ - (uint16_t)1U;
}
uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
{
uint8_t x = a ^ b;
uint8_t minus_x = ~x + (uint8_t)1U;
uint8_t x_or_minus_x = x | minus_x;
uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
return xnx - (uint8_t)1U;
}
uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
{
uint8_t x = a;
uint8_t y = b;
uint8_t x_xor_y = x ^ y;
uint8_t x_sub_y = x - y;
uint8_t x_sub_y_xor_y = x_sub_y ^ y;
uint8_t q = x_xor_y | x_sub_y_xor_y;
uint8_t x_xor_q = x ^ q;
uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
return x_xor_q_ - (uint8_t)1U;
}
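
The *_eq_mask and *_gte_mask functions removed above are branch-free: they return an all-ones mask ((uintN_t)-1) when the relation holds and 0 otherwise, which is the building block for constant-time selection. A hedged usage sketch, not part of the commit, assuming the FStar_UInt64_eq_mask defined above:

#include <stdint.h>
#include <stdio.h>

uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b); /* from the file above */

/* Branch-free select: returns x when a == b, otherwise y. */
static uint64_t ct_select(uint64_t a, uint64_t b, uint64_t x, uint64_t y)
{
  uint64_t m = FStar_UInt64_eq_mask(a, b); /* all ones or all zeros */
  return (x & m) | (y & ~m);
}

int main(void)
{
  printf("%llu\n", (unsigned long long)ct_select(3U, 3U, 10U, 20U)); /* 10 */
  printf("%llu\n", (unsigned long long)ct_select(3U, 4U, 10U, 20U)); /* 20 */
  return 0;
}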

View file

@@ -1,282 +0,0 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -tmpdir dist/minimal -skip-compilation -extract-uints -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -bundle C.Endianness= -library FStar.UInt128 -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_UInt8.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/FStar_Bytes.krml .extract/C_String.krml .extract/FStar_HyperStack_IO.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
F* version: 74c6d2a5
KreMLin version: 1bd260eb
*/
#ifndef __FStar_UInt_8_16_32_64_H
#define __FStar_UInt_8_16_32_64_H
#include <inttypes.h>
#include <stdbool.h>
#include "kremlin/internal/compat.h"
#include "kremlin/lowstar_endianness.h"
#include "kremlin/internal/types.h"
extern Prims_int FStar_UInt64_n;
extern Prims_int FStar_UInt64_v(uint64_t x);
extern uint64_t FStar_UInt64_uint_to_t(Prims_int x);
extern uint64_t FStar_UInt64_add(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_add_underspec(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_add_mod(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_sub(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_sub_underspec(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_sub_mod(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_mul(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_mul_underspec(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_mul_mod(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_mul_div(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_div(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_rem(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_logand(uint64_t x, uint64_t y);
extern uint64_t FStar_UInt64_logxor(uint64_t x, uint64_t y);
extern uint64_t FStar_UInt64_logor(uint64_t x, uint64_t y);
extern uint64_t FStar_UInt64_lognot(uint64_t x);
extern uint64_t FStar_UInt64_shift_right(uint64_t a, uint32_t s);
extern uint64_t FStar_UInt64_shift_left(uint64_t a, uint32_t s);
extern bool FStar_UInt64_eq(uint64_t a, uint64_t b);
extern bool FStar_UInt64_gt(uint64_t a, uint64_t b);
extern bool FStar_UInt64_gte(uint64_t a, uint64_t b);
extern bool FStar_UInt64_lt(uint64_t a, uint64_t b);
extern bool FStar_UInt64_lte(uint64_t a, uint64_t b);
extern uint64_t FStar_UInt64_minus(uint64_t a);
extern uint32_t FStar_UInt64_n_minus_one;
uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b);
uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b);
extern Prims_string FStar_UInt64_to_string(uint64_t uu____722);
extern uint64_t FStar_UInt64_of_string(Prims_string uu____734);
extern Prims_int FStar_UInt32_n;
extern Prims_int FStar_UInt32_v(uint32_t x);
extern uint32_t FStar_UInt32_uint_to_t(Prims_int x);
extern uint32_t FStar_UInt32_add(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_add_underspec(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_add_mod(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_sub(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_sub_underspec(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_sub_mod(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_mul(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_mul_underspec(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_mul_mod(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_mul_div(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_div(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_rem(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_logand(uint32_t x, uint32_t y);
extern uint32_t FStar_UInt32_logxor(uint32_t x, uint32_t y);
extern uint32_t FStar_UInt32_logor(uint32_t x, uint32_t y);
extern uint32_t FStar_UInt32_lognot(uint32_t x);
extern uint32_t FStar_UInt32_shift_right(uint32_t a, uint32_t s);
extern uint32_t FStar_UInt32_shift_left(uint32_t a, uint32_t s);
extern bool FStar_UInt32_eq(uint32_t a, uint32_t b);
extern bool FStar_UInt32_gt(uint32_t a, uint32_t b);
extern bool FStar_UInt32_gte(uint32_t a, uint32_t b);
extern bool FStar_UInt32_lt(uint32_t a, uint32_t b);
extern bool FStar_UInt32_lte(uint32_t a, uint32_t b);
extern uint32_t FStar_UInt32_minus(uint32_t a);
extern uint32_t FStar_UInt32_n_minus_one;
uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b);
uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b);
extern Prims_string FStar_UInt32_to_string(uint32_t uu____722);
extern uint32_t FStar_UInt32_of_string(Prims_string uu____734);
extern Prims_int FStar_UInt16_n;
extern Prims_int FStar_UInt16_v(uint16_t x);
extern uint16_t FStar_UInt16_uint_to_t(Prims_int x);
extern uint16_t FStar_UInt16_add(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_add_underspec(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_add_mod(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_sub(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_sub_underspec(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_sub_mod(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_mul(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_mul_underspec(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_mul_mod(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_mul_div(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_div(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_rem(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_logand(uint16_t x, uint16_t y);
extern uint16_t FStar_UInt16_logxor(uint16_t x, uint16_t y);
extern uint16_t FStar_UInt16_logor(uint16_t x, uint16_t y);
extern uint16_t FStar_UInt16_lognot(uint16_t x);
extern uint16_t FStar_UInt16_shift_right(uint16_t a, uint32_t s);
extern uint16_t FStar_UInt16_shift_left(uint16_t a, uint32_t s);
extern bool FStar_UInt16_eq(uint16_t a, uint16_t b);
extern bool FStar_UInt16_gt(uint16_t a, uint16_t b);
extern bool FStar_UInt16_gte(uint16_t a, uint16_t b);
extern bool FStar_UInt16_lt(uint16_t a, uint16_t b);
extern bool FStar_UInt16_lte(uint16_t a, uint16_t b);
extern uint16_t FStar_UInt16_minus(uint16_t a);
extern uint32_t FStar_UInt16_n_minus_one;
uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b);
uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b);
extern Prims_string FStar_UInt16_to_string(uint16_t uu____722);
extern uint16_t FStar_UInt16_of_string(Prims_string uu____734);
extern Prims_int FStar_UInt8_n;
extern Prims_int FStar_UInt8_v(uint8_t x);
extern uint8_t FStar_UInt8_uint_to_t(Prims_int x);
extern uint8_t FStar_UInt8_add(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_add_underspec(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_add_mod(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_sub(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_sub_underspec(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_sub_mod(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_mul(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_mul_underspec(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_mul_mod(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_mul_div(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_div(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_rem(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_logand(uint8_t x, uint8_t y);
extern uint8_t FStar_UInt8_logxor(uint8_t x, uint8_t y);
extern uint8_t FStar_UInt8_logor(uint8_t x, uint8_t y);
extern uint8_t FStar_UInt8_lognot(uint8_t x);
extern uint8_t FStar_UInt8_shift_right(uint8_t a, uint32_t s);
extern uint8_t FStar_UInt8_shift_left(uint8_t a, uint32_t s);
extern bool FStar_UInt8_eq(uint8_t a, uint8_t b);
extern bool FStar_UInt8_gt(uint8_t a, uint8_t b);
extern bool FStar_UInt8_gte(uint8_t a, uint8_t b);
extern bool FStar_UInt8_lt(uint8_t a, uint8_t b);
extern bool FStar_UInt8_lte(uint8_t a, uint8_t b);
extern uint8_t FStar_UInt8_minus(uint8_t a);
extern uint32_t FStar_UInt8_n_minus_one;
uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);
uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b);
extern Prims_string FStar_UInt8_to_string(uint8_t uu____722);
extern uint8_t FStar_UInt8_of_string(Prims_string uu____734);
typedef uint8_t FStar_UInt8_byte;
#define __FStar_UInt_8_16_32_64_H_DEFINED
#endif

View file

@@ -1,187 +0,0 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License. */
/******************************************************************************/
/* Machine integers (128-bit arithmetic) */
/******************************************************************************/
/* This header makes KreMLin-generated C code work with:
* - the default setting where we assume the target compiler defines __int128
* - the setting where we use FStar.UInt128's implementation instead; in that
* case, generated C files must be compiled with -DKRML_VERIFIED_UINT128
* - a refinement of the case above, wherein all structures are passed by
* reference, a.k.a. "-fnostruct-passing", meaning that the KreMLin-generated
* must be compiled with -DKRML_NOSTRUCT_PASSING
* Note: no MSVC support in this file.
*/
/* This file is used for both the minimal and generic kremlib distributions. As
* such, it assumes that the machine integers have been bundled the exact same
* way in both cases. */
#include "FStar_UInt128.h"
#include "FStar_UInt_8_16_32_64.h"
#include "C_Endianness.h"
#if !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER)
/* GCC + using native unsigned __int128 support */
uint128_t load128_le(uint8_t *b) {
uint128_t l = (uint128_t)load64_le(b);
uint128_t h = (uint128_t)load64_le(b + 8);
return (h << 64 | l);
}
void store128_le(uint8_t *b, uint128_t n) {
store64_le(b, (uint64_t)n);
store64_le(b + 8, (uint64_t)(n >> 64));
}
uint128_t load128_be(uint8_t *b) {
uint128_t h = (uint128_t)load64_be(b);
uint128_t l = (uint128_t)load64_be(b + 8);
return (h << 64 | l);
}
void store128_be(uint8_t *b, uint128_t n) {
store64_be(b, (uint64_t)(n >> 64));
store64_be(b + 8, (uint64_t)n);
}
uint128_t FStar_UInt128_add(uint128_t x, uint128_t y) {
return x + y;
}
uint128_t FStar_UInt128_mul(uint128_t x, uint128_t y) {
return x * y;
}
uint128_t FStar_UInt128_add_mod(uint128_t x, uint128_t y) {
return x + y;
}
uint128_t FStar_UInt128_sub(uint128_t x, uint128_t y) {
return x - y;
}
uint128_t FStar_UInt128_sub_mod(uint128_t x, uint128_t y) {
return x - y;
}
uint128_t FStar_UInt128_logand(uint128_t x, uint128_t y) {
return x & y;
}
uint128_t FStar_UInt128_logor(uint128_t x, uint128_t y) {
return x | y;
}
uint128_t FStar_UInt128_logxor(uint128_t x, uint128_t y) {
return x ^ y;
}
uint128_t FStar_UInt128_lognot(uint128_t x) {
return ~x;
}
uint128_t FStar_UInt128_shift_left(uint128_t x, uint32_t y) {
return x << y;
}
uint128_t FStar_UInt128_shift_right(uint128_t x, uint32_t y) {
return x >> y;
}
uint128_t FStar_UInt128_uint64_to_uint128(uint64_t x) {
return (uint128_t)x;
}
uint64_t FStar_UInt128_uint128_to_uint64(uint128_t x) {
return (uint64_t)x;
}
uint128_t FStar_UInt128_mul_wide(uint64_t x, uint64_t y) {
return ((uint128_t) x) * y;
}
uint128_t FStar_UInt128_eq_mask(uint128_t x, uint128_t y) {
uint64_t mask =
FStar_UInt64_eq_mask((uint64_t)(x >> 64), (uint64_t)(y >> 64)) &
FStar_UInt64_eq_mask(x, y);
return ((uint128_t)mask) << 64 | mask;
}
uint128_t FStar_UInt128_gte_mask(uint128_t x, uint128_t y) {
uint64_t mask =
(FStar_UInt64_gte_mask(x >> 64, y >> 64) &
~(FStar_UInt64_eq_mask(x >> 64, y >> 64))) |
(FStar_UInt64_eq_mask(x >> 64, y >> 64) & FStar_UInt64_gte_mask(x, y));
return ((uint128_t)mask) << 64 | mask;
}
#elif !defined(_MSC_VER) && defined(KRML_VERIFIED_UINT128)
/* Verified uint128 implementation. */
/* Access 64-bit fields within the int128. */
#define HIGH64_OF(x) ((x)->high)
#define LOW64_OF(x) ((x)->low)
typedef FStar_UInt128_uint128 FStar_UInt128_t_, uint128_t;
/* A series of definitions written using pointers. */
void load128_le_(uint8_t *b, uint128_t *r) {
LOW64_OF(r) = load64_le(b);
HIGH64_OF(r) = load64_le(b + 8);
}
void store128_le_(uint8_t *b, uint128_t *n) {
store64_le(b, LOW64_OF(n));
store64_le(b + 8, HIGH64_OF(n));
}
void load128_be_(uint8_t *b, uint128_t *r) {
HIGH64_OF(r) = load64_be(b);
LOW64_OF(r) = load64_be(b + 8);
}
void store128_be_(uint8_t *b, uint128_t *n) {
store64_be(b, HIGH64_OF(n));
store64_be(b + 8, LOW64_OF(n));
}
# ifndef KRML_NOSTRUCT_PASSING
uint128_t load128_le(uint8_t *b) {
uint128_t r;
load128_le_(b, &r);
return r;
}
void store128_le(uint8_t *b, uint128_t n) {
store128_le_(b, &n);
}
uint128_t load128_be(uint8_t *b) {
uint128_t r;
load128_be_(b, &r);
return r;
}
void store128_be(uint8_t *b, uint128_t n) {
store128_be_(b, &n);
}
# else /* !defined(KRML_STRUCT_PASSING) */
# define print128 print128_
# define load128_le load128_le_
# define store128_le store128_le_
# define load128_be load128_be_
# define store128_be store128_be_
# endif /* KRML_STRUCT_PASSING */
#endif
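
As the header comment above explains, this file supports two configurations: the default one, where the target compiler provides unsigned __int128, and the verified struct-based one selected with -DKRML_VERIFIED_UINT128. Under the default assumption (GCC or Clang on a 64-bit target), FStar_UInt128_mul_wide is just a widening multiply; a small sketch, not part of the commit:

#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 uint128_t;  /* default, non-verified representation */
uint128_t FStar_UInt128_mul_wide(uint64_t x, uint64_t y); /* defined above */

int main(void)
{
  uint128_t p = FStar_UInt128_mul_wide(UINT64_MAX, 2U); /* 2^65 - 2 */
  printf("high=%llu low=%llu\n",
         (unsigned long long)(uint64_t)(p >> 64),
         (unsigned long long)(uint64_t)p);
  return 0;
}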

View file

@@ -1 +0,0 @@
projecteverest/hacl-star-linux:648c1eb12363

251
3rdparty/hacl-star/evercrypt/EverCrypt_AutoConfig2.c vendored Normal file
View file

@@ -0,0 +1,251 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_AutoConfig2.h"
static bool EverCrypt_AutoConfig2_cpu_has_shaext[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_aesni[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_pclmulqdq[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_avx2[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_avx[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_bmi2[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_adx[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_sse[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_movbe[1U] = { false };
static bool EverCrypt_AutoConfig2_cpu_has_rdrand[1U] = { false };
static bool EverCrypt_AutoConfig2_user_wants_hacl[1U] = { true };
static bool EverCrypt_AutoConfig2_user_wants_vale[1U] = { true };
static bool EverCrypt_AutoConfig2_user_wants_openssl[1U] = { true };
static bool EverCrypt_AutoConfig2_user_wants_bcrypt[1U] = { false };
bool EverCrypt_AutoConfig2_has_shaext()
{
return EverCrypt_AutoConfig2_cpu_has_shaext[0U];
}
bool EverCrypt_AutoConfig2_has_aesni()
{
return EverCrypt_AutoConfig2_cpu_has_aesni[0U];
}
bool EverCrypt_AutoConfig2_has_pclmulqdq()
{
return EverCrypt_AutoConfig2_cpu_has_pclmulqdq[0U];
}
bool EverCrypt_AutoConfig2_has_avx2()
{
return EverCrypt_AutoConfig2_cpu_has_avx2[0U];
}
bool EverCrypt_AutoConfig2_has_avx()
{
return EverCrypt_AutoConfig2_cpu_has_avx[0U];
}
bool EverCrypt_AutoConfig2_has_bmi2()
{
return EverCrypt_AutoConfig2_cpu_has_bmi2[0U];
}
bool EverCrypt_AutoConfig2_has_adx()
{
return EverCrypt_AutoConfig2_cpu_has_adx[0U];
}
bool EverCrypt_AutoConfig2_has_sse()
{
return EverCrypt_AutoConfig2_cpu_has_sse[0U];
}
bool EverCrypt_AutoConfig2_has_movbe()
{
return EverCrypt_AutoConfig2_cpu_has_movbe[0U];
}
bool EverCrypt_AutoConfig2_has_rdrand()
{
return EverCrypt_AutoConfig2_cpu_has_rdrand[0U];
}
bool EverCrypt_AutoConfig2_wants_vale()
{
return EverCrypt_AutoConfig2_user_wants_vale[0U];
}
bool EverCrypt_AutoConfig2_wants_hacl()
{
return EverCrypt_AutoConfig2_user_wants_hacl[0U];
}
bool EverCrypt_AutoConfig2_wants_openssl()
{
return EverCrypt_AutoConfig2_user_wants_openssl[0U];
}
bool EverCrypt_AutoConfig2_wants_bcrypt()
{
return EverCrypt_AutoConfig2_user_wants_bcrypt[0U];
}
void EverCrypt_AutoConfig2_recall()
{
}
void EverCrypt_AutoConfig2_init()
{
#if EVERCRYPT_TARGETCONFIG_X64
uint64_t scrut = check_aesni();
if (scrut != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_aesni[0U] = true;
EverCrypt_AutoConfig2_cpu_has_pclmulqdq[0U] = true;
}
uint64_t scrut0 = check_sha();
if (scrut0 != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_shaext[0U] = true;
}
uint64_t scrut1 = check_adx_bmi2();
if (scrut1 != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_bmi2[0U] = true;
EverCrypt_AutoConfig2_cpu_has_adx[0U] = true;
}
uint64_t scrut2 = check_avx();
if (scrut2 != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_avx[0U] = true;
}
uint64_t scrut3 = check_avx2();
if (scrut3 != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_avx2[0U] = true;
}
uint64_t scrut4 = check_sse();
if (scrut4 != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_sse[0U] = true;
}
uint64_t scrut5 = check_movbe();
if (scrut5 != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_movbe[0U] = true;
}
uint64_t scrut6 = check_rdrand();
if (scrut6 != (uint64_t)0U)
{
EverCrypt_AutoConfig2_cpu_has_rdrand[0U] = true;
}
#endif
EverCrypt_AutoConfig2_user_wants_hacl[0U] = true;
EverCrypt_AutoConfig2_user_wants_vale[0U] = true;
EverCrypt_AutoConfig2_user_wants_bcrypt[0U] = false;
EverCrypt_AutoConfig2_user_wants_openssl[0U] = true;
}
void EverCrypt_AutoConfig2_disable_avx2()
{
EverCrypt_AutoConfig2_cpu_has_avx2[0U] = false;
}
void EverCrypt_AutoConfig2_disable_avx()
{
EverCrypt_AutoConfig2_cpu_has_avx[0U] = false;
}
void EverCrypt_AutoConfig2_disable_bmi2()
{
EverCrypt_AutoConfig2_cpu_has_bmi2[0U] = false;
}
void EverCrypt_AutoConfig2_disable_adx()
{
EverCrypt_AutoConfig2_cpu_has_adx[0U] = false;
}
void EverCrypt_AutoConfig2_disable_shaext()
{
EverCrypt_AutoConfig2_cpu_has_shaext[0U] = false;
}
void EverCrypt_AutoConfig2_disable_aesni()
{
EverCrypt_AutoConfig2_cpu_has_aesni[0U] = false;
}
void EverCrypt_AutoConfig2_disable_pclmulqdq()
{
EverCrypt_AutoConfig2_cpu_has_pclmulqdq[0U] = false;
}
void EverCrypt_AutoConfig2_disable_sse()
{
EverCrypt_AutoConfig2_cpu_has_sse[0U] = false;
}
void EverCrypt_AutoConfig2_disable_movbe()
{
EverCrypt_AutoConfig2_cpu_has_movbe[0U] = false;
}
void EverCrypt_AutoConfig2_disable_rdrand()
{
EverCrypt_AutoConfig2_cpu_has_rdrand[0U] = false;
}
void EverCrypt_AutoConfig2_disable_vale()
{
EverCrypt_AutoConfig2_user_wants_vale[0U] = false;
}
void EverCrypt_AutoConfig2_disable_hacl()
{
EverCrypt_AutoConfig2_user_wants_hacl[0U] = false;
}
void EverCrypt_AutoConfig2_disable_openssl()
{
EverCrypt_AutoConfig2_user_wants_openssl[0U] = false;
}
void EverCrypt_AutoConfig2_disable_bcrypt()
{
EverCrypt_AutoConfig2_user_wants_bcrypt[0U] = false;
}
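
EverCrypt_AutoConfig2_init() probes the CPU once (through the Vale check_* routines on x64) and caches the results in the static flags above; the has_* accessors and disable_* setters are how callers pick or veto an implementation. A hedged usage sketch, not part of the commit:

#include <stdio.h>
#include "EverCrypt_AutoConfig2.h"

int main(void)
{
  EverCrypt_AutoConfig2_init(); /* run CPU feature detection once */
  printf("AES-NI:  %d\n", EverCrypt_AutoConfig2_has_aesni());
  printf("SHA-EXT: %d\n", EverCrypt_AutoConfig2_has_shaext());
  /* e.g. force the portable HACL* fallback paths for testing */
  EverCrypt_AutoConfig2_disable_aesni();
  EverCrypt_AutoConfig2_disable_shaext();
  return 0;
}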

99
3rdparty/hacl-star/evercrypt/EverCrypt_AutoConfig2.h vendored Normal file
View file

@@ -0,0 +1,99 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_AutoConfig2_H
#define __EverCrypt_AutoConfig2_H
#include "Vale.h"
bool EverCrypt_AutoConfig2_has_shaext();
bool EverCrypt_AutoConfig2_has_aesni();
bool EverCrypt_AutoConfig2_has_pclmulqdq();
bool EverCrypt_AutoConfig2_has_avx2();
bool EverCrypt_AutoConfig2_has_avx();
bool EverCrypt_AutoConfig2_has_bmi2();
bool EverCrypt_AutoConfig2_has_adx();
bool EverCrypt_AutoConfig2_has_sse();
bool EverCrypt_AutoConfig2_has_movbe();
bool EverCrypt_AutoConfig2_has_rdrand();
bool EverCrypt_AutoConfig2_wants_vale();
bool EverCrypt_AutoConfig2_wants_hacl();
bool EverCrypt_AutoConfig2_wants_openssl();
bool EverCrypt_AutoConfig2_wants_bcrypt();
void EverCrypt_AutoConfig2_recall();
void EverCrypt_AutoConfig2_init();
typedef void (*EverCrypt_AutoConfig2_disabler)();
void EverCrypt_AutoConfig2_disable_avx2();
void EverCrypt_AutoConfig2_disable_avx();
void EverCrypt_AutoConfig2_disable_bmi2();
void EverCrypt_AutoConfig2_disable_adx();
void EverCrypt_AutoConfig2_disable_shaext();
void EverCrypt_AutoConfig2_disable_aesni();
void EverCrypt_AutoConfig2_disable_pclmulqdq();
void EverCrypt_AutoConfig2_disable_sse();
void EverCrypt_AutoConfig2_disable_movbe();
void EverCrypt_AutoConfig2_disable_rdrand();
void EverCrypt_AutoConfig2_disable_vale();
void EverCrypt_AutoConfig2_disable_hacl();
void EverCrypt_AutoConfig2_disable_openssl();
void EverCrypt_AutoConfig2_disable_bcrypt();
#define __EverCrypt_AutoConfig2_H_DEFINED
#endif

411
3rdparty/hacl-star/evercrypt/EverCrypt_CTR.c vendored Normal file
View file

@@ -0,0 +1,411 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_CTR.h"
typedef struct EverCrypt_CTR_state_s_s
{
Spec_Cipher_Expansion_impl i;
uint8_t *iv;
uint32_t iv_len;
uint8_t *xkey;
uint32_t ctr;
}
EverCrypt_CTR_state_s;
bool
EverCrypt_CTR_uu___is_State(Spec_Agile_Cipher_cipher_alg a, EverCrypt_CTR_state_s projectee)
{
return true;
}
Spec_Cipher_Expansion_impl
EverCrypt_CTR___proj__State__item__i(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
)
{
return projectee.i;
}
uint8_t
*EverCrypt_CTR___proj__State__item__iv(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
)
{
return projectee.iv;
}
uint32_t
EverCrypt_CTR___proj__State__item__iv_len(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
)
{
return projectee.iv_len;
}
uint8_t
*EverCrypt_CTR___proj__State__item__xkey(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
)
{
return projectee.xkey;
}
uint32_t
EverCrypt_CTR___proj__State__item__ctr(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
)
{
return projectee.ctr;
}
uint8_t EverCrypt_CTR_xor8(uint8_t a, uint8_t b)
{
return a ^ b;
}
Spec_Agile_Cipher_cipher_alg EverCrypt_CTR_alg_of_state(EverCrypt_CTR_state_s *s)
{
EverCrypt_CTR_state_s scrut = *s;
Spec_Cipher_Expansion_impl i1 = scrut.i;
return Spec_Cipher_Expansion_cipher_alg_of_impl(i1);
}
static Spec_Cipher_Expansion_impl
EverCrypt_CTR_vale_impl_of_alg(Spec_Agile_Cipher_cipher_alg a)
{
switch (a)
{
case Spec_Agile_Cipher_AES128:
{
return Spec_Cipher_Expansion_Vale_AES128;
}
case Spec_Agile_Cipher_AES256:
{
return Spec_Cipher_Expansion_Vale_AES256;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
}
EverCrypt_Error_error_code
EverCrypt_CTR_create_in(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s **dst,
uint8_t *k1,
uint8_t *iv,
uint32_t iv_len,
uint32_t c
)
{
switch (a)
{
case Spec_Agile_Cipher_AES128:
{
bool has_aesni1 = EverCrypt_AutoConfig2_has_aesni();
bool has_pclmulqdq1 = EverCrypt_AutoConfig2_has_pclmulqdq();
bool has_avx1 = EverCrypt_AutoConfig2_has_avx();
bool has_sse1 = EverCrypt_AutoConfig2_has_sse();
if (iv_len < (uint32_t)12U)
{
return EverCrypt_Error_InvalidIVLength;
}
#if EVERCRYPT_TARGETCONFIG_X64
if (has_aesni1 && has_pclmulqdq1 && has_avx1 && has_sse1)
{
uint8_t *ek = KRML_HOST_CALLOC((uint32_t)304U, sizeof (uint8_t));
uint8_t *keys_b = ek;
uint8_t *hkeys_b = ek + (uint32_t)176U;
uint64_t scrut = aes128_key_expansion(k1, keys_b);
uint64_t scrut0 = aes128_keyhash_init(keys_b, hkeys_b);
uint8_t *iv_ = KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint8_t));
memcpy(iv_, iv, iv_len * sizeof iv[0U]);
KRML_CHECK_SIZE(sizeof (EverCrypt_CTR_state_s), (uint32_t)1U);
EverCrypt_CTR_state_s *p = KRML_HOST_MALLOC(sizeof (EverCrypt_CTR_state_s));
p[0U]
=
(
(EverCrypt_CTR_state_s){
.i = EverCrypt_CTR_vale_impl_of_alg(Spec_Cipher_Expansion_cipher_alg_of_impl(Spec_Cipher_Expansion_Vale_AES128)),
.iv = iv_,
.iv_len = iv_len,
.xkey = ek,
.ctr = c
}
);
*dst = p;
return EverCrypt_Error_Success;
}
#endif
return EverCrypt_Error_UnsupportedAlgorithm;
}
case Spec_Agile_Cipher_AES256:
{
bool has_aesni1 = EverCrypt_AutoConfig2_has_aesni();
bool has_pclmulqdq1 = EverCrypt_AutoConfig2_has_pclmulqdq();
bool has_avx1 = EverCrypt_AutoConfig2_has_avx();
bool has_sse1 = EverCrypt_AutoConfig2_has_sse();
if (iv_len < (uint32_t)12U)
{
return EverCrypt_Error_InvalidIVLength;
}
#if EVERCRYPT_TARGETCONFIG_X64
if (has_aesni1 && has_pclmulqdq1 && has_avx1 && has_sse1)
{
uint8_t *ek = KRML_HOST_CALLOC((uint32_t)368U, sizeof (uint8_t));
uint8_t *keys_b = ek;
uint8_t *hkeys_b = ek + (uint32_t)240U;
uint64_t scrut = aes256_key_expansion(k1, keys_b);
uint64_t scrut0 = aes256_keyhash_init(keys_b, hkeys_b);
uint8_t *iv_ = KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint8_t));
memcpy(iv_, iv, iv_len * sizeof iv[0U]);
KRML_CHECK_SIZE(sizeof (EverCrypt_CTR_state_s), (uint32_t)1U);
EverCrypt_CTR_state_s *p = KRML_HOST_MALLOC(sizeof (EverCrypt_CTR_state_s));
p[0U]
=
(
(EverCrypt_CTR_state_s){
.i = EverCrypt_CTR_vale_impl_of_alg(Spec_Cipher_Expansion_cipher_alg_of_impl(Spec_Cipher_Expansion_Vale_AES256)),
.iv = iv_,
.iv_len = iv_len,
.xkey = ek,
.ctr = c
}
);
*dst = p;
return EverCrypt_Error_Success;
}
#endif
return EverCrypt_Error_UnsupportedAlgorithm;
}
case Spec_Agile_Cipher_CHACHA20:
{
uint8_t *ek = KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
memcpy(ek, k1, (uint32_t)32U * sizeof k1[0U]);
KRML_CHECK_SIZE(sizeof (uint8_t), iv_len);
uint8_t *iv_ = KRML_HOST_CALLOC(iv_len, sizeof (uint8_t));
memcpy(iv_, iv, iv_len * sizeof iv[0U]);
KRML_CHECK_SIZE(sizeof (EverCrypt_CTR_state_s), (uint32_t)1U);
EverCrypt_CTR_state_s *p = KRML_HOST_MALLOC(sizeof (EverCrypt_CTR_state_s));
p[0U]
=
(
(EverCrypt_CTR_state_s){
.i = Spec_Cipher_Expansion_Hacl_CHACHA20,
.iv = iv_,
.iv_len = (uint32_t)12U,
.xkey = ek,
.ctr = c
}
);
*dst = p;
return EverCrypt_Error_Success;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
}
void
EverCrypt_CTR_init(
EverCrypt_CTR_state_s *p,
uint8_t *k1,
uint8_t *iv,
uint32_t iv_len,
uint32_t c
)
{
EverCrypt_CTR_state_s scrut0 = *p;
uint8_t *ek = scrut0.xkey;
uint8_t *iv_ = scrut0.iv;
Spec_Cipher_Expansion_impl i1 = scrut0.i;
memcpy(iv_, iv, iv_len * sizeof iv[0U]);
switch (i1)
{
case Spec_Cipher_Expansion_Vale_AES128:
{
uint8_t *keys_b = ek;
uint8_t *hkeys_b = ek + (uint32_t)176U;
uint64_t scrut = aes128_key_expansion(k1, keys_b);
uint64_t scrut1 = aes128_keyhash_init(keys_b, hkeys_b);
break;
}
case Spec_Cipher_Expansion_Vale_AES256:
{
uint8_t *keys_b = ek;
uint8_t *hkeys_b = ek + (uint32_t)240U;
uint64_t scrut = aes256_key_expansion(k1, keys_b);
uint64_t scrut1 = aes256_keyhash_init(keys_b, hkeys_b);
break;
}
case Spec_Cipher_Expansion_Hacl_CHACHA20:
{
memcpy(ek, k1, (uint32_t)32U * sizeof k1[0U]);
break;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
*p = ((EverCrypt_CTR_state_s){ .i = i1, .iv = iv_, .iv_len = iv_len, .xkey = ek, .ctr = c });
}
void EverCrypt_CTR_update_block(EverCrypt_CTR_state_s *p, uint8_t *dst, uint8_t *src)
{
EverCrypt_CTR_state_s scrut = *p;
Spec_Cipher_Expansion_impl i1 = scrut.i;
uint8_t *iv = scrut.iv;
uint8_t *ek = scrut.xkey;
uint32_t c01 = scrut.ctr;
switch (i1)
{
case Spec_Cipher_Expansion_Vale_AES128:
{
EverCrypt_CTR_state_s scrut0 = *p;
uint32_t c02 = scrut0.ctr;
uint8_t *ek1 = scrut0.xkey;
uint32_t iv_len1 = scrut0.iv_len;
uint8_t *iv1 = scrut0.iv;
uint8_t ctr_block1[16U] = { 0U };
memcpy(ctr_block1, iv1, iv_len1 * sizeof iv1[0U]);
uint128_t uu____0 = load128_be(ctr_block1);
uint128_t c = uu____0 + (uint128_t)(uint64_t)c02;
store128_le(ctr_block1, c);
uint8_t *uu____1 = ek1;
uint8_t inout_b[16U] = { 0U };
uint32_t num_blocks = (uint32_t)(uint64_t)16U / (uint32_t)16U;
uint32_t num_bytes_ = num_blocks * (uint32_t)16U;
uint8_t *in_b_ = src;
uint8_t *out_b_ = dst;
memcpy(inout_b, src + num_bytes_, (uint32_t)(uint64_t)16U % (uint32_t)16U * sizeof src[0U]);
uint64_t
scrut1 =
gctr128_bytes(in_b_,
(uint64_t)16U,
out_b_,
inout_b,
uu____1,
ctr_block1,
(uint64_t)num_blocks);
memcpy(dst + num_bytes_,
inout_b,
(uint32_t)(uint64_t)16U % (uint32_t)16U * sizeof inout_b[0U]);
uint32_t c4 = c02 + (uint32_t)1U;
*p
=
(
(EverCrypt_CTR_state_s){
.i = Spec_Cipher_Expansion_Vale_AES128,
.iv = iv1,
.iv_len = iv_len1,
.xkey = ek1,
.ctr = c4
}
);
break;
}
case Spec_Cipher_Expansion_Vale_AES256:
{
EverCrypt_CTR_state_s scrut0 = *p;
uint32_t c02 = scrut0.ctr;
uint8_t *ek1 = scrut0.xkey;
uint32_t iv_len1 = scrut0.iv_len;
uint8_t *iv1 = scrut0.iv;
uint8_t ctr_block1[16U] = { 0U };
memcpy(ctr_block1, iv1, iv_len1 * sizeof iv1[0U]);
uint128_t uu____2 = load128_be(ctr_block1);
uint128_t c = uu____2 + (uint128_t)(uint64_t)c02;
store128_le(ctr_block1, c);
uint8_t *uu____3 = ek1;
uint8_t inout_b[16U] = { 0U };
uint32_t num_blocks = (uint32_t)(uint64_t)16U / (uint32_t)16U;
uint32_t num_bytes_ = num_blocks * (uint32_t)16U;
uint8_t *in_b_ = src;
uint8_t *out_b_ = dst;
memcpy(inout_b, src + num_bytes_, (uint32_t)(uint64_t)16U % (uint32_t)16U * sizeof src[0U]);
uint64_t
scrut1 =
gctr256_bytes(in_b_,
(uint64_t)16U,
out_b_,
inout_b,
uu____3,
ctr_block1,
(uint64_t)num_blocks);
memcpy(dst + num_bytes_,
inout_b,
(uint32_t)(uint64_t)16U % (uint32_t)16U * sizeof inout_b[0U]);
uint32_t c4 = c02 + (uint32_t)1U;
*p
=
(
(EverCrypt_CTR_state_s){
.i = Spec_Cipher_Expansion_Vale_AES256,
.iv = iv1,
.iv_len = iv_len1,
.xkey = ek1,
.ctr = c4
}
);
break;
}
case Spec_Cipher_Expansion_Hacl_CHACHA20:
{
uint32_t ctx[16U] = { 0U };
Hacl_Impl_Chacha20_chacha20_init(ctx, ek, iv, (uint32_t)0U);
Hacl_Impl_Chacha20_chacha20_encrypt_block(ctx, dst, c01, src);
break;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
}
void EverCrypt_CTR_free(EverCrypt_CTR_state_s *p)
{
EverCrypt_CTR_state_s scrut = *p;
uint8_t *iv = scrut.iv;
uint8_t *ek = scrut.xkey;
KRML_HOST_FREE(iv);
KRML_HOST_FREE(ek);
KRML_HOST_FREE(p);
}

108
3rdparty/hacl-star/evercrypt/EverCrypt_CTR.h vendored Normal file

@@ -0,0 +1,108 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_CTR_H
#define __EverCrypt_CTR_H
#include "Hacl_Chacha20.h"
#include "Hacl_Kremlib.h"
#include "Vale.h"
#include "EverCrypt_AutoConfig2.h"
#include "EverCrypt_Error.h"
#include "Hacl_Spec.h"
typedef struct EverCrypt_CTR_state_s_s EverCrypt_CTR_state_s;
bool
EverCrypt_CTR_uu___is_State(Spec_Agile_Cipher_cipher_alg a, EverCrypt_CTR_state_s projectee);
Spec_Cipher_Expansion_impl
EverCrypt_CTR___proj__State__item__i(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
);
uint8_t
*EverCrypt_CTR___proj__State__item__iv(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
);
uint32_t
EverCrypt_CTR___proj__State__item__iv_len(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
);
uint8_t
*EverCrypt_CTR___proj__State__item__xkey(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
);
uint32_t
EverCrypt_CTR___proj__State__item__ctr(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s projectee
);
typedef uint8_t EverCrypt_CTR_uint8;
uint8_t EverCrypt_CTR_xor8(uint8_t a, uint8_t b);
typedef void *EverCrypt_CTR_e_alg;
Spec_Agile_Cipher_cipher_alg EverCrypt_CTR_alg_of_state(EverCrypt_CTR_state_s *s);
EverCrypt_Error_error_code
EverCrypt_CTR_create_in(
Spec_Agile_Cipher_cipher_alg a,
EverCrypt_CTR_state_s **dst,
uint8_t *k1,
uint8_t *iv,
uint32_t iv_len,
uint32_t c
);
void
EverCrypt_CTR_init(
EverCrypt_CTR_state_s *p,
uint8_t *k1,
uint8_t *iv,
uint32_t iv_len,
uint32_t c
);
void EverCrypt_CTR_update_block(EverCrypt_CTR_state_s *p, uint8_t *dst, uint8_t *src);
void EverCrypt_CTR_free(EverCrypt_CTR_state_s *p);
#define __EverCrypt_CTR_H_DEFINED
#endif
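A minimal usage sketch for this counter-mode API, assuming the ChaCha20 path so it does not depend on run-time CPU feature detection; the helper name and buffer sizes are illustrative and not part of this commit:

#include "EverCrypt_CTR.h"

/* Hypothetical sketch: encrypt one 64-byte ChaCha20 block at counter value 0. */
static int example_ctr_chacha20(const uint8_t key[32], const uint8_t iv[12],
                                const uint8_t src[64], uint8_t dst[64])
{
  EverCrypt_CTR_state_s *st = NULL;
  EverCrypt_Error_error_code
  err = EverCrypt_CTR_create_in(Spec_Agile_Cipher_CHACHA20, &st,
                                (uint8_t *)key, (uint8_t *)iv, (uint32_t)12U, (uint32_t)0U);
  if (err != EverCrypt_Error_Success)
  {
    return -1;
  }
  /* For the ChaCha20 implementation a "block" is 64 bytes; the AES variants use 16. */
  EverCrypt_CTR_update_block(st, dst, (uint8_t *)src);
  EverCrypt_CTR_free(st);
  return 0;
}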

41
3rdparty/hacl-star/evercrypt/EverCrypt_Cipher.c vendored Normal file

@@ -0,0 +1,41 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_Cipher.h"
void
EverCrypt_Cipher_chacha20(
uint32_t len,
uint8_t *dst,
uint8_t *src,
uint8_t *key,
uint8_t *iv,
uint32_t ctr
)
{
uint32_t ctx[16U] = { 0U };
Hacl_Impl_Chacha20_chacha20_init(ctx, key, iv, ctr);
Hacl_Impl_Chacha20_chacha20_update(ctx, len, dst, src);
}

47
3rdparty/hacl-star/evercrypt/EverCrypt_Cipher.h vendored Normal file

@@ -0,0 +1,47 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_Cipher_H
#define __EverCrypt_Cipher_H
#include "Hacl_Chacha20.h"
void
EverCrypt_Cipher_chacha20(
uint32_t len,
uint8_t *dst,
uint8_t *src,
uint8_t *key,
uint8_t *iv,
uint32_t ctr
);
#define __EverCrypt_Cipher_H_DEFINED
#endif
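The wrapper above takes the full message length, so one call processes the whole buffer; a small sketch (the helper name and the starting counter of 0 are illustrative assumptions):

#include "EverCrypt_Cipher.h"

/* Hypothetical sketch: ChaCha20 is its own inverse, so the same call encrypts and decrypts. */
static void example_chacha20(uint8_t *out, const uint8_t *msg, uint32_t len,
                             const uint8_t key[32], const uint8_t nonce[12])
{
  EverCrypt_Cipher_chacha20(len, out, (uint8_t *)msg, (uint8_t *)key,
                            (uint8_t *)nonce, (uint32_t)0U);
}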

68
3rdparty/hacl-star/evercrypt/EverCrypt_Curve25519.c vendored Normal file

@@ -0,0 +1,68 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_Curve25519.h"
inline static bool EverCrypt_Curve25519_has_adx_bmi2()
{
bool has_bmi21 = EverCrypt_AutoConfig2_has_bmi2();
bool has_adx1 = EverCrypt_AutoConfig2_has_adx();
return has_bmi21 && has_adx1;
}
void EverCrypt_Curve25519_secret_to_public(uint8_t *pub, uint8_t *priv)
{
#if EVERCRYPT_TARGETCONFIG_X64
if (EverCrypt_Curve25519_has_adx_bmi2())
{
Hacl_Curve25519_64_secret_to_public(pub, priv);
return;
}
#endif
Hacl_Curve25519_51_secret_to_public(pub, priv);
}
void EverCrypt_Curve25519_scalarmult(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub)
{
#if EVERCRYPT_TARGETCONFIG_X64
if (EverCrypt_Curve25519_has_adx_bmi2())
{
Hacl_Curve25519_64_scalarmult(shared, my_priv, their_pub);
return;
}
#endif
Hacl_Curve25519_51_scalarmult(shared, my_priv, their_pub);
}
bool EverCrypt_Curve25519_ecdh(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub)
{
#if EVERCRYPT_TARGETCONFIG_X64
if (EverCrypt_Curve25519_has_adx_bmi2())
{
return Hacl_Curve25519_64_ecdh(shared, my_priv, their_pub);
}
#endif
return Hacl_Curve25519_51_ecdh(shared, my_priv, their_pub);
}

45
3rdparty/hacl-star/evercrypt/EverCrypt_Curve25519.h vendored Normal file

@@ -0,0 +1,45 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_Curve25519_H
#define __EverCrypt_Curve25519_H
#include "EverCrypt_AutoConfig2.h"
#include "Hacl_Curve25519_64.h"
#include "Hacl_Curve25519_51.h"
void EverCrypt_Curve25519_secret_to_public(uint8_t *pub, uint8_t *priv);
void EverCrypt_Curve25519_scalarmult(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub);
bool EverCrypt_Curve25519_ecdh(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub);
#define __EverCrypt_Curve25519_H_DEFINED
#endif
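The multiplexed X25519 functions above fall back to the portable Hacl_Curve25519_51 code when ADX/BMI2 are not detected; calling EverCrypt_AutoConfig2_init() once beforehand should allow the faster path to be selected. A hedged usage sketch (helper name is illustrative):

#include "EverCrypt_Curve25519.h"

/* Hypothetical sketch: X25519 key agreement; all three buffers are 32 bytes. */
static bool example_x25519(uint8_t shared[32], const uint8_t my_priv[32],
                           const uint8_t their_pub[32])
{
  /* ecdh returns false when the computed shared secret is all zeroes. */
  return EverCrypt_Curve25519_ecdh(shared, (uint8_t *)my_priv, (uint8_t *)their_pub);
}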

2135
3rdparty/hacl-star/evercrypt/EverCrypt_DRBG.c vendored Normal file

File diff suppressed because it is too large

239
3rdparty/hacl-star/evercrypt/EverCrypt_DRBG.h vendored Normal file

@@ -0,0 +1,239 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_DRBG_H
#define __EverCrypt_DRBG_H
#include "EverCrypt_HMAC.h"
#include "Hacl_Leftovers.h"
#include "Lib_RandomBuffer_System.h"
#include "Hacl_Spec.h"
#include "Hacl_Lib.h"
typedef Spec_Hash_Definitions_hash_alg EverCrypt_DRBG_supported_alg;
extern uint32_t EverCrypt_DRBG_reseed_interval;
extern uint32_t EverCrypt_DRBG_max_output_length;
extern uint32_t EverCrypt_DRBG_max_length;
extern uint32_t EverCrypt_DRBG_max_personalization_string_length;
extern uint32_t EverCrypt_DRBG_max_additional_input_length;
uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a);
#define EverCrypt_DRBG_SHA1_s 0
#define EverCrypt_DRBG_SHA2_256_s 1
#define EverCrypt_DRBG_SHA2_384_s 2
#define EverCrypt_DRBG_SHA2_512_s 3
typedef uint8_t EverCrypt_DRBG_state_s_tags;
typedef struct EverCrypt_DRBG_state_s_s EverCrypt_DRBG_state_s;
bool
EverCrypt_DRBG_uu___is_SHA1_s(
Spec_Hash_Definitions_hash_alg uu____164,
EverCrypt_DRBG_state_s projectee
);
Hacl_HMAC_DRBG_state
EverCrypt_DRBG___proj__SHA1_s__item___0(
Spec_Hash_Definitions_hash_alg uu____207,
EverCrypt_DRBG_state_s projectee
);
bool
EverCrypt_DRBG_uu___is_SHA2_256_s(
Spec_Hash_Definitions_hash_alg uu____239,
EverCrypt_DRBG_state_s projectee
);
Hacl_HMAC_DRBG_state
EverCrypt_DRBG___proj__SHA2_256_s__item___0(
Spec_Hash_Definitions_hash_alg uu____282,
EverCrypt_DRBG_state_s projectee
);
bool
EverCrypt_DRBG_uu___is_SHA2_384_s(
Spec_Hash_Definitions_hash_alg uu____314,
EverCrypt_DRBG_state_s projectee
);
Hacl_HMAC_DRBG_state
EverCrypt_DRBG___proj__SHA2_384_s__item___0(
Spec_Hash_Definitions_hash_alg uu____357,
EverCrypt_DRBG_state_s projectee
);
bool
EverCrypt_DRBG_uu___is_SHA2_512_s(
Spec_Hash_Definitions_hash_alg uu____389,
EverCrypt_DRBG_state_s projectee
);
Hacl_HMAC_DRBG_state
EverCrypt_DRBG___proj__SHA2_512_s__item___0(
Spec_Hash_Definitions_hash_alg uu____432,
EverCrypt_DRBG_state_s projectee
);
EverCrypt_DRBG_state_s *EverCrypt_DRBG_create(Spec_Hash_Definitions_hash_alg a);
bool
EverCrypt_DRBG_instantiate_sha1(
EverCrypt_DRBG_state_s *st,
uint8_t *personalization_string,
uint32_t personalization_string_len
);
bool
EverCrypt_DRBG_instantiate_sha2_256(
EverCrypt_DRBG_state_s *st,
uint8_t *personalization_string,
uint32_t personalization_string_len
);
bool
EverCrypt_DRBG_instantiate_sha2_384(
EverCrypt_DRBG_state_s *st,
uint8_t *personalization_string,
uint32_t personalization_string_len
);
bool
EverCrypt_DRBG_instantiate_sha2_512(
EverCrypt_DRBG_state_s *st,
uint8_t *personalization_string,
uint32_t personalization_string_len
);
bool
EverCrypt_DRBG_reseed_sha1(
EverCrypt_DRBG_state_s *st,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_reseed_sha2_256(
EverCrypt_DRBG_state_s *st,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_reseed_sha2_384(
EverCrypt_DRBG_state_s *st,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_reseed_sha2_512(
EverCrypt_DRBG_state_s *st,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_generate_sha1(
uint8_t *output,
EverCrypt_DRBG_state_s *st,
uint32_t n1,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_generate_sha2_256(
uint8_t *output,
EverCrypt_DRBG_state_s *st,
uint32_t n1,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_generate_sha2_384(
uint8_t *output,
EverCrypt_DRBG_state_s *st,
uint32_t n1,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_generate_sha2_512(
uint8_t *output,
EverCrypt_DRBG_state_s *st,
uint32_t n1,
uint8_t *additional_input,
uint32_t additional_input_len
);
void EverCrypt_DRBG_uninstantiate_sha1(EverCrypt_DRBG_state_s *st);
void EverCrypt_DRBG_uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st);
void EverCrypt_DRBG_uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st);
void EverCrypt_DRBG_uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st);
bool
EverCrypt_DRBG_instantiate(
EverCrypt_DRBG_state_s *st,
uint8_t *personalization_string,
uint32_t personalization_string_len
);
bool
EverCrypt_DRBG_reseed(
EverCrypt_DRBG_state_s *st,
uint8_t *additional_input,
uint32_t additional_input_len
);
bool
EverCrypt_DRBG_generate(
uint8_t *output,
EverCrypt_DRBG_state_s *st,
uint32_t n1,
uint8_t *additional_input,
uint32_t additional_input_len
);
void EverCrypt_DRBG_uninstantiate(EverCrypt_DRBG_state_s *st);
#define __EverCrypt_DRBG_H_DEFINED
#endif
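A rough sketch of the agile HMAC-DRBG lifecycle declared above; the empty personalization and additional-input buffers are an assumption, and whether the state returned by create needs cleanup beyond uninstantiate is not shown here:

#include "EverCrypt_DRBG.h"

/* Hypothetical sketch: draw 32 random bytes from an HMAC-DRBG over SHA-256. */
static bool example_drbg(uint8_t out[32])
{
  EverCrypt_DRBG_state_s *st = EverCrypt_DRBG_create(Spec_Hash_Definitions_SHA2_256);
  uint8_t empty[1U] = { 0U };
  if (!EverCrypt_DRBG_instantiate(st, empty, (uint32_t)0U))
  {
    return false;
  }
  bool ok = EverCrypt_DRBG_generate(out, st, (uint32_t)32U, empty, (uint32_t)0U);
  EverCrypt_DRBG_uninstantiate(st);
  return ok;
}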

52
3rdparty/hacl-star/evercrypt/EverCrypt_Ed25519.c vendored Normal file

@@ -0,0 +1,52 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_Ed25519.h"
void EverCrypt_Ed25519_sign(uint8_t *signature, uint8_t *secret1, uint32_t len, uint8_t *msg)
{
Hacl_Ed25519_sign(signature, secret1, len, msg);
}
bool EverCrypt_Ed25519_verify(uint8_t *output, uint32_t len, uint8_t *msg, uint8_t *signature)
{
return Hacl_Ed25519_verify(output, len, msg, signature);
}
void EverCrypt_Ed25519_secret_to_public(uint8_t *output, uint8_t *secret1)
{
Hacl_Ed25519_secret_to_public(output, secret1);
}
void EverCrypt_Ed25519_expand_keys(uint8_t *ks, uint8_t *secret1)
{
Hacl_Ed25519_expand_keys(ks, secret1);
}
void
EverCrypt_Ed25519_sign_expanded(uint8_t *signature, uint8_t *ks, uint32_t len, uint8_t *msg)
{
Hacl_Ed25519_sign_expanded(signature, ks, len, msg);
}

48
3rdparty/hacl-star/evercrypt/EverCrypt_Ed25519.h vendored Normal file

@@ -0,0 +1,48 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_Ed25519_H
#define __EverCrypt_Ed25519_H
#include "Hacl_Ed25519.h"
void EverCrypt_Ed25519_sign(uint8_t *signature, uint8_t *secret1, uint32_t len, uint8_t *msg);
bool EverCrypt_Ed25519_verify(uint8_t *output, uint32_t len, uint8_t *msg, uint8_t *signature);
void EverCrypt_Ed25519_secret_to_public(uint8_t *output, uint8_t *secret1);
void EverCrypt_Ed25519_expand_keys(uint8_t *ks, uint8_t *secret1);
void
EverCrypt_Ed25519_sign_expanded(uint8_t *signature, uint8_t *ks, uint32_t len, uint8_t *msg);
#define __EverCrypt_Ed25519_H_DEFINED
#endif
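A sign/verify round-trip sketch for the wrappers above; the extracted verify prototype names its first parameter output, but it is forwarded straight to Hacl_Ed25519_verify, which takes the 32-byte public key in that position (helper name is illustrative):

#include "EverCrypt_Ed25519.h"

/* Hypothetical sketch: sign a message with a 32-byte secret key, then verify it. */
static bool example_ed25519(const uint8_t secret[32], uint8_t *msg, uint32_t len)
{
  uint8_t pub[32U] = { 0U };
  uint8_t sig[64U] = { 0U };
  EverCrypt_Ed25519_secret_to_public(pub, (uint8_t *)secret);
  EverCrypt_Ed25519_sign(sig, (uint8_t *)secret, len, msg);
  return EverCrypt_Ed25519_verify(pub, len, msg, sig);
}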

116
3rdparty/hacl-star/evercrypt/EverCrypt_Error.c vendored Normal file

@@ -0,0 +1,116 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_Error.h"
bool EverCrypt_Error_uu___is_Success(EverCrypt_Error_error_code projectee)
{
switch (projectee)
{
case EverCrypt_Error_Success:
{
return true;
}
default:
{
return false;
}
}
}
bool EverCrypt_Error_uu___is_UnsupportedAlgorithm(EverCrypt_Error_error_code projectee)
{
switch (projectee)
{
case EverCrypt_Error_UnsupportedAlgorithm:
{
return true;
}
default:
{
return false;
}
}
}
bool EverCrypt_Error_uu___is_InvalidKey(EverCrypt_Error_error_code projectee)
{
switch (projectee)
{
case EverCrypt_Error_InvalidKey:
{
return true;
}
default:
{
return false;
}
}
}
bool EverCrypt_Error_uu___is_AuthenticationFailure(EverCrypt_Error_error_code projectee)
{
switch (projectee)
{
case EverCrypt_Error_AuthenticationFailure:
{
return true;
}
default:
{
return false;
}
}
}
bool EverCrypt_Error_uu___is_InvalidIVLength(EverCrypt_Error_error_code projectee)
{
switch (projectee)
{
case EverCrypt_Error_InvalidIVLength:
{
return true;
}
default:
{
return false;
}
}
}
bool EverCrypt_Error_uu___is_DecodeError(EverCrypt_Error_error_code projectee)
{
switch (projectee)
{
case EverCrypt_Error_DecodeError:
{
return true;
}
default:
{
return false;
}
}
}

58
3rdparty/hacl-star/evercrypt/EverCrypt_Error.h vendored Normal file

@@ -0,0 +1,58 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_Error_H
#define __EverCrypt_Error_H
#define EverCrypt_Error_Success 0
#define EverCrypt_Error_UnsupportedAlgorithm 1
#define EverCrypt_Error_InvalidKey 2
#define EverCrypt_Error_AuthenticationFailure 3
#define EverCrypt_Error_InvalidIVLength 4
#define EverCrypt_Error_DecodeError 5
typedef uint8_t EverCrypt_Error_error_code;
bool EverCrypt_Error_uu___is_Success(EverCrypt_Error_error_code projectee);
bool EverCrypt_Error_uu___is_UnsupportedAlgorithm(EverCrypt_Error_error_code projectee);
bool EverCrypt_Error_uu___is_InvalidKey(EverCrypt_Error_error_code projectee);
bool EverCrypt_Error_uu___is_AuthenticationFailure(EverCrypt_Error_error_code projectee);
bool EverCrypt_Error_uu___is_InvalidIVLength(EverCrypt_Error_error_code projectee);
bool EverCrypt_Error_uu___is_DecodeError(EverCrypt_Error_error_code projectee);
#define __EverCrypt_Error_H_DEFINED
#endif

382
3rdparty/hacl-star/evercrypt/EverCrypt_HKDF.c vendored Normal file

@@ -0,0 +1,382 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_HKDF.h"
void
EverCrypt_HKDF_expand_sha1(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
)
{
uint32_t tlen = (uint32_t)20U;
uint32_t n1 = len / tlen;
uint8_t *output = okm;
KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
uint8_t text[tlen + infolen + (uint32_t)1U];
memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof text[0U]);
uint8_t *text0 = text + tlen;
uint8_t *tag = text;
uint8_t *ctr = text + tlen + infolen;
memcpy(text + tlen, info, infolen * sizeof info[0U]);
for (uint32_t i = (uint32_t)0U; i < n1; i = i + (uint32_t)1U)
{
ctr[0U] = (uint8_t)(i + (uint32_t)1U);
if (i == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
memcpy(output + i * tlen, tag, tlen * sizeof tag[0U]);
}
if (n1 * tlen < len)
{
ctr[0U] = (uint8_t)(n1 + (uint32_t)1U);
if (n1 == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
uint8_t *block = okm + n1 * tlen;
memcpy(block, tag, (len - n1 * tlen) * sizeof tag[0U]);
}
}
void
EverCrypt_HKDF_extract_sha1(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
)
{
EverCrypt_HMAC_compute_sha1(prk, salt, saltlen, ikm, ikmlen);
}
void
EverCrypt_HKDF_expand_sha2_256(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
)
{
uint32_t tlen = (uint32_t)32U;
uint32_t n1 = len / tlen;
uint8_t *output = okm;
KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
uint8_t text[tlen + infolen + (uint32_t)1U];
memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof text[0U]);
uint8_t *text0 = text + tlen;
uint8_t *tag = text;
uint8_t *ctr = text + tlen + infolen;
memcpy(text + tlen, info, infolen * sizeof info[0U]);
for (uint32_t i = (uint32_t)0U; i < n1; i = i + (uint32_t)1U)
{
ctr[0U] = (uint8_t)(i + (uint32_t)1U);
if (i == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
memcpy(output + i * tlen, tag, tlen * sizeof tag[0U]);
}
if (n1 * tlen < len)
{
ctr[0U] = (uint8_t)(n1 + (uint32_t)1U);
if (n1 == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
uint8_t *block = okm + n1 * tlen;
memcpy(block, tag, (len - n1 * tlen) * sizeof tag[0U]);
}
}
void
EverCrypt_HKDF_extract_sha2_256(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
)
{
EverCrypt_HMAC_compute_sha2_256(prk, salt, saltlen, ikm, ikmlen);
}
void
EverCrypt_HKDF_expand_sha2_384(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
)
{
uint32_t tlen = (uint32_t)48U;
uint32_t n1 = len / tlen;
uint8_t *output = okm;
KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
uint8_t text[tlen + infolen + (uint32_t)1U];
memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof text[0U]);
uint8_t *text0 = text + tlen;
uint8_t *tag = text;
uint8_t *ctr = text + tlen + infolen;
memcpy(text + tlen, info, infolen * sizeof info[0U]);
for (uint32_t i = (uint32_t)0U; i < n1; i = i + (uint32_t)1U)
{
ctr[0U] = (uint8_t)(i + (uint32_t)1U);
if (i == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
memcpy(output + i * tlen, tag, tlen * sizeof tag[0U]);
}
if (n1 * tlen < len)
{
ctr[0U] = (uint8_t)(n1 + (uint32_t)1U);
if (n1 == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
uint8_t *block = okm + n1 * tlen;
memcpy(block, tag, (len - n1 * tlen) * sizeof tag[0U]);
}
}
void
EverCrypt_HKDF_extract_sha2_384(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
)
{
EverCrypt_HMAC_compute_sha2_384(prk, salt, saltlen, ikm, ikmlen);
}
void
EverCrypt_HKDF_expand_sha2_512(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
)
{
uint32_t tlen = (uint32_t)64U;
uint32_t n1 = len / tlen;
uint8_t *output = okm;
KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
uint8_t text[tlen + infolen + (uint32_t)1U];
memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof text[0U]);
uint8_t *text0 = text + tlen;
uint8_t *tag = text;
uint8_t *ctr = text + tlen + infolen;
memcpy(text + tlen, info, infolen * sizeof info[0U]);
for (uint32_t i = (uint32_t)0U; i < n1; i = i + (uint32_t)1U)
{
ctr[0U] = (uint8_t)(i + (uint32_t)1U);
if (i == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
memcpy(output + i * tlen, tag, tlen * sizeof tag[0U]);
}
if (n1 * tlen < len)
{
ctr[0U] = (uint8_t)(n1 + (uint32_t)1U);
if (n1 == (uint32_t)0U)
{
EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
}
else
{
EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
}
uint8_t *block = okm + n1 * tlen;
memcpy(block, tag, (len - n1 * tlen) * sizeof tag[0U]);
}
}
void
EverCrypt_HKDF_extract_sha2_512(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
)
{
EverCrypt_HMAC_compute_sha2_512(prk, salt, saltlen, ikm, ikmlen);
}
void
EverCrypt_HKDF_expand(
Spec_Hash_Definitions_hash_alg a,
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
)
{
switch (a)
{
case Spec_Hash_Definitions_SHA1:
{
EverCrypt_HKDF_expand_sha1(okm, prk, prklen, info, infolen, len);
break;
}
case Spec_Hash_Definitions_SHA2_256:
{
EverCrypt_HKDF_expand_sha2_256(okm, prk, prklen, info, infolen, len);
break;
}
case Spec_Hash_Definitions_SHA2_384:
{
EverCrypt_HKDF_expand_sha2_384(okm, prk, prklen, info, infolen, len);
break;
}
case Spec_Hash_Definitions_SHA2_512:
{
EverCrypt_HKDF_expand_sha2_512(okm, prk, prklen, info, infolen, len);
break;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
}
void
EverCrypt_HKDF_extract(
Spec_Hash_Definitions_hash_alg a,
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
)
{
switch (a)
{
case Spec_Hash_Definitions_SHA1:
{
EverCrypt_HKDF_extract_sha1(prk, salt, saltlen, ikm, ikmlen);
break;
}
case Spec_Hash_Definitions_SHA2_256:
{
EverCrypt_HKDF_extract_sha2_256(prk, salt, saltlen, ikm, ikmlen);
break;
}
case Spec_Hash_Definitions_SHA2_384:
{
EverCrypt_HKDF_extract_sha2_384(prk, salt, saltlen, ikm, ikmlen);
break;
}
case Spec_Hash_Definitions_SHA2_512:
{
EverCrypt_HKDF_extract_sha2_512(prk, salt, saltlen, ikm, ikmlen);
break;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
}
KRML_DEPRECATED("expand")
void
EverCrypt_HKDF_hkdf_expand(
Spec_Hash_Definitions_hash_alg a,
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
)
{
EverCrypt_HKDF_expand(a, okm, prk, prklen, info, infolen, len);
}
KRML_DEPRECATED("extract")
void
EverCrypt_HKDF_hkdf_extract(
Spec_Hash_Definitions_hash_alg a,
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
)
{
EverCrypt_HKDF_extract(a, prk, salt, saltlen, ikm, ikmlen);
}

160
3rdparty/hacl-star/evercrypt/EverCrypt_HKDF.h vendored Normal file

@@ -0,0 +1,160 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_HKDF_H
#define __EverCrypt_HKDF_H
#include "EverCrypt_HMAC.h"
#include "Hacl_Spec.h"
void
EverCrypt_HKDF_expand_sha1(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
);
void
EverCrypt_HKDF_extract_sha1(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
);
void
EverCrypt_HKDF_expand_sha2_256(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
);
void
EverCrypt_HKDF_extract_sha2_256(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
);
void
EverCrypt_HKDF_expand_sha2_384(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
);
void
EverCrypt_HKDF_extract_sha2_384(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
);
void
EverCrypt_HKDF_expand_sha2_512(
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
);
void
EverCrypt_HKDF_extract_sha2_512(
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
);
void
EverCrypt_HKDF_expand(
Spec_Hash_Definitions_hash_alg a,
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
);
void
EverCrypt_HKDF_extract(
Spec_Hash_Definitions_hash_alg a,
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
);
KRML_DEPRECATED("expand")
void
EverCrypt_HKDF_hkdf_expand(
Spec_Hash_Definitions_hash_alg a,
uint8_t *okm,
uint8_t *prk,
uint32_t prklen,
uint8_t *info,
uint32_t infolen,
uint32_t len
);
KRML_DEPRECATED("extract")
void
EverCrypt_HKDF_hkdf_extract(
Spec_Hash_Definitions_hash_alg a,
uint8_t *prk,
uint8_t *salt,
uint32_t saltlen,
uint8_t *ikm,
uint32_t ikmlen
);
#define __EverCrypt_HKDF_H_DEFINED
#endif
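An extract-then-expand sketch using the agile entry points above, fixed to SHA-256; the 32-byte PRK and OKM lengths follow from the SHA-256 output size, and the helper name is illustrative:

#include "EverCrypt_HKDF.h"

/* Hypothetical sketch: HKDF-SHA-256, deriving a 32-byte output key. */
static void example_hkdf_sha2_256(uint8_t okm[32],
                                  const uint8_t *salt, uint32_t saltlen,
                                  const uint8_t *ikm, uint32_t ikmlen,
                                  const uint8_t *info, uint32_t infolen)
{
  uint8_t prk[32U] = { 0U };  /* PRK length equals the SHA-256 output length */
  EverCrypt_HKDF_extract(Spec_Hash_Definitions_SHA2_256, prk,
                         (uint8_t *)salt, saltlen, (uint8_t *)ikm, ikmlen);
  EverCrypt_HKDF_expand(Spec_Hash_Definitions_SHA2_256, okm, prk, (uint32_t)32U,
                        (uint8_t *)info, infolen, (uint32_t)32U);
}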

363
3rdparty/hacl-star/evercrypt/EverCrypt_HMAC.c vendored Normal file

@@ -0,0 +1,363 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_HMAC.h"
void
EverCrypt_HMAC_compute_sha1(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
)
{
uint32_t l = (uint32_t)64U;
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t key_block[l];
memset(key_block, 0U, l * sizeof key_block[0U]);
uint32_t i1;
if (key_len <= (uint32_t)64U)
{
i1 = key_len;
}
else
{
i1 = (uint32_t)20U;
}
uint8_t *nkey = key_block;
if (key_len <= (uint32_t)64U)
{
memcpy(nkey, key, key_len * sizeof key[0U]);
}
else
{
Hacl_Hash_SHA1_legacy_hash(key, key_len, nkey);
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t ipad[l];
memset(ipad, (uint8_t)0x36U, l * sizeof ipad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = ipad[i];
uint8_t yi = key_block[i];
ipad[i] = xi ^ yi;
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t opad[l];
memset(opad, (uint8_t)0x5cU, l * sizeof opad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = opad[i];
uint8_t yi = key_block[i];
opad[i] = xi ^ yi;
}
uint32_t
s[5U] =
{
(uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
(uint32_t)0xc3d2e1f0U
};
Hacl_Hash_Core_SHA1_legacy_init(s);
Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)(uint32_t)64U, data, data_len);
uint8_t *dst1 = ipad;
Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
uint8_t *hash1 = ipad;
Hacl_Hash_Core_SHA1_legacy_init(s);
Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)(uint32_t)64U, hash1, (uint32_t)20U);
Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
}
void
EverCrypt_HMAC_compute_sha2_256(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
)
{
uint32_t l = (uint32_t)64U;
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t key_block[l];
memset(key_block, 0U, l * sizeof key_block[0U]);
uint32_t i1;
if (key_len <= (uint32_t)64U)
{
i1 = key_len;
}
else
{
i1 = (uint32_t)32U;
}
uint8_t *nkey = key_block;
if (key_len <= (uint32_t)64U)
{
memcpy(nkey, key, key_len * sizeof key[0U]);
}
else
{
EverCrypt_Hash_hash_256(key, key_len, nkey);
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t ipad[l];
memset(ipad, (uint8_t)0x36U, l * sizeof ipad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = ipad[i];
uint8_t yi = key_block[i];
ipad[i] = xi ^ yi;
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t opad[l];
memset(opad, (uint8_t)0x5cU, l * sizeof opad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = opad[i];
uint8_t yi = key_block[i];
opad[i] = xi ^ yi;
}
uint32_t
s[8U] =
{
(uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
(uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
};
Hacl_Hash_Core_SHA2_init_256(s);
EverCrypt_Hash_update_multi_256(s, ipad, (uint32_t)1U);
EverCrypt_Hash_update_last_256(s, (uint64_t)(uint32_t)64U, data, data_len);
uint8_t *dst1 = ipad;
Hacl_Hash_Core_SHA2_finish_256(s, dst1);
uint8_t *hash1 = ipad;
Hacl_Hash_Core_SHA2_init_256(s);
EverCrypt_Hash_update_multi_256(s, opad, (uint32_t)1U);
EverCrypt_Hash_update_last_256(s, (uint64_t)(uint32_t)64U, hash1, (uint32_t)32U);
Hacl_Hash_Core_SHA2_finish_256(s, dst);
}
void
EverCrypt_HMAC_compute_sha2_384(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
)
{
uint32_t l = (uint32_t)128U;
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t key_block[l];
memset(key_block, 0U, l * sizeof key_block[0U]);
uint32_t i1;
if (key_len <= (uint32_t)128U)
{
i1 = key_len;
}
else
{
i1 = (uint32_t)48U;
}
uint8_t *nkey = key_block;
if (key_len <= (uint32_t)128U)
{
memcpy(nkey, key, key_len * sizeof key[0U]);
}
else
{
Hacl_Hash_SHA2_hash_384(key, key_len, nkey);
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t ipad[l];
memset(ipad, (uint8_t)0x36U, l * sizeof ipad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = ipad[i];
uint8_t yi = key_block[i];
ipad[i] = xi ^ yi;
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t opad[l];
memset(opad, (uint8_t)0x5cU, l * sizeof opad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = opad[i];
uint8_t yi = key_block[i];
opad[i] = xi ^ yi;
}
uint64_t
s[8U] =
{
(uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
(uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
(uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
};
Hacl_Hash_Core_SHA2_init_384(s);
Hacl_Hash_SHA2_update_multi_384(s, ipad, (uint32_t)1U);
Hacl_Hash_SHA2_update_last_384(s, (uint128_t)(uint64_t)(uint32_t)128U, data, data_len);
uint8_t *dst1 = ipad;
Hacl_Hash_Core_SHA2_finish_384(s, dst1);
uint8_t *hash1 = ipad;
Hacl_Hash_Core_SHA2_init_384(s);
Hacl_Hash_SHA2_update_multi_384(s, opad, (uint32_t)1U);
Hacl_Hash_SHA2_update_last_384(s, (uint128_t)(uint64_t)(uint32_t)128U, hash1, (uint32_t)48U);
Hacl_Hash_Core_SHA2_finish_384(s, dst);
}
void
EverCrypt_HMAC_compute_sha2_512(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
)
{
uint32_t l = (uint32_t)128U;
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t key_block[l];
memset(key_block, 0U, l * sizeof key_block[0U]);
uint32_t i1;
if (key_len <= (uint32_t)128U)
{
i1 = key_len;
}
else
{
i1 = (uint32_t)64U;
}
uint8_t *nkey = key_block;
if (key_len <= (uint32_t)128U)
{
memcpy(nkey, key, key_len * sizeof key[0U]);
}
else
{
Hacl_Hash_SHA2_hash_512(key, key_len, nkey);
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t ipad[l];
memset(ipad, (uint8_t)0x36U, l * sizeof ipad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = ipad[i];
uint8_t yi = key_block[i];
ipad[i] = xi ^ yi;
}
KRML_CHECK_SIZE(sizeof (uint8_t), l);
uint8_t opad[l];
memset(opad, (uint8_t)0x5cU, l * sizeof opad[0U]);
for (uint32_t i = (uint32_t)0U; i < l; i = i + (uint32_t)1U)
{
uint8_t xi = opad[i];
uint8_t yi = key_block[i];
opad[i] = xi ^ yi;
}
uint64_t
s[8U] =
{
(uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
(uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
(uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
};
Hacl_Hash_Core_SHA2_init_512(s);
Hacl_Hash_SHA2_update_multi_512(s, ipad, (uint32_t)1U);
Hacl_Hash_SHA2_update_last_512(s, (uint128_t)(uint64_t)(uint32_t)128U, data, data_len);
uint8_t *dst1 = ipad;
Hacl_Hash_Core_SHA2_finish_512(s, dst1);
uint8_t *hash1 = ipad;
Hacl_Hash_Core_SHA2_init_512(s);
Hacl_Hash_SHA2_update_multi_512(s, opad, (uint32_t)1U);
Hacl_Hash_SHA2_update_last_512(s, (uint128_t)(uint64_t)(uint32_t)128U, hash1, (uint32_t)64U);
Hacl_Hash_Core_SHA2_finish_512(s, dst);
}
bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___0_5843)
{
switch (uu___0_5843)
{
case Spec_Hash_Definitions_SHA1:
{
return true;
}
case Spec_Hash_Definitions_SHA2_256:
{
return true;
}
case Spec_Hash_Definitions_SHA2_384:
{
return true;
}
case Spec_Hash_Definitions_SHA2_512:
{
return true;
}
default:
{
return false;
}
}
}
void
EverCrypt_HMAC_compute(
Spec_Hash_Definitions_hash_alg a,
uint8_t *mac,
uint8_t *key,
uint32_t keylen,
uint8_t *data,
uint32_t datalen
)
{
switch (a)
{
case Spec_Hash_Definitions_SHA1:
{
EverCrypt_HMAC_compute_sha1(mac, key, keylen, data, datalen);
break;
}
case Spec_Hash_Definitions_SHA2_256:
{
EverCrypt_HMAC_compute_sha2_256(mac, key, keylen, data, datalen);
break;
}
case Spec_Hash_Definitions_SHA2_384:
{
EverCrypt_HMAC_compute_sha2_384(mac, key, keylen, data, datalen);
break;
}
case Spec_Hash_Definitions_SHA2_512:
{
EverCrypt_HMAC_compute_sha2_512(mac, key, keylen, data, datalen);
break;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
}

90
3rdparty/hacl-star/evercrypt/EverCrypt_HMAC.h vendored Normal file

@@ -0,0 +1,90 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_HMAC_H
#define __EverCrypt_HMAC_H
#include "Hacl_Kremlib.h"
#include "Hacl_Hash.h"
#include "Hacl_Spec.h"
#include "EverCrypt_Hash.h"
void
EverCrypt_HMAC_compute_sha1(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
);
void
EverCrypt_HMAC_compute_sha2_256(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
);
void
EverCrypt_HMAC_compute_sha2_384(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
);
void
EverCrypt_HMAC_compute_sha2_512(
uint8_t *dst,
uint8_t *key,
uint32_t key_len,
uint8_t *data,
uint32_t data_len
);
bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___0_5843);
typedef Spec_Hash_Definitions_hash_alg EverCrypt_HMAC_supported_alg;
void
EverCrypt_HMAC_compute(
Spec_Hash_Definitions_hash_alg a,
uint8_t *mac,
uint8_t *key,
uint32_t keylen,
uint8_t *data,
uint32_t datalen
);
#define __EverCrypt_HMAC_H_DEFINED
#endif
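The agile compute entry point above dispatches on the hash algorithm; a one-call sketch for HMAC-SHA-256 (helper name is illustrative, and the mac buffer must match the hash output length):

#include "EverCrypt_HMAC.h"

/* Hypothetical sketch: HMAC-SHA-256 over a buffer; mac must be 32 bytes for SHA-256. */
static void example_hmac_sha2_256(uint8_t mac[32],
                                  const uint8_t *key, uint32_t keylen,
                                  const uint8_t *data, uint32_t datalen)
{
  EverCrypt_HMAC_compute(Spec_Hash_Definitions_SHA2_256, mac,
                         (uint8_t *)key, keylen, (uint8_t *)data, datalen);
}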

1641
3rdparty/hacl-star/evercrypt/EverCrypt_Hash.c vendored Normal file

File diff suppressed because it is too large

236
3rdparty/hacl-star/evercrypt/EverCrypt_Hash.h vendored Normal file

@@ -0,0 +1,236 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_Hash_H
#define __EverCrypt_Hash_H
#include "Hacl_Kremlib.h"
#include "Vale.h"
#include "Hacl_Hash.h"
#include "EverCrypt_AutoConfig2.h"
#include "Hacl_Spec.h"
typedef Spec_Hash_Definitions_hash_alg EverCrypt_Hash_alg;
C_String_t EverCrypt_Hash_string_of_alg(Spec_Hash_Definitions_hash_alg uu___0_6);
typedef Spec_Hash_Definitions_hash_alg EverCrypt_Hash_broken_alg;
typedef Spec_Hash_Definitions_hash_alg EverCrypt_Hash_alg13;
typedef void *EverCrypt_Hash_e_alg;
#define EverCrypt_Hash_MD5_s 0
#define EverCrypt_Hash_SHA1_s 1
#define EverCrypt_Hash_SHA2_224_s 2
#define EverCrypt_Hash_SHA2_256_s 3
#define EverCrypt_Hash_SHA2_384_s 4
#define EverCrypt_Hash_SHA2_512_s 5
typedef uint8_t EverCrypt_Hash_state_s_tags;
typedef struct EverCrypt_Hash_state_s_s
{
EverCrypt_Hash_state_s_tags tag;
union {
uint32_t *case_MD5_s;
uint32_t *case_SHA1_s;
uint32_t *case_SHA2_224_s;
uint32_t *case_SHA2_256_s;
uint64_t *case_SHA2_384_s;
uint64_t *case_SHA2_512_s;
}
;
}
EverCrypt_Hash_state_s;
bool
EverCrypt_Hash_uu___is_MD5_s(
Spec_Hash_Definitions_hash_alg uu____151,
EverCrypt_Hash_state_s projectee
);
uint32_t
*EverCrypt_Hash___proj__MD5_s__item__p(
Spec_Hash_Definitions_hash_alg uu____179,
EverCrypt_Hash_state_s projectee
);
bool
EverCrypt_Hash_uu___is_SHA1_s(
Spec_Hash_Definitions_hash_alg uu____202,
EverCrypt_Hash_state_s projectee
);
uint32_t
*EverCrypt_Hash___proj__SHA1_s__item__p(
Spec_Hash_Definitions_hash_alg uu____230,
EverCrypt_Hash_state_s projectee
);
bool
EverCrypt_Hash_uu___is_SHA2_224_s(
Spec_Hash_Definitions_hash_alg uu____253,
EverCrypt_Hash_state_s projectee
);
uint32_t
*EverCrypt_Hash___proj__SHA2_224_s__item__p(
Spec_Hash_Definitions_hash_alg uu____281,
EverCrypt_Hash_state_s projectee
);
bool
EverCrypt_Hash_uu___is_SHA2_256_s(
Spec_Hash_Definitions_hash_alg uu____304,
EverCrypt_Hash_state_s projectee
);
uint32_t
*EverCrypt_Hash___proj__SHA2_256_s__item__p(
Spec_Hash_Definitions_hash_alg uu____332,
EverCrypt_Hash_state_s projectee
);
bool
EverCrypt_Hash_uu___is_SHA2_384_s(
Spec_Hash_Definitions_hash_alg uu____355,
EverCrypt_Hash_state_s projectee
);
uint64_t
*EverCrypt_Hash___proj__SHA2_384_s__item__p(
Spec_Hash_Definitions_hash_alg uu____383,
EverCrypt_Hash_state_s projectee
);
bool
EverCrypt_Hash_uu___is_SHA2_512_s(
Spec_Hash_Definitions_hash_alg uu____406,
EverCrypt_Hash_state_s projectee
);
uint64_t
*EverCrypt_Hash___proj__SHA2_512_s__item__p(
Spec_Hash_Definitions_hash_alg uu____434,
EverCrypt_Hash_state_s projectee
);
Spec_Hash_Definitions_hash_alg EverCrypt_Hash_alg_of_state(EverCrypt_Hash_state_s *s);
EverCrypt_Hash_state_s *EverCrypt_Hash_create_in(Spec_Hash_Definitions_hash_alg a);
EverCrypt_Hash_state_s *EverCrypt_Hash_create(Spec_Hash_Definitions_hash_alg a);
void EverCrypt_Hash_init(EverCrypt_Hash_state_s *s);
void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n1);
void EverCrypt_Hash_update(EverCrypt_Hash_state_s *s, uint8_t *block1);
void EverCrypt_Hash_update_multi(EverCrypt_Hash_state_s *s, uint8_t *blocks, uint32_t len);
void
EverCrypt_Hash_update_last_256(
uint32_t *s,
uint64_t prev_len,
uint8_t *input,
uint32_t input_len
);
void EverCrypt_Hash_update_last(EverCrypt_Hash_state_s *s, uint8_t *last1, uint64_t total_len);
void EverCrypt_Hash_finish(EverCrypt_Hash_state_s *s, uint8_t *dst);
void EverCrypt_Hash_free(EverCrypt_Hash_state_s *s);
void EverCrypt_Hash_copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst);
void EverCrypt_Hash_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
void EverCrypt_Hash_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst);
void
EverCrypt_Hash_hash(
Spec_Hash_Definitions_hash_alg a,
uint8_t *dst,
uint8_t *input,
uint32_t len
);
typedef uint8_t *EverCrypt_Hash_Incremental_any_hash_t;
typedef struct EverCrypt_Hash_Incremental_state_s_s EverCrypt_Hash_Incremental_state_s;
bool
EverCrypt_Hash_Incremental_uu___is_State(
Spec_Hash_Definitions_hash_alg a,
EverCrypt_Hash_Incremental_state_s projectee
);
EverCrypt_Hash_state_s
*EverCrypt_Hash_Incremental___proj__State__item__hash_state(
Spec_Hash_Definitions_hash_alg a,
EverCrypt_Hash_Incremental_state_s projectee
);
uint8_t
*EverCrypt_Hash_Incremental___proj__State__item__buf(
Spec_Hash_Definitions_hash_alg a,
EverCrypt_Hash_Incremental_state_s projectee
);
uint64_t
EverCrypt_Hash_Incremental___proj__State__item__total_len(
Spec_Hash_Definitions_hash_alg a,
EverCrypt_Hash_Incremental_state_s projectee
);
Spec_Hash_Definitions_hash_alg
EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_state_s *s);
EverCrypt_Hash_Incremental_state_s
*EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_hash_alg a);
void EverCrypt_Hash_Incremental_init(EverCrypt_Hash_Incremental_state_s *s);
void
EverCrypt_Hash_Incremental_update(
EverCrypt_Hash_Incremental_state_s *p1,
uint8_t *data,
uint32_t len
);
void EverCrypt_Hash_Incremental_finish(EverCrypt_Hash_Incremental_state_s *s, uint8_t *dst);
void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_state_s *s);
#define __EverCrypt_Hash_H_DEFINED
#endif
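
Not part of the vendored header above: a sketch of the incremental (streaming) hashing API it declares, feeding a message in two chunks; SHA2-256 and its 32-byte digest are assumed for illustration.

#include <stdio.h>
#include "EverCrypt_Hash.h"

/* Illustrative only: stream two chunks through an incremental SHA2-256 state. */
int main(void)
{
  uint8_t part1[] = "hello, ";
  uint8_t part2[] = "world";
  uint8_t digest[32];

  EverCrypt_Hash_Incremental_state_s
  *st = EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_SHA2_256);
  EverCrypt_Hash_Incremental_init(st);    /* reset to an empty state; harmless if create_in already did */

  EverCrypt_Hash_Incremental_update(st, part1, (uint32_t)(sizeof part1 - 1U));
  EverCrypt_Hash_Incremental_update(st, part2, (uint32_t)(sizeof part2 - 1U));
  EverCrypt_Hash_Incremental_finish(st, digest);
  EverCrypt_Hash_Incremental_free(st);

  for (size_t i = 0; i < sizeof digest; i++)
    printf("%02x", digest[i]);
  printf("\n");
  return 0;
}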

34
3rdparty/hacl-star/evercrypt/EverCrypt_StaticConfig.c vendored Normal file

@@ -0,0 +1,34 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_StaticConfig.h"
bool EverCrypt_StaticConfig_hacl = true;
bool EverCrypt_StaticConfig_vale = true;
bool EverCrypt_StaticConfig_openssl = true;
bool EverCrypt_StaticConfig_bcrypt = false;

45
3rdparty/hacl-star/evercrypt/EverCrypt_StaticConfig.h vendored Normal file

@@ -0,0 +1,45 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_StaticConfig_H
#define __EverCrypt_StaticConfig_H
extern bool EverCrypt_StaticConfig_hacl;
extern bool EverCrypt_StaticConfig_vale;
extern bool EverCrypt_StaticConfig_openssl;
extern bool EverCrypt_StaticConfig_bcrypt;
#define __EverCrypt_StaticConfig_H_DEFINED
#endif
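
Not part of the vendored header above: a trivial sketch that reports which backends this EverCrypt build was compiled with, using the flags declared above.

#include <stdio.h>
#include "EverCrypt_StaticConfig.h"

/* Illustrative only: print the compile-time provider selection. */
int main(void)
{
  printf("hacl=%d vale=%d openssl=%d bcrypt=%d\n",
    (int)EverCrypt_StaticConfig_hacl,
    (int)EverCrypt_StaticConfig_vale,
    (int)EverCrypt_StaticConfig_openssl,
    (int)EverCrypt_StaticConfig_bcrypt);
  return 0;
}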

66
3rdparty/hacl-star/evercrypt/EverCrypt_Vale.c vendored Normal file

@@ -0,0 +1,66 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "EverCrypt_Vale.h"
uint8_t *__proj__Mkgcm_args__item__plain(gcm_args projectee)
{
return projectee.plain;
}
uint64_t __proj__Mkgcm_args__item__plain_len(gcm_args projectee)
{
return projectee.plain_len;
}
uint8_t *__proj__Mkgcm_args__item__aad(gcm_args projectee)
{
return projectee.aad;
}
uint64_t __proj__Mkgcm_args__item__aad_len(gcm_args projectee)
{
return projectee.aad_len;
}
uint8_t *__proj__Mkgcm_args__item__iv(gcm_args projectee)
{
return projectee.iv;
}
uint8_t *__proj__Mkgcm_args__item__expanded_key(gcm_args projectee)
{
return projectee.expanded_key;
}
uint8_t *__proj__Mkgcm_args__item__cipher(gcm_args projectee)
{
return projectee.cipher;
}
uint8_t *__proj__Mkgcm_args__item__tag(gcm_args projectee)
{
return projectee.tag;
}

83
3rdparty/hacl-star/evercrypt/EverCrypt_Vale.h vendored Normal file

@@ -0,0 +1,83 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __EverCrypt_Vale_H
#define __EverCrypt_Vale_H
extern void aes128_key_expansion_sbox(uint8_t *key, uint8_t *w, uint8_t *sbox);
extern void
aes128_encrypt_one_block(uint8_t *cipher, uint8_t *plain, uint8_t *w, uint8_t *sbox);
typedef struct gcm_args_s
{
uint8_t *plain;
uint64_t plain_len;
uint8_t *aad;
uint64_t aad_len;
uint8_t *iv;
uint8_t *expanded_key;
uint8_t *cipher;
uint8_t *tag;
}
gcm_args;
uint8_t *__proj__Mkgcm_args__item__plain(gcm_args projectee);
uint64_t __proj__Mkgcm_args__item__plain_len(gcm_args projectee);
uint8_t *__proj__Mkgcm_args__item__aad(gcm_args projectee);
uint64_t __proj__Mkgcm_args__item__aad_len(gcm_args projectee);
uint8_t *__proj__Mkgcm_args__item__iv(gcm_args projectee);
uint8_t *__proj__Mkgcm_args__item__expanded_key(gcm_args projectee);
uint8_t *__proj__Mkgcm_args__item__cipher(gcm_args projectee);
uint8_t *__proj__Mkgcm_args__item__tag(gcm_args projectee);
extern void __stdcall old_aes128_key_expansion(uint8_t *key_ptr, uint8_t *expanded_key_ptr);
extern void __stdcall old_gcm128_encrypt(gcm_args *uu____343);
extern uint32_t __stdcall old_gcm128_decrypt(gcm_args *uu____357);
extern void __stdcall old_aes256_key_expansion(uint8_t *key_ptr, uint8_t *expanded_key_ptr);
extern void __stdcall old_gcm256_encrypt(gcm_args *uu____389);
extern uint32_t __stdcall old_gcm256_decrypt(gcm_args *uu____403);
#define __EverCrypt_Vale_H_DEFINED
#endif
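
Not part of the vendored header above: a sketch that fills the legacy Vale AES-GCM argument record and reads it back through the generated projectors; the old_gcm128_*/old_gcm256_* assembly entry points themselves are not called, and the buffer sizes are illustrative assumptions.

#include <stdio.h>
#include "EverCrypt_Vale.h"

/* Illustrative only: build a gcm_args record and inspect it via the projectors. */
int main(void)
{
  uint8_t plain[16] = { 0U }, aad[4] = { 0U }, iv[12] = { 0U };
  uint8_t expanded_key[176] = { 0U };     /* AES-128 expanded key size, assumed here */
  uint8_t cipher[16], tag[16];

  gcm_args args = {
    .plain = plain, .plain_len = sizeof plain,
    .aad = aad, .aad_len = sizeof aad,
    .iv = iv, .expanded_key = expanded_key,
    .cipher = cipher, .tag = tag
  };

  printf("plain_len=%llu aad_len=%llu\n",
    (unsigned long long)__proj__Mkgcm_args__item__plain_len(args),
    (unsigned long long)__proj__Mkgcm_args__item__aad_len(args));
  return 0;
}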

279
3rdparty/hacl-star/evercrypt/Hacl_Chacha20.c vendored Normal file

@@ -0,0 +1,279 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Hacl_Chacha20.h"
inline static void
Hacl_Impl_Chacha20_Core32_quarter_round(
uint32_t *st,
uint32_t a,
uint32_t b,
uint32_t c,
uint32_t d
)
{
uint32_t sta = st[a];
uint32_t stb0 = st[b];
uint32_t std0 = st[d];
uint32_t sta10 = sta + stb0;
uint32_t std10 = std0 ^ sta10;
uint32_t std2 = std10 << (uint32_t)16U | std10 >> (uint32_t)16U;
st[a] = sta10;
st[d] = std2;
uint32_t sta0 = st[c];
uint32_t stb1 = st[d];
uint32_t std3 = st[b];
uint32_t sta11 = sta0 + stb1;
uint32_t std11 = std3 ^ sta11;
uint32_t std20 = std11 << (uint32_t)12U | std11 >> (uint32_t)20U;
st[c] = sta11;
st[b] = std20;
uint32_t sta2 = st[a];
uint32_t stb2 = st[b];
uint32_t std4 = st[d];
uint32_t sta12 = sta2 + stb2;
uint32_t std12 = std4 ^ sta12;
uint32_t std21 = std12 << (uint32_t)8U | std12 >> (uint32_t)24U;
st[a] = sta12;
st[d] = std21;
uint32_t sta3 = st[c];
uint32_t stb = st[d];
uint32_t std = st[b];
uint32_t sta1 = sta3 + stb;
uint32_t std1 = std ^ sta1;
uint32_t std22 = std1 << (uint32_t)7U | std1 >> (uint32_t)25U;
st[c] = sta1;
st[b] = std22;
}
inline static void Hacl_Impl_Chacha20_Core32_double_round(uint32_t *st)
{
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)0U,
(uint32_t)4U,
(uint32_t)8U,
(uint32_t)12U);
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)1U,
(uint32_t)5U,
(uint32_t)9U,
(uint32_t)13U);
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)2U,
(uint32_t)6U,
(uint32_t)10U,
(uint32_t)14U);
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)3U,
(uint32_t)7U,
(uint32_t)11U,
(uint32_t)15U);
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)0U,
(uint32_t)5U,
(uint32_t)10U,
(uint32_t)15U);
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)1U,
(uint32_t)6U,
(uint32_t)11U,
(uint32_t)12U);
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)2U,
(uint32_t)7U,
(uint32_t)8U,
(uint32_t)13U);
Hacl_Impl_Chacha20_Core32_quarter_round(st,
(uint32_t)3U,
(uint32_t)4U,
(uint32_t)9U,
(uint32_t)14U);
}
inline static void Hacl_Impl_Chacha20_rounds(uint32_t *st)
{
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
Hacl_Impl_Chacha20_Core32_double_round(st);
}
inline static void Hacl_Impl_Chacha20_chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
{
memcpy(k, ctx, (uint32_t)16U * sizeof ctx[0U]);
uint32_t ctr_u32 = ctr;
k[12U] = k[12U] + ctr_u32;
Hacl_Impl_Chacha20_rounds(k);
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U)
{
uint32_t *os = k;
uint32_t x = k[i] + ctx[i];
os[i] = x;
}
k[12U] = k[12U] + ctr_u32;
}
static uint32_t
Hacl_Impl_Chacha20_chacha20_constants[4U] =
{ (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
inline void
Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
{
uint32_t *uu____0 = ctx;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
{
uint32_t *os = uu____0;
uint32_t x = Hacl_Impl_Chacha20_chacha20_constants[i];
os[i] = x;
}
uint32_t *uu____1 = ctx + (uint32_t)4U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U)
{
uint32_t *os = uu____1;
uint8_t *bj = k + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
os[i] = x;
}
ctx[12U] = ctr;
uint32_t *uu____2 = ctx + (uint32_t)13U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i = i + (uint32_t)1U)
{
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
os[i] = x;
}
}
inline void
Hacl_Impl_Chacha20_chacha20_encrypt_block(
uint32_t *ctx,
uint8_t *out,
uint32_t incr1,
uint8_t *text
)
{
uint32_t k[16U] = { 0U };
Hacl_Impl_Chacha20_chacha20_core(k, ctx, incr1);
uint32_t bl[16U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U)
{
uint32_t *os = bl;
uint8_t *bj = text + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
uint32_t r = u;
uint32_t x = r;
os[i] = x;
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U)
{
uint32_t *os = bl;
uint32_t x = bl[i] ^ k[i];
os[i] = x;
}
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U)
{
store32_le(out + i * (uint32_t)4U, bl[i]);
}
}
inline static void
Hacl_Impl_Chacha20_chacha20_encrypt_last(
uint32_t *ctx,
uint32_t len,
uint8_t *out,
uint32_t incr1,
uint8_t *text
)
{
uint8_t plain[64U] = { 0U };
memcpy(plain, text, len * sizeof text[0U]);
Hacl_Impl_Chacha20_chacha20_encrypt_block(ctx, plain, incr1, plain);
memcpy(out, plain, len * sizeof plain[0U]);
}
inline void
Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
{
uint32_t rem1 = len % (uint32_t)64U;
uint32_t nb = len / (uint32_t)64U;
uint32_t rem2 = len % (uint32_t)64U;
for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U)
{
Hacl_Impl_Chacha20_chacha20_encrypt_block(ctx,
out + i * (uint32_t)64U,
i,
text + i * (uint32_t)64U);
}
if (rem2 > (uint32_t)0U)
{
Hacl_Impl_Chacha20_chacha20_encrypt_last(ctx,
rem1,
out + nb * (uint32_t)64U,
nb,
text + nb * (uint32_t)64U);
}
}
void
Hacl_Chacha20_chacha20_encrypt(
uint32_t len,
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint32_t ctr
)
{
uint32_t ctx[16U] = { 0U };
Hacl_Impl_Chacha20_chacha20_init(ctx, key, n1, ctr);
Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, text);
}
void
Hacl_Chacha20_chacha20_decrypt(
uint32_t len,
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint32_t ctr
)
{
uint32_t ctx[16U] = { 0U };
Hacl_Impl_Chacha20_chacha20_init(ctx, key, n1, ctr);
Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, cipher);
}

70
3rdparty/hacl-star/evercrypt/Hacl_Chacha20.h vendored Normal file

@@ -0,0 +1,70 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Chacha20_H
#define __Hacl_Chacha20_H
#include "Hacl_Kremlib.h"
void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr);
void
Hacl_Impl_Chacha20_chacha20_encrypt_block(
uint32_t *ctx,
uint8_t *out,
uint32_t incr1,
uint8_t *text
);
void
Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text);
void
Hacl_Chacha20_chacha20_encrypt(
uint32_t len,
uint8_t *out,
uint8_t *text,
uint8_t *key,
uint8_t *n1,
uint32_t ctr
);
void
Hacl_Chacha20_chacha20_decrypt(
uint32_t len,
uint8_t *out,
uint8_t *cipher,
uint8_t *key,
uint8_t *n1,
uint32_t ctr
);
#define __Hacl_Chacha20_H_DEFINED
#endif
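
Not part of the vendored header above: a round-trip sketch of the portable ChaCha20 declared here (32-byte key, 12-byte nonce n1, 32-bit initial counter), assuming the HACL* sources are compiled into the build.

#include <stdio.h>
#include <string.h>
#include "Hacl_Chacha20.h"

/* Illustrative only: encrypt then decrypt a short buffer and check the round trip. */
int main(void)
{
  uint8_t key[32] = { 0U };               /* all-zero key and nonce, demonstration only */
  uint8_t nonce[12] = { 0U };
  uint8_t msg[] = "chacha20 round trip";
  uint32_t len = (uint32_t)(sizeof msg - 1U);
  uint8_t ct[sizeof msg], pt[sizeof msg];

  Hacl_Chacha20_chacha20_encrypt(len, ct, msg, key, nonce, (uint32_t)0U);
  Hacl_Chacha20_chacha20_decrypt(len, pt, ct, key, nonce, (uint32_t)0U);

  printf("round trip %s\n", memcmp(msg, pt, len) == 0 ? "ok" : "FAILED");
  return 0;
}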

855
3rdparty/hacl-star/evercrypt/Hacl_Curve25519_51.c vendored Normal file

@@ -0,0 +1,855 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Hacl_Curve25519_51.h"
inline void Hacl_Impl_Curve25519_Field51_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
{
uint64_t f10 = f1[0U];
uint64_t f20 = f2[0U];
uint64_t f11 = f1[1U];
uint64_t f21 = f2[1U];
uint64_t f12 = f1[2U];
uint64_t f22 = f2[2U];
uint64_t f13 = f1[3U];
uint64_t f23 = f2[3U];
uint64_t f14 = f1[4U];
uint64_t f24 = f2[4U];
out[0U] = f10 + f20;
out[1U] = f11 + f21;
out[2U] = f12 + f22;
out[3U] = f13 + f23;
out[4U] = f14 + f24;
}
inline void Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2)
{
uint64_t f10 = f1[0U];
uint64_t f20 = f2[0U];
uint64_t f11 = f1[1U];
uint64_t f21 = f2[1U];
uint64_t f12 = f1[2U];
uint64_t f22 = f2[2U];
uint64_t f13 = f1[3U];
uint64_t f23 = f2[3U];
uint64_t f14 = f1[4U];
uint64_t f24 = f2[4U];
out[0U] = f10 + (uint64_t)0x3fffffffffff68U - f20;
out[1U] = f11 + (uint64_t)0x3ffffffffffff8U - f21;
out[2U] = f12 + (uint64_t)0x3ffffffffffff8U - f22;
out[3U] = f13 + (uint64_t)0x3ffffffffffff8U - f23;
out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24;
}
inline void
Hacl_Impl_Curve25519_Field51_fmul(
uint64_t *out,
uint64_t *f1,
uint64_t *f2,
uint128_t *uu____2959
)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
uint64_t f14 = f1[4U];
uint64_t f20 = f2[0U];
uint64_t f21 = f2[1U];
uint64_t f22 = f2[2U];
uint64_t f23 = f2[3U];
uint64_t f24 = f2[4U];
uint64_t tmp1 = f21 * (uint64_t)19U;
uint64_t tmp2 = f22 * (uint64_t)19U;
uint64_t tmp3 = f23 * (uint64_t)19U;
uint64_t tmp4 = f24 * (uint64_t)19U;
uint128_t o00 = (uint128_t)f10 * f20;
uint128_t o10 = (uint128_t)f10 * f21;
uint128_t o20 = (uint128_t)f10 * f22;
uint128_t o30 = (uint128_t)f10 * f23;
uint128_t o40 = (uint128_t)f10 * f24;
uint128_t o01 = o00 + (uint128_t)f11 * tmp4;
uint128_t o11 = o10 + (uint128_t)f11 * f20;
uint128_t o21 = o20 + (uint128_t)f11 * f21;
uint128_t o31 = o30 + (uint128_t)f11 * f22;
uint128_t o41 = o40 + (uint128_t)f11 * f23;
uint128_t o02 = o01 + (uint128_t)f12 * tmp3;
uint128_t o12 = o11 + (uint128_t)f12 * tmp4;
uint128_t o22 = o21 + (uint128_t)f12 * f20;
uint128_t o32 = o31 + (uint128_t)f12 * f21;
uint128_t o42 = o41 + (uint128_t)f12 * f22;
uint128_t o03 = o02 + (uint128_t)f13 * tmp2;
uint128_t o13 = o12 + (uint128_t)f13 * tmp3;
uint128_t o23 = o22 + (uint128_t)f13 * tmp4;
uint128_t o33 = o32 + (uint128_t)f13 * f20;
uint128_t o43 = o42 + (uint128_t)f13 * f21;
uint128_t o04 = o03 + (uint128_t)f14 * tmp1;
uint128_t o14 = o13 + (uint128_t)f14 * tmp2;
uint128_t o24 = o23 + (uint128_t)f14 * tmp3;
uint128_t o34 = o33 + (uint128_t)f14 * tmp4;
uint128_t o44 = o43 + (uint128_t)f14 * f20;
uint128_t tmp_w0 = o04;
uint128_t tmp_w1 = o14;
uint128_t tmp_w2 = o24;
uint128_t tmp_w3 = o34;
uint128_t tmp_w4 = o44;
uint128_t l_ = tmp_w0 + (uint128_t)(uint64_t)0U;
uint64_t tmp01 = (uint64_t)l_ & (uint64_t)0x7ffffffffffffU;
uint64_t c0 = (uint64_t)(l_ >> (uint32_t)51U);
uint128_t l_0 = tmp_w1 + (uint128_t)c0;
uint64_t tmp11 = (uint64_t)l_0 & (uint64_t)0x7ffffffffffffU;
uint64_t c1 = (uint64_t)(l_0 >> (uint32_t)51U);
uint128_t l_1 = tmp_w2 + (uint128_t)c1;
uint64_t tmp21 = (uint64_t)l_1 & (uint64_t)0x7ffffffffffffU;
uint64_t c2 = (uint64_t)(l_1 >> (uint32_t)51U);
uint128_t l_2 = tmp_w3 + (uint128_t)c2;
uint64_t tmp31 = (uint64_t)l_2 & (uint64_t)0x7ffffffffffffU;
uint64_t c3 = (uint64_t)(l_2 >> (uint32_t)51U);
uint128_t l_3 = tmp_w4 + (uint128_t)c3;
uint64_t tmp41 = (uint64_t)l_3 & (uint64_t)0x7ffffffffffffU;
uint64_t c4 = (uint64_t)(l_3 >> (uint32_t)51U);
uint64_t l_4 = tmp01 + c4 * (uint64_t)19U;
uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
uint64_t c5 = l_4 >> (uint32_t)51U;
uint64_t o0 = tmp0_;
uint64_t o1 = tmp11 + c5;
uint64_t o2 = tmp21;
uint64_t o3 = tmp31;
uint64_t o4 = tmp41;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
out[4U] = o4;
}
inline static void
Hacl_Impl_Curve25519_Field51_fmul2(
uint64_t *out,
uint64_t *f1,
uint64_t *f2,
uint128_t *uu____4281
)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
uint64_t f14 = f1[4U];
uint64_t f20 = f2[0U];
uint64_t f21 = f2[1U];
uint64_t f22 = f2[2U];
uint64_t f23 = f2[3U];
uint64_t f24 = f2[4U];
uint64_t f30 = f1[5U];
uint64_t f31 = f1[6U];
uint64_t f32 = f1[7U];
uint64_t f33 = f1[8U];
uint64_t f34 = f1[9U];
uint64_t f40 = f2[5U];
uint64_t f41 = f2[6U];
uint64_t f42 = f2[7U];
uint64_t f43 = f2[8U];
uint64_t f44 = f2[9U];
uint64_t tmp11 = f21 * (uint64_t)19U;
uint64_t tmp12 = f22 * (uint64_t)19U;
uint64_t tmp13 = f23 * (uint64_t)19U;
uint64_t tmp14 = f24 * (uint64_t)19U;
uint64_t tmp21 = f41 * (uint64_t)19U;
uint64_t tmp22 = f42 * (uint64_t)19U;
uint64_t tmp23 = f43 * (uint64_t)19U;
uint64_t tmp24 = f44 * (uint64_t)19U;
uint128_t o00 = (uint128_t)f10 * f20;
uint128_t o15 = (uint128_t)f10 * f21;
uint128_t o25 = (uint128_t)f10 * f22;
uint128_t o30 = (uint128_t)f10 * f23;
uint128_t o40 = (uint128_t)f10 * f24;
uint128_t o010 = o00 + (uint128_t)f11 * tmp14;
uint128_t o110 = o15 + (uint128_t)f11 * f20;
uint128_t o210 = o25 + (uint128_t)f11 * f21;
uint128_t o310 = o30 + (uint128_t)f11 * f22;
uint128_t o410 = o40 + (uint128_t)f11 * f23;
uint128_t o020 = o010 + (uint128_t)f12 * tmp13;
uint128_t o120 = o110 + (uint128_t)f12 * tmp14;
uint128_t o220 = o210 + (uint128_t)f12 * f20;
uint128_t o320 = o310 + (uint128_t)f12 * f21;
uint128_t o420 = o410 + (uint128_t)f12 * f22;
uint128_t o030 = o020 + (uint128_t)f13 * tmp12;
uint128_t o130 = o120 + (uint128_t)f13 * tmp13;
uint128_t o230 = o220 + (uint128_t)f13 * tmp14;
uint128_t o330 = o320 + (uint128_t)f13 * f20;
uint128_t o430 = o420 + (uint128_t)f13 * f21;
uint128_t o040 = o030 + (uint128_t)f14 * tmp11;
uint128_t o140 = o130 + (uint128_t)f14 * tmp12;
uint128_t o240 = o230 + (uint128_t)f14 * tmp13;
uint128_t o340 = o330 + (uint128_t)f14 * tmp14;
uint128_t o440 = o430 + (uint128_t)f14 * f20;
uint128_t tmp_w10 = o040;
uint128_t tmp_w11 = o140;
uint128_t tmp_w12 = o240;
uint128_t tmp_w13 = o340;
uint128_t tmp_w14 = o440;
uint128_t o0 = (uint128_t)f30 * f40;
uint128_t o1 = (uint128_t)f30 * f41;
uint128_t o2 = (uint128_t)f30 * f42;
uint128_t o3 = (uint128_t)f30 * f43;
uint128_t o4 = (uint128_t)f30 * f44;
uint128_t o01 = o0 + (uint128_t)f31 * tmp24;
uint128_t o111 = o1 + (uint128_t)f31 * f40;
uint128_t o211 = o2 + (uint128_t)f31 * f41;
uint128_t o31 = o3 + (uint128_t)f31 * f42;
uint128_t o41 = o4 + (uint128_t)f31 * f43;
uint128_t o02 = o01 + (uint128_t)f32 * tmp23;
uint128_t o121 = o111 + (uint128_t)f32 * tmp24;
uint128_t o221 = o211 + (uint128_t)f32 * f40;
uint128_t o32 = o31 + (uint128_t)f32 * f41;
uint128_t o42 = o41 + (uint128_t)f32 * f42;
uint128_t o03 = o02 + (uint128_t)f33 * tmp22;
uint128_t o131 = o121 + (uint128_t)f33 * tmp23;
uint128_t o231 = o221 + (uint128_t)f33 * tmp24;
uint128_t o33 = o32 + (uint128_t)f33 * f40;
uint128_t o43 = o42 + (uint128_t)f33 * f41;
uint128_t o04 = o03 + (uint128_t)f34 * tmp21;
uint128_t o141 = o131 + (uint128_t)f34 * tmp22;
uint128_t o241 = o231 + (uint128_t)f34 * tmp23;
uint128_t o34 = o33 + (uint128_t)f34 * tmp24;
uint128_t o44 = o43 + (uint128_t)f34 * f40;
uint128_t tmp_w20 = o04;
uint128_t tmp_w21 = o141;
uint128_t tmp_w22 = o241;
uint128_t tmp_w23 = o34;
uint128_t tmp_w24 = o44;
uint128_t l_ = tmp_w10 + (uint128_t)(uint64_t)0U;
uint64_t tmp00 = (uint64_t)l_ & (uint64_t)0x7ffffffffffffU;
uint64_t c00 = (uint64_t)(l_ >> (uint32_t)51U);
uint128_t l_0 = tmp_w11 + (uint128_t)c00;
uint64_t tmp10 = (uint64_t)l_0 & (uint64_t)0x7ffffffffffffU;
uint64_t c10 = (uint64_t)(l_0 >> (uint32_t)51U);
uint128_t l_1 = tmp_w12 + (uint128_t)c10;
uint64_t tmp20 = (uint64_t)l_1 & (uint64_t)0x7ffffffffffffU;
uint64_t c20 = (uint64_t)(l_1 >> (uint32_t)51U);
uint128_t l_2 = tmp_w13 + (uint128_t)c20;
uint64_t tmp30 = (uint64_t)l_2 & (uint64_t)0x7ffffffffffffU;
uint64_t c30 = (uint64_t)(l_2 >> (uint32_t)51U);
uint128_t l_3 = tmp_w14 + (uint128_t)c30;
uint64_t tmp40 = (uint64_t)l_3 & (uint64_t)0x7ffffffffffffU;
uint64_t c40 = (uint64_t)(l_3 >> (uint32_t)51U);
uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
uint64_t c50 = l_4 >> (uint32_t)51U;
uint64_t o100 = tmp0_;
uint64_t o112 = tmp10 + c50;
uint64_t o122 = tmp20;
uint64_t o132 = tmp30;
uint64_t o142 = tmp40;
uint128_t l_5 = tmp_w20 + (uint128_t)(uint64_t)0U;
uint64_t tmp0 = (uint64_t)l_5 & (uint64_t)0x7ffffffffffffU;
uint64_t c0 = (uint64_t)(l_5 >> (uint32_t)51U);
uint128_t l_6 = tmp_w21 + (uint128_t)c0;
uint64_t tmp1 = (uint64_t)l_6 & (uint64_t)0x7ffffffffffffU;
uint64_t c1 = (uint64_t)(l_6 >> (uint32_t)51U);
uint128_t l_7 = tmp_w22 + (uint128_t)c1;
uint64_t tmp2 = (uint64_t)l_7 & (uint64_t)0x7ffffffffffffU;
uint64_t c2 = (uint64_t)(l_7 >> (uint32_t)51U);
uint128_t l_8 = tmp_w23 + (uint128_t)c2;
uint64_t tmp3 = (uint64_t)l_8 & (uint64_t)0x7ffffffffffffU;
uint64_t c3 = (uint64_t)(l_8 >> (uint32_t)51U);
uint128_t l_9 = tmp_w24 + (uint128_t)c3;
uint64_t tmp4 = (uint64_t)l_9 & (uint64_t)0x7ffffffffffffU;
uint64_t c4 = (uint64_t)(l_9 >> (uint32_t)51U);
uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
uint64_t c5 = l_10 >> (uint32_t)51U;
uint64_t o200 = tmp0_0;
uint64_t o212 = tmp1 + c5;
uint64_t o222 = tmp2;
uint64_t o232 = tmp3;
uint64_t o242 = tmp4;
uint64_t o10 = o100;
uint64_t o11 = o112;
uint64_t o12 = o122;
uint64_t o13 = o132;
uint64_t o14 = o142;
uint64_t o20 = o200;
uint64_t o21 = o212;
uint64_t o22 = o222;
uint64_t o23 = o232;
uint64_t o24 = o242;
out[0U] = o10;
out[1U] = o11;
out[2U] = o12;
out[3U] = o13;
out[4U] = o14;
out[5U] = o20;
out[6U] = o21;
out[7U] = o22;
out[8U] = o23;
out[9U] = o24;
}
inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
uint64_t f14 = f1[4U];
uint128_t tmp_w0 = (uint128_t)f2 * f10;
uint128_t tmp_w1 = (uint128_t)f2 * f11;
uint128_t tmp_w2 = (uint128_t)f2 * f12;
uint128_t tmp_w3 = (uint128_t)f2 * f13;
uint128_t tmp_w4 = (uint128_t)f2 * f14;
uint128_t l_ = tmp_w0 + (uint128_t)(uint64_t)0U;
uint64_t tmp0 = (uint64_t)l_ & (uint64_t)0x7ffffffffffffU;
uint64_t c0 = (uint64_t)(l_ >> (uint32_t)51U);
uint128_t l_0 = tmp_w1 + (uint128_t)c0;
uint64_t tmp1 = (uint64_t)l_0 & (uint64_t)0x7ffffffffffffU;
uint64_t c1 = (uint64_t)(l_0 >> (uint32_t)51U);
uint128_t l_1 = tmp_w2 + (uint128_t)c1;
uint64_t tmp2 = (uint64_t)l_1 & (uint64_t)0x7ffffffffffffU;
uint64_t c2 = (uint64_t)(l_1 >> (uint32_t)51U);
uint128_t l_2 = tmp_w3 + (uint128_t)c2;
uint64_t tmp3 = (uint64_t)l_2 & (uint64_t)0x7ffffffffffffU;
uint64_t c3 = (uint64_t)(l_2 >> (uint32_t)51U);
uint128_t l_3 = tmp_w4 + (uint128_t)c3;
uint64_t tmp4 = (uint64_t)l_3 & (uint64_t)0x7ffffffffffffU;
uint64_t c4 = (uint64_t)(l_3 >> (uint32_t)51U);
uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
uint64_t c5 = l_4 >> (uint32_t)51U;
uint64_t o0 = tmp0_;
uint64_t o1 = tmp1 + c5;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
out[4U] = o4;
}
inline void
Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, uint128_t *uu____6941)
{
uint64_t f0 = f[0U];
uint64_t f1 = f[1U];
uint64_t f2 = f[2U];
uint64_t f3 = f[3U];
uint64_t f4 = f[4U];
uint64_t d0 = (uint64_t)2U * f0;
uint64_t d1 = (uint64_t)2U * f1;
uint64_t d2 = (uint64_t)38U * f2;
uint64_t d3 = (uint64_t)19U * f3;
uint64_t d419 = (uint64_t)19U * f4;
uint64_t d4 = (uint64_t)2U * d419;
uint128_t s0 = (uint128_t)f0 * f0 + (uint128_t)d4 * f1 + (uint128_t)d2 * f3;
uint128_t s1 = (uint128_t)d0 * f1 + (uint128_t)d4 * f2 + (uint128_t)d3 * f3;
uint128_t s2 = (uint128_t)d0 * f2 + (uint128_t)f1 * f1 + (uint128_t)d4 * f3;
uint128_t s3 = (uint128_t)d0 * f3 + (uint128_t)d1 * f2 + (uint128_t)f4 * d419;
uint128_t s4 = (uint128_t)d0 * f4 + (uint128_t)d1 * f3 + (uint128_t)f2 * f2;
uint128_t o00 = s0;
uint128_t o10 = s1;
uint128_t o20 = s2;
uint128_t o30 = s3;
uint128_t o40 = s4;
uint128_t l_ = o00 + (uint128_t)(uint64_t)0U;
uint64_t tmp0 = (uint64_t)l_ & (uint64_t)0x7ffffffffffffU;
uint64_t c0 = (uint64_t)(l_ >> (uint32_t)51U);
uint128_t l_0 = o10 + (uint128_t)c0;
uint64_t tmp1 = (uint64_t)l_0 & (uint64_t)0x7ffffffffffffU;
uint64_t c1 = (uint64_t)(l_0 >> (uint32_t)51U);
uint128_t l_1 = o20 + (uint128_t)c1;
uint64_t tmp2 = (uint64_t)l_1 & (uint64_t)0x7ffffffffffffU;
uint64_t c2 = (uint64_t)(l_1 >> (uint32_t)51U);
uint128_t l_2 = o30 + (uint128_t)c2;
uint64_t tmp3 = (uint64_t)l_2 & (uint64_t)0x7ffffffffffffU;
uint64_t c3 = (uint64_t)(l_2 >> (uint32_t)51U);
uint128_t l_3 = o40 + (uint128_t)c3;
uint64_t tmp4 = (uint64_t)l_3 & (uint64_t)0x7ffffffffffffU;
uint64_t c4 = (uint64_t)(l_3 >> (uint32_t)51U);
uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
uint64_t c5 = l_4 >> (uint32_t)51U;
uint64_t o0 = tmp0_;
uint64_t o1 = tmp1 + c5;
uint64_t o2 = tmp2;
uint64_t o3 = tmp3;
uint64_t o4 = tmp4;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
out[4U] = o4;
}
inline static void
Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, uint128_t *uu____7692)
{
uint64_t f10 = f[0U];
uint64_t f11 = f[1U];
uint64_t f12 = f[2U];
uint64_t f13 = f[3U];
uint64_t f14 = f[4U];
uint64_t f20 = f[5U];
uint64_t f21 = f[6U];
uint64_t f22 = f[7U];
uint64_t f23 = f[8U];
uint64_t f24 = f[9U];
uint64_t d00 = (uint64_t)2U * f10;
uint64_t d10 = (uint64_t)2U * f11;
uint64_t d20 = (uint64_t)38U * f12;
uint64_t d30 = (uint64_t)19U * f13;
uint64_t d4190 = (uint64_t)19U * f14;
uint64_t d40 = (uint64_t)2U * d4190;
uint128_t s00 = (uint128_t)f10 * f10 + (uint128_t)d40 * f11 + (uint128_t)d20 * f13;
uint128_t s10 = (uint128_t)d00 * f11 + (uint128_t)d40 * f12 + (uint128_t)d30 * f13;
uint128_t s20 = (uint128_t)d00 * f12 + (uint128_t)f11 * f11 + (uint128_t)d40 * f13;
uint128_t s30 = (uint128_t)d00 * f13 + (uint128_t)d10 * f12 + (uint128_t)f14 * d4190;
uint128_t s40 = (uint128_t)d00 * f14 + (uint128_t)d10 * f13 + (uint128_t)f12 * f12;
uint128_t o100 = s00;
uint128_t o110 = s10;
uint128_t o120 = s20;
uint128_t o130 = s30;
uint128_t o140 = s40;
uint64_t d0 = (uint64_t)2U * f20;
uint64_t d1 = (uint64_t)2U * f21;
uint64_t d2 = (uint64_t)38U * f22;
uint64_t d3 = (uint64_t)19U * f23;
uint64_t d419 = (uint64_t)19U * f24;
uint64_t d4 = (uint64_t)2U * d419;
uint128_t s0 = (uint128_t)f20 * f20 + (uint128_t)d4 * f21 + (uint128_t)d2 * f23;
uint128_t s1 = (uint128_t)d0 * f21 + (uint128_t)d4 * f22 + (uint128_t)d3 * f23;
uint128_t s2 = (uint128_t)d0 * f22 + (uint128_t)f21 * f21 + (uint128_t)d4 * f23;
uint128_t s3 = (uint128_t)d0 * f23 + (uint128_t)d1 * f22 + (uint128_t)f24 * d419;
uint128_t s4 = (uint128_t)d0 * f24 + (uint128_t)d1 * f23 + (uint128_t)f22 * f22;
uint128_t o200 = s0;
uint128_t o210 = s1;
uint128_t o220 = s2;
uint128_t o230 = s3;
uint128_t o240 = s4;
uint128_t l_ = o100 + (uint128_t)(uint64_t)0U;
uint64_t tmp00 = (uint64_t)l_ & (uint64_t)0x7ffffffffffffU;
uint64_t c00 = (uint64_t)(l_ >> (uint32_t)51U);
uint128_t l_0 = o110 + (uint128_t)c00;
uint64_t tmp10 = (uint64_t)l_0 & (uint64_t)0x7ffffffffffffU;
uint64_t c10 = (uint64_t)(l_0 >> (uint32_t)51U);
uint128_t l_1 = o120 + (uint128_t)c10;
uint64_t tmp20 = (uint64_t)l_1 & (uint64_t)0x7ffffffffffffU;
uint64_t c20 = (uint64_t)(l_1 >> (uint32_t)51U);
uint128_t l_2 = o130 + (uint128_t)c20;
uint64_t tmp30 = (uint64_t)l_2 & (uint64_t)0x7ffffffffffffU;
uint64_t c30 = (uint64_t)(l_2 >> (uint32_t)51U);
uint128_t l_3 = o140 + (uint128_t)c30;
uint64_t tmp40 = (uint64_t)l_3 & (uint64_t)0x7ffffffffffffU;
uint64_t c40 = (uint64_t)(l_3 >> (uint32_t)51U);
uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
uint64_t c50 = l_4 >> (uint32_t)51U;
uint64_t o101 = tmp0_;
uint64_t o111 = tmp10 + c50;
uint64_t o121 = tmp20;
uint64_t o131 = tmp30;
uint64_t o141 = tmp40;
uint128_t l_5 = o200 + (uint128_t)(uint64_t)0U;
uint64_t tmp0 = (uint64_t)l_5 & (uint64_t)0x7ffffffffffffU;
uint64_t c0 = (uint64_t)(l_5 >> (uint32_t)51U);
uint128_t l_6 = o210 + (uint128_t)c0;
uint64_t tmp1 = (uint64_t)l_6 & (uint64_t)0x7ffffffffffffU;
uint64_t c1 = (uint64_t)(l_6 >> (uint32_t)51U);
uint128_t l_7 = o220 + (uint128_t)c1;
uint64_t tmp2 = (uint64_t)l_7 & (uint64_t)0x7ffffffffffffU;
uint64_t c2 = (uint64_t)(l_7 >> (uint32_t)51U);
uint128_t l_8 = o230 + (uint128_t)c2;
uint64_t tmp3 = (uint64_t)l_8 & (uint64_t)0x7ffffffffffffU;
uint64_t c3 = (uint64_t)(l_8 >> (uint32_t)51U);
uint128_t l_9 = o240 + (uint128_t)c3;
uint64_t tmp4 = (uint64_t)l_9 & (uint64_t)0x7ffffffffffffU;
uint64_t c4 = (uint64_t)(l_9 >> (uint32_t)51U);
uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
uint64_t c5 = l_10 >> (uint32_t)51U;
uint64_t o201 = tmp0_0;
uint64_t o211 = tmp1 + c5;
uint64_t o221 = tmp2;
uint64_t o231 = tmp3;
uint64_t o241 = tmp4;
uint64_t o10 = o101;
uint64_t o11 = o111;
uint64_t o12 = o121;
uint64_t o13 = o131;
uint64_t o14 = o141;
uint64_t o20 = o201;
uint64_t o21 = o211;
uint64_t o22 = o221;
uint64_t o23 = o231;
uint64_t o24 = o241;
out[0U] = o10;
out[1U] = o11;
out[2U] = o12;
out[3U] = o13;
out[4U] = o14;
out[5U] = o20;
out[6U] = o21;
out[7U] = o22;
out[8U] = o23;
out[9U] = o24;
}
static void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint64_t *f)
{
uint64_t f0 = f[0U];
uint64_t f1 = f[1U];
uint64_t f2 = f[2U];
uint64_t f3 = f[3U];
uint64_t f4 = f[4U];
uint64_t l_ = f0 + (uint64_t)0U;
uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
uint64_t c0 = l_ >> (uint32_t)51U;
uint64_t l_0 = f1 + c0;
uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
uint64_t c1 = l_0 >> (uint32_t)51U;
uint64_t l_1 = f2 + c1;
uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
uint64_t c2 = l_1 >> (uint32_t)51U;
uint64_t l_2 = f3 + c2;
uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
uint64_t c3 = l_2 >> (uint32_t)51U;
uint64_t l_3 = f4 + c3;
uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
uint64_t c4 = l_3 >> (uint32_t)51U;
uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
uint64_t c5 = l_4 >> (uint32_t)51U;
uint64_t f01 = tmp0_;
uint64_t f11 = tmp1 + c5;
uint64_t f21 = tmp2;
uint64_t f31 = tmp3;
uint64_t f41 = tmp4;
uint64_t m0 = FStar_UInt64_gte_mask(f01, (uint64_t)0x7ffffffffffedU);
uint64_t m1 = FStar_UInt64_eq_mask(f11, (uint64_t)0x7ffffffffffffU);
uint64_t m2 = FStar_UInt64_eq_mask(f21, (uint64_t)0x7ffffffffffffU);
uint64_t m3 = FStar_UInt64_eq_mask(f31, (uint64_t)0x7ffffffffffffU);
uint64_t m4 = FStar_UInt64_eq_mask(f41, (uint64_t)0x7ffffffffffffU);
uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
uint64_t f0_ = f01 - (mask & (uint64_t)0x7ffffffffffedU);
uint64_t f1_ = f11 - (mask & (uint64_t)0x7ffffffffffffU);
uint64_t f2_ = f21 - (mask & (uint64_t)0x7ffffffffffffU);
uint64_t f3_ = f31 - (mask & (uint64_t)0x7ffffffffffffU);
uint64_t f4_ = f41 - (mask & (uint64_t)0x7ffffffffffffU);
uint64_t f02 = f0_;
uint64_t f12 = f1_;
uint64_t f22 = f2_;
uint64_t f32 = f3_;
uint64_t f42 = f4_;
uint64_t o00 = f02 | f12 << (uint32_t)51U;
uint64_t o10 = f12 >> (uint32_t)13U | f22 << (uint32_t)38U;
uint64_t o20 = f22 >> (uint32_t)26U | f32 << (uint32_t)25U;
uint64_t o30 = f32 >> (uint32_t)39U | f42 << (uint32_t)12U;
uint64_t o0 = o00;
uint64_t o1 = o10;
uint64_t o2 = o20;
uint64_t o3 = o30;
u64s[0U] = o0;
u64s[1U] = o1;
u64s[2U] = o2;
u64s[3U] = o3;
}
inline static void
Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
{
uint64_t mask = (uint64_t)0U - bit;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U)
{
uint64_t dummy = mask & (p1[i] ^ p2[i]);
p1[i] = p1[i] ^ dummy;
p2[i] = p2[i] ^ dummy;
}
}
static uint8_t
Hacl_Curve25519_51_g25519[32U] =
{
(uint8_t)9U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U
};
static void
Hacl_Curve25519_51_point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint128_t *tmp2)
{
uint64_t *nq = p01_tmp1;
uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
uint64_t *x1 = q;
uint64_t *x2 = nq;
uint64_t *z2 = nq + (uint32_t)5U;
uint64_t *z3 = nq_p1 + (uint32_t)5U;
uint64_t *a = tmp1;
uint64_t *b = tmp1 + (uint32_t)5U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)10U;
Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
uint64_t *x3 = nq_p1;
uint64_t *z31 = nq_p1 + (uint32_t)5U;
uint64_t *d0 = dc;
uint64_t *c0 = dc + (uint32_t)5U;
Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31);
Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31);
Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2);
Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0);
Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0);
uint64_t *a1 = tmp1;
uint64_t *b1 = tmp1 + (uint32_t)5U;
uint64_t *d = tmp1 + (uint32_t)10U;
uint64_t *c = tmp1 + (uint32_t)15U;
uint64_t *ab1 = tmp1;
uint64_t *dc1 = tmp1 + (uint32_t)10U;
Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2);
Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2);
a1[0U] = c[0U];
a1[1U] = c[1U];
a1[2U] = c[2U];
a1[3U] = c[3U];
a1[4U] = c[4U];
Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U);
Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d);
Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2);
Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2);
}
static void Hacl_Curve25519_51_point_double(uint64_t *nq, uint64_t *tmp1, uint128_t *tmp2)
{
uint64_t *x2 = nq;
uint64_t *z2 = nq + (uint32_t)5U;
uint64_t *a = tmp1;
uint64_t *b = tmp1 + (uint32_t)5U;
uint64_t *d = tmp1 + (uint32_t)10U;
uint64_t *c = tmp1 + (uint32_t)15U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)10U;
Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2);
a[0U] = c[0U];
a[1U] = c[1U];
a[2U] = c[2U];
a[3U] = c[3U];
a[4U] = c[4U];
Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U);
Hacl_Impl_Curve25519_Field51_fadd(b, b, d);
Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2);
}
static void Hacl_Curve25519_51_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
{
uint128_t tmp2[10U];
for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
tmp2[_i] = (uint128_t)(uint64_t)0U;
uint64_t p01_tmp1_swap[41U] = { 0U };
uint64_t *p0 = p01_tmp1_swap;
uint64_t *p01 = p01_tmp1_swap;
uint64_t *p03 = p01;
uint64_t *p11 = p01 + (uint32_t)10U;
memcpy(p11, init1, (uint32_t)10U * sizeof init1[0U]);
uint64_t *x0 = p03;
uint64_t *z0 = p03 + (uint32_t)5U;
x0[0U] = (uint64_t)1U;
x0[1U] = (uint64_t)0U;
x0[2U] = (uint64_t)0U;
x0[3U] = (uint64_t)0U;
x0[4U] = (uint64_t)0U;
z0[0U] = (uint64_t)0U;
z0[1U] = (uint64_t)0U;
z0[2U] = (uint64_t)0U;
z0[3U] = (uint64_t)0U;
z0[4U] = (uint64_t)0U;
uint64_t *p01_tmp1 = p01_tmp1_swap;
uint64_t *p01_tmp11 = p01_tmp1_swap;
uint64_t *nq1 = p01_tmp1_swap;
uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq1, nq_p11);
Hacl_Curve25519_51_point_add_and_double(init1, p01_tmp11, tmp2);
swap1[0U] = (uint64_t)1U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i = i + (uint32_t)1U)
{
uint64_t *p01_tmp12 = p01_tmp1_swap;
uint64_t *swap2 = p01_tmp1_swap + (uint32_t)40U;
uint64_t *nq2 = p01_tmp12;
uint64_t *nq_p12 = p01_tmp12 + (uint32_t)10U;
uint64_t
bit =
(uint64_t)(key[((uint32_t)253U - i)
/ (uint32_t)8U]
>> ((uint32_t)253U - i) % (uint32_t)8U
& (uint8_t)1U);
uint64_t sw = swap2[0U] ^ bit;
Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12);
Hacl_Curve25519_51_point_add_and_double(init1, p01_tmp12, tmp2);
swap2[0U] = bit;
}
uint64_t sw = swap1[0U];
Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11);
uint64_t *nq10 = p01_tmp1;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
memcpy(out, p0, (uint32_t)10U * sizeof p0[0U]);
}
void Hacl_Curve25519_51_fsquare_times(uint64_t *o, uint64_t *inp, uint128_t *tmp, uint32_t n1)
{
Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp);
for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i = i + (uint32_t)1U)
{
Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp);
}
}
void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, uint128_t *tmp)
{
uint64_t t1[20U] = { 0U };
uint64_t *a = t1;
uint64_t *b = t1 + (uint32_t)5U;
uint64_t *c = t1 + (uint32_t)10U;
uint64_t *t00 = t1 + (uint32_t)15U;
uint128_t *tmp1 = tmp;
Hacl_Curve25519_51_fsquare_times(a, i, tmp1, (uint32_t)1U);
Hacl_Curve25519_51_fsquare_times(t00, a, tmp1, (uint32_t)2U);
Hacl_Impl_Curve25519_Field51_fmul(b, t00, i, tmp);
Hacl_Impl_Curve25519_Field51_fmul(a, b, a, tmp);
Hacl_Curve25519_51_fsquare_times(t00, a, tmp1, (uint32_t)1U);
Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)5U);
Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)10U);
Hacl_Impl_Curve25519_Field51_fmul(c, t00, b, tmp);
Hacl_Curve25519_51_fsquare_times(t00, c, tmp1, (uint32_t)20U);
Hacl_Impl_Curve25519_Field51_fmul(t00, t00, c, tmp);
Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)10U);
Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)50U);
Hacl_Impl_Curve25519_Field51_fmul(c, t00, b, tmp);
Hacl_Curve25519_51_fsquare_times(t00, c, tmp1, (uint32_t)100U);
Hacl_Impl_Curve25519_Field51_fmul(t00, t00, c, tmp);
Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)50U);
Hacl_Impl_Curve25519_Field51_fmul(t00, t00, b, tmp);
Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)5U);
uint64_t *a0 = t1;
uint64_t *t0 = t1 + (uint32_t)15U;
Hacl_Impl_Curve25519_Field51_fmul(o, t0, a0, tmp);
}
static void Hacl_Curve25519_51_encode_point(uint8_t *o, uint64_t *i)
{
uint64_t *x = i;
uint64_t *z = i + (uint32_t)5U;
uint64_t tmp[5U] = { 0U };
uint64_t u64s[4U] = { 0U };
uint128_t tmp_w[10U];
for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
tmp_w[_i] = (uint128_t)(uint64_t)0U;
Hacl_Curve25519_51_finv(tmp, z, tmp_w);
Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w);
Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp);
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U)
{
store64_le(o + i0 * (uint32_t)8U, u64s[i0]);
}
}
void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint64_t init1[10U] = { 0U };
uint64_t tmp[4U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
{
uint64_t *os = tmp;
uint8_t *bj = pub + i * (uint32_t)8U;
uint64_t u = load64_le(bj);
uint64_t r = u;
uint64_t x = r;
os[i] = x;
}
uint64_t tmp3 = tmp[3U];
tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
uint64_t *x = init1;
uint64_t *z = init1 + (uint32_t)5U;
z[0U] = (uint64_t)1U;
z[1U] = (uint64_t)0U;
z[2U] = (uint64_t)0U;
z[3U] = (uint64_t)0U;
z[4U] = (uint64_t)0U;
uint64_t f0l = tmp[0U] & (uint64_t)0x7ffffffffffffU;
uint64_t f0h = tmp[0U] >> (uint32_t)51U;
uint64_t f1l = (tmp[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
uint64_t f1h = tmp[1U] >> (uint32_t)38U;
uint64_t f2l = (tmp[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
uint64_t f2h = tmp[2U] >> (uint32_t)25U;
uint64_t f3l = (tmp[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
uint64_t f3h = tmp[3U] >> (uint32_t)12U;
x[0U] = f0l;
x[1U] = f0h | f1l;
x[2U] = f1h | f2l;
x[3U] = f2h | f3l;
x[4U] = f3h;
Hacl_Curve25519_51_montgomery_ladder(init1, priv, init1);
Hacl_Curve25519_51_encode_point(out, init1);
}
void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
{
uint8_t basepoint[32U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U)
{
uint8_t *os = basepoint;
uint8_t x = Hacl_Curve25519_51_g25519[i];
os[i] = x;
}
Hacl_Curve25519_51_scalarmult(pub, priv, basepoint);
}
bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint8_t zeros1[32U] = { 0U };
Hacl_Curve25519_51_scalarmult(out, priv, pub);
uint8_t res = (uint8_t)255U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U)
{
uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros1[i]);
res = uu____0 & res;
}
uint8_t z = res;
bool r = z == (uint8_t)255U;
return !r;
}

63
3rdparty/hacl-star/evercrypt/Hacl_Curve25519_51.h vendored Normal file

@@ -0,0 +1,63 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Curve25519_51_H
#define __Hacl_Curve25519_51_H
#include "Hacl_Kremlib.h"
void Hacl_Impl_Curve25519_Field51_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2);
void Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2);
void
Hacl_Impl_Curve25519_Field51_fmul(
uint64_t *out,
uint64_t *f1,
uint64_t *f2,
uint128_t *uu____2959
);
void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f1, uint64_t f2);
void Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, uint128_t *uu____6941);
void Hacl_Curve25519_51_fsquare_times(uint64_t *o, uint64_t *inp, uint128_t *tmp, uint32_t n1);
void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, uint128_t *tmp);
void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub);
void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv);
bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub);
#define __Hacl_Curve25519_51_H_DEFINED
#endif
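
Not part of the vendored header above: a Diffie-Hellman sketch over the 51-bit-limb X25519 declared here. The fixed private keys are placeholders; real callers must use 32 fresh random bytes each.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include "Hacl_Curve25519_51.h"

/* Illustrative only: both sides derive public keys, then the same shared secret. */
int main(void)
{
  uint8_t a_priv[32] = { 1U }, b_priv[32] = { 2U };   /* dummy scalars */
  uint8_t a_pub[32], b_pub[32];
  uint8_t a_secret[32], b_secret[32];

  Hacl_Curve25519_51_secret_to_public(a_pub, a_priv);
  Hacl_Curve25519_51_secret_to_public(b_pub, b_priv);

  /* ecdh returns false when the shared point is all zeroes (low-order peer key). */
  bool ok = Hacl_Curve25519_51_ecdh(a_secret, a_priv, b_pub)
    && Hacl_Curve25519_51_ecdh(b_secret, b_priv, a_pub);

  printf("%s\n",
    ok && memcmp(a_secret, b_secret, 32) == 0 ? "shared secrets match" : "FAILED");
  return 0;
}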

407
3rdparty/hacl-star/evercrypt/Hacl_Curve25519_64.c vendored Normal file

@@ -0,0 +1,407 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Hacl_Curve25519_64.h"
inline static uint64_t
Hacl_Impl_Curve25519_Field64_Vale_add1(uint64_t *out1, uint64_t *f1, uint64_t f2)
{
#if EVERCRYPT_TARGETCONFIG_GCC
return add1_inline(out1, f1, f2);
#else
uint64_t scrut = add1(out1, f1, f2);
return scrut;
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_fadd(uint64_t *out1, uint64_t *f1, uint64_t *f2)
{
#if EVERCRYPT_TARGETCONFIG_GCC
fadd_inline(out1, f1, f2);
#else
uint64_t uu____0 = fadd_(out1, f1, f2);
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_fsub(uint64_t *out1, uint64_t *f1, uint64_t *f2)
{
#if EVERCRYPT_TARGETCONFIG_GCC
fsub_inline(out1, f1, f2);
#else
uint64_t uu____0 = fsub_(out1, f1, f2);
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_fmul(
uint64_t *out1,
uint64_t *f1,
uint64_t *f2,
uint64_t *tmp
)
{
#if EVERCRYPT_TARGETCONFIG_GCC
fmul_inline(tmp, f1, out1, f2);
#else
uint64_t uu____0 = fmul_(tmp, f1, out1, f2);
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_fmul2(
uint64_t *out1,
uint64_t *f1,
uint64_t *f2,
uint64_t *tmp
)
{
#if EVERCRYPT_TARGETCONFIG_GCC
fmul2_inline(tmp, f1, out1, f2);
#else
uint64_t uu____0 = fmul2(tmp, f1, out1, f2);
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_fmul1(uint64_t *out1, uint64_t *f1, uint64_t f2)
{
#if EVERCRYPT_TARGETCONFIG_GCC
fmul1_inline(out1, f1, f2);
#else
uint64_t uu____0 = fmul1(out1, f1, f2);
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_fsqr(uint64_t *out1, uint64_t *f1, uint64_t *tmp)
{
#if EVERCRYPT_TARGETCONFIG_GCC
fsqr_inline(tmp, f1, out1);
#else
uint64_t uu____0 = fsqr(tmp, f1, out1);
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_fsqr2(uint64_t *out1, uint64_t *f, uint64_t *tmp)
{
#if EVERCRYPT_TARGETCONFIG_GCC
fsqr2_inline(tmp, f, out1);
#else
uint64_t uu____0 = fsqr2(tmp, f, out1);
#endif
}
inline static void
Hacl_Impl_Curve25519_Field64_Vale_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
{
#if EVERCRYPT_TARGETCONFIG_GCC
cswap2_inline(bit, p1, p2);
#else
uint64_t uu____0 = cswap2(bit, p1, p2);
#endif
}
static uint8_t
Hacl_Curve25519_64_g25519[32U] =
{
(uint8_t)9U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U
};
static void
Hacl_Curve25519_64_point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2)
{
uint64_t *nq = p01_tmp1;
uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
uint64_t *x1 = q;
uint64_t *x2 = nq;
uint64_t *z2 = nq + (uint32_t)4U;
uint64_t *z3 = nq_p1 + (uint32_t)4U;
uint64_t *a = tmp1;
uint64_t *b = tmp1 + (uint32_t)4U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)8U;
Hacl_Impl_Curve25519_Field64_Vale_fadd(a, x2, z2);
Hacl_Impl_Curve25519_Field64_Vale_fsub(b, x2, z2);
uint64_t *x3 = nq_p1;
uint64_t *z31 = nq_p1 + (uint32_t)4U;
uint64_t *d0 = dc;
uint64_t *c0 = dc + (uint32_t)4U;
Hacl_Impl_Curve25519_Field64_Vale_fadd(c0, x3, z31);
Hacl_Impl_Curve25519_Field64_Vale_fsub(d0, x3, z31);
Hacl_Impl_Curve25519_Field64_Vale_fmul2(dc, dc, ab, tmp2);
Hacl_Impl_Curve25519_Field64_Vale_fadd(x3, d0, c0);
Hacl_Impl_Curve25519_Field64_Vale_fsub(z31, d0, c0);
uint64_t *a1 = tmp1;
uint64_t *b1 = tmp1 + (uint32_t)4U;
uint64_t *d = tmp1 + (uint32_t)8U;
uint64_t *c = tmp1 + (uint32_t)12U;
uint64_t *ab1 = tmp1;
uint64_t *dc1 = tmp1 + (uint32_t)8U;
Hacl_Impl_Curve25519_Field64_Vale_fsqr2(dc1, ab1, tmp2);
Hacl_Impl_Curve25519_Field64_Vale_fsqr2(nq_p1, nq_p1, tmp2);
a1[0U] = c[0U];
a1[1U] = c[1U];
a1[2U] = c[2U];
a1[3U] = c[3U];
Hacl_Impl_Curve25519_Field64_Vale_fsub(c, d, c);
Hacl_Impl_Curve25519_Field64_Vale_fmul1(b1, c, (uint64_t)121665U);
Hacl_Impl_Curve25519_Field64_Vale_fadd(b1, b1, d);
Hacl_Impl_Curve25519_Field64_Vale_fmul2(nq, dc1, ab1, tmp2);
Hacl_Impl_Curve25519_Field64_Vale_fmul(z3, z3, x1, tmp2);
}
static void Hacl_Curve25519_64_point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
{
uint64_t *x2 = nq;
uint64_t *z2 = nq + (uint32_t)4U;
uint64_t *a = tmp1;
uint64_t *b = tmp1 + (uint32_t)4U;
uint64_t *d = tmp1 + (uint32_t)8U;
uint64_t *c = tmp1 + (uint32_t)12U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)8U;
Hacl_Impl_Curve25519_Field64_Vale_fadd(a, x2, z2);
Hacl_Impl_Curve25519_Field64_Vale_fsub(b, x2, z2);
Hacl_Impl_Curve25519_Field64_Vale_fsqr2(dc, ab, tmp2);
a[0U] = c[0U];
a[1U] = c[1U];
a[2U] = c[2U];
a[3U] = c[3U];
Hacl_Impl_Curve25519_Field64_Vale_fsub(c, d, c);
Hacl_Impl_Curve25519_Field64_Vale_fmul1(b, c, (uint64_t)121665U);
Hacl_Impl_Curve25519_Field64_Vale_fadd(b, b, d);
Hacl_Impl_Curve25519_Field64_Vale_fmul2(nq, dc, ab, tmp2);
}
static void Hacl_Curve25519_64_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
{
uint64_t tmp2[16U] = { 0U };
uint64_t p01_tmp1_swap[33U] = { 0U };
uint64_t *p0 = p01_tmp1_swap;
uint64_t *p01 = p01_tmp1_swap;
uint64_t *p03 = p01;
uint64_t *p11 = p01 + (uint32_t)8U;
memcpy(p11, init1, (uint32_t)8U * sizeof init1[0U]);
uint64_t *x0 = p03;
uint64_t *z0 = p03 + (uint32_t)4U;
x0[0U] = (uint64_t)1U;
x0[1U] = (uint64_t)0U;
x0[2U] = (uint64_t)0U;
x0[3U] = (uint64_t)0U;
z0[0U] = (uint64_t)0U;
z0[1U] = (uint64_t)0U;
z0[2U] = (uint64_t)0U;
z0[3U] = (uint64_t)0U;
uint64_t *p01_tmp1 = p01_tmp1_swap;
uint64_t *p01_tmp11 = p01_tmp1_swap;
uint64_t *nq1 = p01_tmp1_swap;
uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)8U;
uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U;
Hacl_Impl_Curve25519_Field64_Vale_cswap2((uint64_t)1U, nq1, nq_p11);
Hacl_Curve25519_64_point_add_and_double(init1, p01_tmp11, tmp2);
swap1[0U] = (uint64_t)1U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i = i + (uint32_t)1U)
{
uint64_t *p01_tmp12 = p01_tmp1_swap;
uint64_t *swap2 = p01_tmp1_swap + (uint32_t)32U;
uint64_t *nq2 = p01_tmp12;
uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U;
uint64_t
bit =
(uint64_t)(key[((uint32_t)253U - i)
/ (uint32_t)8U]
>> ((uint32_t)253U - i) % (uint32_t)8U
& (uint8_t)1U);
uint64_t sw = swap2[0U] ^ bit;
Hacl_Impl_Curve25519_Field64_Vale_cswap2(sw, nq2, nq_p12);
Hacl_Curve25519_64_point_add_and_double(init1, p01_tmp12, tmp2);
swap2[0U] = bit;
}
uint64_t sw = swap1[0U];
Hacl_Impl_Curve25519_Field64_Vale_cswap2(sw, nq1, nq_p11);
uint64_t *nq10 = p01_tmp1;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
Hacl_Curve25519_64_point_double(nq10, tmp1, tmp2);
Hacl_Curve25519_64_point_double(nq10, tmp1, tmp2);
Hacl_Curve25519_64_point_double(nq10, tmp1, tmp2);
memcpy(out, p0, (uint32_t)8U * sizeof p0[0U]);
}
static void
Hacl_Curve25519_64_fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n1)
{
Hacl_Impl_Curve25519_Field64_Vale_fsqr(o, inp, tmp);
for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i = i + (uint32_t)1U)
{
Hacl_Impl_Curve25519_Field64_Vale_fsqr(o, o, tmp);
}
}
static void Hacl_Curve25519_64_finv(uint64_t *o, uint64_t *i, uint64_t *tmp)
{
uint64_t t1[16U] = { 0U };
uint64_t *a = t1;
uint64_t *b = t1 + (uint32_t)4U;
uint64_t *c = t1 + (uint32_t)8U;
uint64_t *t00 = t1 + (uint32_t)12U;
uint64_t *tmp1 = tmp;
Hacl_Curve25519_64_fsquare_times(a, i, tmp1, (uint32_t)1U);
Hacl_Curve25519_64_fsquare_times(t00, a, tmp1, (uint32_t)2U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(b, t00, i, tmp);
Hacl_Impl_Curve25519_Field64_Vale_fmul(a, b, a, tmp);
Hacl_Curve25519_64_fsquare_times(t00, a, tmp1, (uint32_t)1U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(b, t00, b, tmp);
Hacl_Curve25519_64_fsquare_times(t00, b, tmp1, (uint32_t)5U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(b, t00, b, tmp);
Hacl_Curve25519_64_fsquare_times(t00, b, tmp1, (uint32_t)10U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(c, t00, b, tmp);
Hacl_Curve25519_64_fsquare_times(t00, c, tmp1, (uint32_t)20U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(t00, t00, c, tmp);
Hacl_Curve25519_64_fsquare_times(t00, t00, tmp1, (uint32_t)10U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(b, t00, b, tmp);
Hacl_Curve25519_64_fsquare_times(t00, b, tmp1, (uint32_t)50U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(c, t00, b, tmp);
Hacl_Curve25519_64_fsquare_times(t00, c, tmp1, (uint32_t)100U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(t00, t00, c, tmp);
Hacl_Curve25519_64_fsquare_times(t00, t00, tmp1, (uint32_t)50U);
Hacl_Impl_Curve25519_Field64_Vale_fmul(t00, t00, b, tmp);
Hacl_Curve25519_64_fsquare_times(t00, t00, tmp1, (uint32_t)5U);
uint64_t *a0 = t1;
uint64_t *t0 = t1 + (uint32_t)12U;
Hacl_Impl_Curve25519_Field64_Vale_fmul(o, t0, a0, tmp);
}
static void Hacl_Curve25519_64_store_felem(uint64_t *b, uint64_t *f)
{
uint64_t f30 = f[3U];
uint64_t top_bit0 = f30 >> (uint32_t)63U;
f[3U] = f30 & (uint64_t)0x7fffffffffffffffU;
uint64_t carry = Hacl_Impl_Curve25519_Field64_Vale_add1(f, f, (uint64_t)19U * top_bit0);
uint64_t f31 = f[3U];
uint64_t top_bit = f31 >> (uint32_t)63U;
f[3U] = f31 & (uint64_t)0x7fffffffffffffffU;
uint64_t carry0 = Hacl_Impl_Curve25519_Field64_Vale_add1(f, f, (uint64_t)19U * top_bit);
uint64_t f0 = f[0U];
uint64_t f1 = f[1U];
uint64_t f2 = f[2U];
uint64_t f3 = f[3U];
uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU);
uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU);
uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU);
uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU);
uint64_t mask = ((m0 & m1) & m2) & m3;
uint64_t f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU);
uint64_t f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU);
uint64_t f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU);
uint64_t f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU);
uint64_t o0 = f0_;
uint64_t o1 = f1_;
uint64_t o2 = f2_;
uint64_t o3 = f3_;
b[0U] = o0;
b[1U] = o1;
b[2U] = o2;
b[3U] = o3;
}
static void Hacl_Curve25519_64_encode_point(uint8_t *o, uint64_t *i)
{
uint64_t *x = i;
uint64_t *z = i + (uint32_t)4U;
uint64_t tmp[4U] = { 0U };
uint64_t u64s[4U] = { 0U };
uint64_t tmp_w[16U] = { 0U };
Hacl_Curve25519_64_finv(tmp, z, tmp_w);
Hacl_Impl_Curve25519_Field64_Vale_fmul(tmp, tmp, x, tmp_w);
Hacl_Curve25519_64_store_felem(u64s, tmp);
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U)
{
store64_le(o + i0 * (uint32_t)8U, u64s[i0]);
}
}
void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint64_t init1[8U] = { 0U };
uint64_t tmp[4U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
{
uint64_t *os = tmp;
uint8_t *bj = pub + i * (uint32_t)8U;
uint64_t u = load64_le(bj);
uint64_t r = u;
uint64_t x = r;
os[i] = x;
}
uint64_t tmp3 = tmp[3U];
tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
uint64_t *x = init1;
uint64_t *z = init1 + (uint32_t)4U;
z[0U] = (uint64_t)1U;
z[1U] = (uint64_t)0U;
z[2U] = (uint64_t)0U;
z[3U] = (uint64_t)0U;
x[0U] = tmp[0U];
x[1U] = tmp[1U];
x[2U] = tmp[2U];
x[3U] = tmp[3U];
Hacl_Curve25519_64_montgomery_ladder(init1, priv, init1);
Hacl_Curve25519_64_encode_point(out, init1);
}
void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv)
{
uint8_t basepoint[32U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U)
{
uint8_t *os = basepoint;
uint8_t x = Hacl_Curve25519_64_g25519[i];
os[i] = x;
}
Hacl_Curve25519_64_scalarmult(pub, priv, basepoint);
}
bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint8_t zeros1[32U] = { 0U };
Hacl_Curve25519_64_scalarmult(out, priv, pub);
uint8_t res = (uint8_t)255U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U)
{
uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros1[i]);
res = uu____0 & res;
}
uint8_t z = res;
bool r = z == (uint8_t)255U;
return !r;
}

46
3rdparty/hacl-star/evercrypt/Hacl_Curve25519_64.h vendored Normal file

@@ -0,0 +1,46 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "curve25519-inline.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Curve25519_64_H
#define __Hacl_Curve25519_64_H
#include "Hacl_Kremlib.h"
#include "Vale.h"
#include "Vale_Inline.h"
void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub);
void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv);
bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub);
#define __Hacl_Curve25519_64_H_DEFINED
#endif
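
A similar sketch for the Vale-backed 64-bit variant declared above; the helper is a placeholder, and direct use assumes a CPU with the ADX/BMI2 instructions the Vale assembly relies on (the EverCrypt provider layer normally makes this choice for the caller).

#include <stdint.h>
#include "Hacl_Curve25519_64.h"

/* Compute the X25519 public key for a 32-byte secret scalar with the
   Vale-accelerated implementation. */
static void x25519_keygen_64(uint8_t pub[32], const uint8_t secret[32])
{
  Hacl_Curve25519_64_secret_to_public(pub, (uint8_t *)secret);
}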

960
3rdparty/hacl-star/evercrypt/Hacl_Curve25519_64_Slow.c vendored Normal file

@@ -0,0 +1,960 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Hacl_Curve25519_64_Slow.h"
typedef struct K___uint64_t_uint64_t_uint64_t_uint64_t_s
{
uint64_t fst;
uint64_t snd;
uint64_t thd;
uint64_t f3;
}
K___uint64_t_uint64_t_uint64_t_uint64_t;
typedef struct K___uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_s
{
uint64_t fst;
uint64_t snd;
uint64_t thd;
uint64_t f3;
uint64_t f4;
uint64_t f5;
uint64_t f6;
uint64_t f7;
}
K___uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t;
typedef struct K___uint64_t_uint64_t_s
{
uint64_t fst;
uint64_t snd;
}
K___uint64_t_uint64_t;
inline static K___uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_addcarry(uint64_t x, uint64_t y, uint64_t cin)
{
uint64_t res1 = x + cin;
uint64_t c;
if (res1 < cin)
{
c = (uint64_t)1U;
}
else
{
c = (uint64_t)0U;
}
uint64_t res = res1 + y;
uint64_t c1;
if (res < res1)
{
c1 = c + (uint64_t)1U;
}
else
{
c1 = c;
}
return ((K___uint64_t_uint64_t){ .fst = res, .snd = c1 });
}
inline static K___uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_subborrow(uint64_t x, uint64_t y, uint64_t cin)
{
uint64_t res = x - y - cin;
uint64_t c;
if (cin == (uint64_t)1U)
{
if (x <= y)
{
c = (uint64_t)1U;
}
else
{
c = (uint64_t)0U;
}
}
else if (x < y)
{
c = (uint64_t)1U;
}
else
{
c = (uint64_t)0U;
}
return ((K___uint64_t_uint64_t){ .fst = res, .snd = c });
}
inline static K___uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_mul64(uint64_t x, uint64_t y)
{
uint128_t res = (uint128_t)x * y;
return
((K___uint64_t_uint64_t){ .fst = (uint64_t)res, .snd = (uint64_t)(res >> (uint32_t)64U) });
}
inline static K___uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_add0carry(uint64_t x, uint64_t y)
{
uint64_t res = x + y;
uint64_t c;
if (res < x)
{
c = (uint64_t)1U;
}
else
{
c = (uint64_t)0U;
}
return ((K___uint64_t_uint64_t){ .fst = res, .snd = c });
}
typedef struct K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t_s
{
uint64_t fst;
K___uint64_t_uint64_t_uint64_t_uint64_t snd;
}
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t;
static K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_add1(K___uint64_t_uint64_t_uint64_t_uint64_t f, uint64_t cin)
{
uint64_t f0 = f.fst;
uint64_t f1 = f.snd;
uint64_t f2 = f.thd;
uint64_t f3 = f.f3;
K___uint64_t_uint64_t scrut = Hacl_Spec_Curve25519_Field64_Core_add0carry(f0, cin);
uint64_t o0 = scrut.fst;
uint64_t c0 = scrut.snd;
K___uint64_t_uint64_t scrut0 = Hacl_Spec_Curve25519_Field64_Core_add0carry(f1, c0);
uint64_t o1 = scrut0.fst;
uint64_t c1 = scrut0.snd;
K___uint64_t_uint64_t scrut1 = Hacl_Spec_Curve25519_Field64_Core_add0carry(f2, c1);
uint64_t o2 = scrut1.fst;
uint64_t c2 = scrut1.snd;
K___uint64_t_uint64_t scrut2 = Hacl_Spec_Curve25519_Field64_Core_add0carry(f3, c2);
uint64_t o3 = scrut2.fst;
uint64_t c3 = scrut2.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t out = { .fst = o0, .snd = o1, .thd = o2, .f3 = o3 };
return ((K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = c3, .snd = out });
}
static K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_sub1(K___uint64_t_uint64_t_uint64_t_uint64_t f, uint64_t cin)
{
uint64_t f0 = f.fst;
uint64_t f1 = f.snd;
uint64_t f2 = f.thd;
uint64_t f3 = f.f3;
K___uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_subborrow(f0, cin, (uint64_t)0U);
uint64_t o0 = scrut.fst;
uint64_t c0 = scrut.snd;
K___uint64_t_uint64_t
scrut0 = Hacl_Spec_Curve25519_Field64_Core_subborrow(f1, (uint64_t)0U, c0);
uint64_t o1 = scrut0.fst;
uint64_t c1 = scrut0.snd;
K___uint64_t_uint64_t
scrut1 = Hacl_Spec_Curve25519_Field64_Core_subborrow(f2, (uint64_t)0U, c1);
uint64_t o2 = scrut1.fst;
uint64_t c2 = scrut1.snd;
K___uint64_t_uint64_t
scrut2 = Hacl_Spec_Curve25519_Field64_Core_subborrow(f3, (uint64_t)0U, c2);
uint64_t o3 = scrut2.fst;
uint64_t c3 = scrut2.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t out = { .fst = o0, .snd = o1, .thd = o2, .f3 = o3 };
return ((K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = c3, .snd = out });
}
static K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_mul1(K___uint64_t_uint64_t_uint64_t_uint64_t f, uint64_t u)
{
uint64_t f0 = f.fst;
uint64_t f1 = f.snd;
uint64_t f2 = f.thd;
uint64_t f3 = f.f3;
K___uint64_t_uint64_t scrut0 = Hacl_Spec_Curve25519_Field64_Core_mul64(f0, u);
uint64_t l0 = scrut0.fst;
uint64_t h0 = scrut0.snd;
K___uint64_t_uint64_t scrut1 = Hacl_Spec_Curve25519_Field64_Core_mul64(f1, u);
uint64_t l1 = scrut1.fst;
uint64_t h1 = scrut1.snd;
K___uint64_t_uint64_t scrut2 = Hacl_Spec_Curve25519_Field64_Core_mul64(f2, u);
uint64_t l2 = scrut2.fst;
uint64_t h2 = scrut2.snd;
K___uint64_t_uint64_t scrut3 = Hacl_Spec_Curve25519_Field64_Core_mul64(f3, u);
uint64_t l3 = scrut3.fst;
uint64_t h3 = scrut3.snd;
uint64_t o0 = l0;
K___uint64_t_uint64_t scrut = Hacl_Spec_Curve25519_Field64_Core_addcarry(l1, h0, (uint64_t)0U);
uint64_t o1 = scrut.fst;
uint64_t c0 = scrut.snd;
K___uint64_t_uint64_t scrut4 = Hacl_Spec_Curve25519_Field64_Core_addcarry(l2, h1, c0);
uint64_t o2 = scrut4.fst;
uint64_t c1 = scrut4.snd;
K___uint64_t_uint64_t scrut5 = Hacl_Spec_Curve25519_Field64_Core_addcarry(l3, h2, c1);
uint64_t o3 = scrut5.fst;
uint64_t c2 = scrut5.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t out = { .fst = o0, .snd = o1, .thd = o2, .f3 = o3 };
uint64_t c3 = h3 + c2;
return ((K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = c3, .snd = out });
}
static K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_mul1_add(
K___uint64_t_uint64_t_uint64_t_uint64_t f1,
uint64_t u2,
K___uint64_t_uint64_t_uint64_t_uint64_t f3
)
{
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut0 = Hacl_Spec_Curve25519_Field64_Core_mul1(f1, u2);
uint64_t c = scrut0.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out0 = scrut0.snd;
uint64_t o0 = out0.fst;
uint64_t o1 = out0.snd;
uint64_t o2 = out0.thd;
uint64_t o3 = out0.f3;
uint64_t f30 = f3.fst;
uint64_t f31 = f3.snd;
uint64_t f32 = f3.thd;
uint64_t f33 = f3.f3;
K___uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_addcarry(f30, o0, (uint64_t)0U);
uint64_t o0_ = scrut.fst;
uint64_t c0 = scrut.snd;
K___uint64_t_uint64_t scrut1 = Hacl_Spec_Curve25519_Field64_Core_addcarry(f31, o1, c0);
uint64_t o1_ = scrut1.fst;
uint64_t c1 = scrut1.snd;
K___uint64_t_uint64_t scrut2 = Hacl_Spec_Curve25519_Field64_Core_addcarry(f32, o2, c1);
uint64_t o2_ = scrut2.fst;
uint64_t c2 = scrut2.snd;
K___uint64_t_uint64_t scrut3 = Hacl_Spec_Curve25519_Field64_Core_addcarry(f33, o3, c2);
uint64_t o3_ = scrut3.fst;
uint64_t c3 = scrut3.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t
out = { .fst = o0_, .snd = o1_, .thd = o2_, .f3 = o3_ };
uint64_t c4 = c + c3;
return ((K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = c4, .snd = out });
}
static K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_carry_pass(
K___uint64_t_uint64_t_uint64_t_uint64_t f,
uint64_t cin
)
{
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_add1(f, cin * (uint64_t)38U);
uint64_t carry = scrut.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out0 = scrut.snd;
uint64_t o0 = out0.fst;
uint64_t o1 = out0.snd;
uint64_t o2 = out0.thd;
uint64_t o3 = out0.f3;
uint64_t o0_ = o0 + carry * (uint64_t)38U;
return
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = o0_, .snd = o1, .thd = o2, .f3 = o3 });
}
static K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_carry_wide(
K___uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t f
)
{
uint64_t f0 = f.fst;
uint64_t f1 = f.snd;
uint64_t f2 = f.thd;
uint64_t f3 = f.f3;
uint64_t f4 = f.f4;
uint64_t f5 = f.f5;
uint64_t f6 = f.f6;
uint64_t f7 = f.f7;
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut =
Hacl_Spec_Curve25519_Field64_Core_mul1_add((
(K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f4, .snd = f5, .thd = f6, .f3 = f7 }
),
(uint64_t)38U,
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f0, .snd = f1, .thd = f2, .f3 = f3 }));
uint64_t c0 = scrut.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out0 = scrut.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t
out1 = Hacl_Spec_Curve25519_Field64_Core_carry_pass(out0, c0);
return out1;
}
static K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_add4(
K___uint64_t_uint64_t_uint64_t_uint64_t f1,
K___uint64_t_uint64_t_uint64_t_uint64_t f2
)
{
uint64_t f10 = f1.fst;
uint64_t f11 = f1.snd;
uint64_t f12 = f1.thd;
uint64_t f13 = f1.f3;
uint64_t f20 = f2.fst;
uint64_t f21 = f2.snd;
uint64_t f22 = f2.thd;
uint64_t f23 = f2.f3;
K___uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_addcarry(f10, f20, (uint64_t)0U);
uint64_t o0 = scrut.fst;
uint64_t c0 = scrut.snd;
K___uint64_t_uint64_t scrut0 = Hacl_Spec_Curve25519_Field64_Core_addcarry(f11, f21, c0);
uint64_t o1 = scrut0.fst;
uint64_t c1 = scrut0.snd;
K___uint64_t_uint64_t scrut1 = Hacl_Spec_Curve25519_Field64_Core_addcarry(f12, f22, c1);
uint64_t o2 = scrut1.fst;
uint64_t c2 = scrut1.snd;
K___uint64_t_uint64_t scrut2 = Hacl_Spec_Curve25519_Field64_Core_addcarry(f13, f23, c2);
uint64_t o3 = scrut2.fst;
uint64_t c3 = scrut2.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t out = { .fst = o0, .snd = o1, .thd = o2, .f3 = o3 };
return ((K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = c3, .snd = out });
}
static K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_fadd4(
K___uint64_t_uint64_t_uint64_t_uint64_t f1,
K___uint64_t_uint64_t_uint64_t_uint64_t f2
)
{
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_add4(f1, f2);
uint64_t c0 = scrut.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out0 = scrut.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t
out = Hacl_Spec_Curve25519_Field64_Core_carry_pass(out0, c0);
return out;
}
static K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_sub4(
K___uint64_t_uint64_t_uint64_t_uint64_t f1,
K___uint64_t_uint64_t_uint64_t_uint64_t f2
)
{
uint64_t f10 = f1.fst;
uint64_t f11 = f1.snd;
uint64_t f12 = f1.thd;
uint64_t f13 = f1.f3;
uint64_t f20 = f2.fst;
uint64_t f21 = f2.snd;
uint64_t f22 = f2.thd;
uint64_t f23 = f2.f3;
K___uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_subborrow(f10, f20, (uint64_t)0U);
uint64_t o0 = scrut.fst;
uint64_t c0 = scrut.snd;
K___uint64_t_uint64_t scrut0 = Hacl_Spec_Curve25519_Field64_Core_subborrow(f11, f21, c0);
uint64_t o1 = scrut0.fst;
uint64_t c1 = scrut0.snd;
K___uint64_t_uint64_t scrut1 = Hacl_Spec_Curve25519_Field64_Core_subborrow(f12, f22, c1);
uint64_t o2 = scrut1.fst;
uint64_t c2 = scrut1.snd;
K___uint64_t_uint64_t scrut2 = Hacl_Spec_Curve25519_Field64_Core_subborrow(f13, f23, c2);
uint64_t o3 = scrut2.fst;
uint64_t c3 = scrut2.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t out = { .fst = o0, .snd = o1, .thd = o2, .f3 = o3 };
return ((K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = c3, .snd = out });
}
static K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_fsub4(
K___uint64_t_uint64_t_uint64_t_uint64_t f1,
K___uint64_t_uint64_t_uint64_t_uint64_t f2
)
{
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_sub4(f1, f2);
uint64_t c0 = scrut.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out0 = scrut.snd;
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut0 = Hacl_Spec_Curve25519_Field64_Core_sub1(out0, c0 * (uint64_t)38U);
uint64_t c1 = scrut0.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out1 = scrut0.snd;
uint64_t o0 = out1.fst;
uint64_t o1 = out1.snd;
uint64_t o2 = out1.thd;
uint64_t o3 = out1.f3;
uint64_t o0_ = o0 - c1 * (uint64_t)38U;
return
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = o0_, .snd = o1, .thd = o2, .f3 = o3 });
}
static K___uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_mul4(
K___uint64_t_uint64_t_uint64_t_uint64_t f,
K___uint64_t_uint64_t_uint64_t_uint64_t r
)
{
uint64_t f0 = f.fst;
uint64_t f1 = f.snd;
uint64_t f2 = f.thd;
uint64_t f3 = f.f3;
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_mul1(r, f0);
uint64_t c0 = scrut.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out0 = scrut.snd;
uint64_t o00 = out0.fst;
uint64_t o01 = out0.snd;
uint64_t o02 = out0.thd;
uint64_t o03 = out0.f3;
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut0 =
Hacl_Spec_Curve25519_Field64_Core_mul1_add(r,
f1,
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = o01, .snd = o02, .thd = o03, .f3 = c0 }));
uint64_t c1 = scrut0.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out1 = scrut0.snd;
uint64_t o11 = out1.fst;
uint64_t o12 = out1.snd;
uint64_t o13 = out1.thd;
uint64_t o14 = out1.f3;
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut1 =
Hacl_Spec_Curve25519_Field64_Core_mul1_add(r,
f2,
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = o12, .snd = o13, .thd = o14, .f3 = c1 }));
uint64_t c2 = scrut1.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out2 = scrut1.snd;
uint64_t o22 = out2.fst;
uint64_t o23 = out2.snd;
uint64_t o24 = out2.thd;
uint64_t o25 = out2.f3;
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut2 =
Hacl_Spec_Curve25519_Field64_Core_mul1_add(r,
f3,
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = o23, .snd = o24, .thd = o25, .f3 = c2 }));
uint64_t c3 = scrut2.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out3 = scrut2.snd;
uint64_t o33 = out3.fst;
uint64_t o34 = out3.snd;
uint64_t o35 = out3.thd;
uint64_t o36 = out3.f3;
uint64_t o37 = c3;
return
(
(K___uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t){
.fst = o00,
.snd = o11,
.thd = o22,
.f3 = o33,
.f4 = o34,
.f5 = o35,
.f6 = o36,
.f7 = o37
}
);
}
static K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_fmul4(
K___uint64_t_uint64_t_uint64_t_uint64_t f1,
K___uint64_t_uint64_t_uint64_t_uint64_t r
)
{
K___uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t_uint64_t
tmp = Hacl_Spec_Curve25519_Field64_Core_mul4(f1, r);
K___uint64_t_uint64_t_uint64_t_uint64_t
out = Hacl_Spec_Curve25519_Field64_Core_carry_wide(tmp);
return out;
}
static K___uint64_t_uint64_t_uint64_t_uint64_t
Hacl_Spec_Curve25519_Field64_Core_fmul14(
K___uint64_t_uint64_t_uint64_t_uint64_t f1,
uint64_t f2
)
{
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut = Hacl_Spec_Curve25519_Field64_Core_mul1(f1, f2);
uint64_t c0 = scrut.fst;
K___uint64_t_uint64_t_uint64_t_uint64_t out0 = scrut.snd;
K___uint64_t_uint64_t_uint64_t_uint64_t
out1 = Hacl_Spec_Curve25519_Field64_Core_carry_pass(out0, c0);
return out1;
}
inline static uint64_t
Hacl_Impl_Curve25519_Field64_Hacl_add1(uint64_t *out, uint64_t *f1, uint64_t f2)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
K___uint64_t_K___uint64_t_uint64_t_uint64_t_uint64_t
scrut =
Hacl_Spec_Curve25519_Field64_Core_add1((
(K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f10, .snd = f11, .thd = f12, .f3 = f13 }
),
f2);
uint64_t o3 = scrut.snd.f3;
uint64_t o2 = scrut.snd.thd;
uint64_t o1 = scrut.snd.snd;
uint64_t o0 = scrut.snd.fst;
uint64_t carry = scrut.fst;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
return carry;
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
uint64_t f20 = f2[0U];
uint64_t f21 = f2[1U];
uint64_t f22 = f2[2U];
uint64_t f23 = f2[3U];
K___uint64_t_uint64_t_uint64_t_uint64_t
scrut =
Hacl_Spec_Curve25519_Field64_Core_fadd4((
(K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f10, .snd = f11, .thd = f12, .f3 = f13 }
),
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f20, .snd = f21, .thd = f22, .f3 = f23 }));
uint64_t o0 = scrut.fst;
uint64_t o1 = scrut.snd;
uint64_t o2 = scrut.thd;
uint64_t o3 = scrut.f3;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
uint64_t f20 = f2[0U];
uint64_t f21 = f2[1U];
uint64_t f22 = f2[2U];
uint64_t f23 = f2[3U];
K___uint64_t_uint64_t_uint64_t_uint64_t
scrut =
Hacl_Spec_Curve25519_Field64_Core_fsub4((
(K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f10, .snd = f11, .thd = f12, .f3 = f13 }
),
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f20, .snd = f21, .thd = f22, .f3 = f23 }));
uint64_t o0 = scrut.fst;
uint64_t o1 = scrut.snd;
uint64_t o2 = scrut.thd;
uint64_t o3 = scrut.f3;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_fmul(
uint64_t *out,
uint64_t *f1,
uint64_t *f2,
uint64_t *tmp
)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
uint64_t f20 = f2[0U];
uint64_t f21 = f2[1U];
uint64_t f22 = f2[2U];
uint64_t f23 = f2[3U];
K___uint64_t_uint64_t_uint64_t_uint64_t
scrut =
Hacl_Spec_Curve25519_Field64_Core_fmul4((
(K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f10, .snd = f11, .thd = f12, .f3 = f13 }
),
((K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f20, .snd = f21, .thd = f22, .f3 = f23 }));
uint64_t o0 = scrut.fst;
uint64_t o1 = scrut.snd;
uint64_t o2 = scrut.thd;
uint64_t o3 = scrut.f3;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_fmul2(
uint64_t *out,
uint64_t *f1,
uint64_t *f2,
uint64_t *tmp
)
{
uint64_t *out1 = out;
uint64_t *out2 = out + (uint32_t)4U;
uint64_t *f11 = f1;
uint64_t *f12 = f1 + (uint32_t)4U;
uint64_t *f21 = f2;
uint64_t *f22 = f2 + (uint32_t)4U;
Hacl_Impl_Curve25519_Field64_Hacl_fmul(out1, f11, f21, tmp);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(out2, f12, f22, tmp);
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
uint64_t f12 = f1[2U];
uint64_t f13 = f1[3U];
K___uint64_t_uint64_t_uint64_t_uint64_t
scrut =
Hacl_Spec_Curve25519_Field64_Core_fmul14((
(K___uint64_t_uint64_t_uint64_t_uint64_t){ .fst = f10, .snd = f11, .thd = f12, .f3 = f13 }
),
f2);
uint64_t o0 = scrut.fst;
uint64_t o1 = scrut.snd;
uint64_t o2 = scrut.thd;
uint64_t o3 = scrut.f3;
out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_fsqr(uint64_t *out, uint64_t *f1, uint64_t *tmp)
{
uint64_t tmp1[16U] = { 0U };
Hacl_Impl_Curve25519_Field64_Hacl_fmul(out, f1, f1, tmp1);
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_fsqr2(uint64_t *out, uint64_t *f, uint64_t *tmp)
{
Hacl_Impl_Curve25519_Field64_Hacl_fmul2(out, f, f, tmp);
}
inline static void
Hacl_Impl_Curve25519_Field64_Hacl_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
{
uint64_t mask = (uint64_t)0U - bit;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U)
{
uint64_t dummy = mask & (p1[i] ^ p2[i]);
p1[i] = p1[i] ^ dummy;
p2[i] = p2[i] ^ dummy;
}
}
static uint8_t
Hacl_Curve25519_64_Slow_g25519[32U] =
{
(uint8_t)9U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
(uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U
};
static void
Hacl_Curve25519_64_Slow_point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2)
{
uint64_t *nq = p01_tmp1;
uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
uint64_t *x1 = q;
uint64_t *x2 = nq;
uint64_t *z2 = nq + (uint32_t)4U;
uint64_t *z3 = nq_p1 + (uint32_t)4U;
uint64_t *a = tmp1;
uint64_t *b = tmp1 + (uint32_t)4U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)8U;
Hacl_Impl_Curve25519_Field64_Hacl_fadd(a, x2, z2);
Hacl_Impl_Curve25519_Field64_Hacl_fsub(b, x2, z2);
uint64_t *x3 = nq_p1;
uint64_t *z31 = nq_p1 + (uint32_t)4U;
uint64_t *d0 = dc;
uint64_t *c0 = dc + (uint32_t)4U;
Hacl_Impl_Curve25519_Field64_Hacl_fadd(c0, x3, z31);
Hacl_Impl_Curve25519_Field64_Hacl_fsub(d0, x3, z31);
Hacl_Impl_Curve25519_Field64_Hacl_fmul2(dc, dc, ab, tmp2);
Hacl_Impl_Curve25519_Field64_Hacl_fadd(x3, d0, c0);
Hacl_Impl_Curve25519_Field64_Hacl_fsub(z31, d0, c0);
uint64_t *a1 = tmp1;
uint64_t *b1 = tmp1 + (uint32_t)4U;
uint64_t *d = tmp1 + (uint32_t)8U;
uint64_t *c = tmp1 + (uint32_t)12U;
uint64_t *ab1 = tmp1;
uint64_t *dc1 = tmp1 + (uint32_t)8U;
Hacl_Impl_Curve25519_Field64_Hacl_fsqr2(dc1, ab1, tmp2);
Hacl_Impl_Curve25519_Field64_Hacl_fsqr2(nq_p1, nq_p1, tmp2);
a1[0U] = c[0U];
a1[1U] = c[1U];
a1[2U] = c[2U];
a1[3U] = c[3U];
Hacl_Impl_Curve25519_Field64_Hacl_fsub(c, d, c);
Hacl_Impl_Curve25519_Field64_Hacl_fmul1(b1, c, (uint64_t)121665U);
Hacl_Impl_Curve25519_Field64_Hacl_fadd(b1, b1, d);
Hacl_Impl_Curve25519_Field64_Hacl_fmul2(nq, dc1, ab1, tmp2);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(z3, z3, x1, tmp2);
}
static void Hacl_Curve25519_64_Slow_point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
{
uint64_t *x2 = nq;
uint64_t *z2 = nq + (uint32_t)4U;
uint64_t *a = tmp1;
uint64_t *b = tmp1 + (uint32_t)4U;
uint64_t *d = tmp1 + (uint32_t)8U;
uint64_t *c = tmp1 + (uint32_t)12U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)8U;
Hacl_Impl_Curve25519_Field64_Hacl_fadd(a, x2, z2);
Hacl_Impl_Curve25519_Field64_Hacl_fsub(b, x2, z2);
Hacl_Impl_Curve25519_Field64_Hacl_fsqr2(dc, ab, tmp2);
a[0U] = c[0U];
a[1U] = c[1U];
a[2U] = c[2U];
a[3U] = c[3U];
Hacl_Impl_Curve25519_Field64_Hacl_fsub(c, d, c);
Hacl_Impl_Curve25519_Field64_Hacl_fmul1(b, c, (uint64_t)121665U);
Hacl_Impl_Curve25519_Field64_Hacl_fadd(b, b, d);
Hacl_Impl_Curve25519_Field64_Hacl_fmul2(nq, dc, ab, tmp2);
}
static void
Hacl_Curve25519_64_Slow_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
{
uint64_t tmp2[16U] = { 0U };
uint64_t p01_tmp1_swap[33U] = { 0U };
uint64_t *p0 = p01_tmp1_swap;
uint64_t *p01 = p01_tmp1_swap;
uint64_t *p03 = p01;
uint64_t *p11 = p01 + (uint32_t)8U;
memcpy(p11, init1, (uint32_t)8U * sizeof init1[0U]);
uint64_t *x0 = p03;
uint64_t *z0 = p03 + (uint32_t)4U;
x0[0U] = (uint64_t)1U;
x0[1U] = (uint64_t)0U;
x0[2U] = (uint64_t)0U;
x0[3U] = (uint64_t)0U;
z0[0U] = (uint64_t)0U;
z0[1U] = (uint64_t)0U;
z0[2U] = (uint64_t)0U;
z0[3U] = (uint64_t)0U;
uint64_t *p01_tmp1 = p01_tmp1_swap;
uint64_t *p01_tmp11 = p01_tmp1_swap;
uint64_t *nq1 = p01_tmp1_swap;
uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)8U;
uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U;
Hacl_Impl_Curve25519_Field64_Hacl_cswap2((uint64_t)1U, nq1, nq_p11);
Hacl_Curve25519_64_Slow_point_add_and_double(init1, p01_tmp11, tmp2);
swap1[0U] = (uint64_t)1U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i = i + (uint32_t)1U)
{
uint64_t *p01_tmp12 = p01_tmp1_swap;
uint64_t *swap2 = p01_tmp1_swap + (uint32_t)32U;
uint64_t *nq2 = p01_tmp12;
uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U;
uint64_t
bit =
(uint64_t)(key[((uint32_t)253U - i)
/ (uint32_t)8U]
>> ((uint32_t)253U - i) % (uint32_t)8U
& (uint8_t)1U);
uint64_t sw = swap2[0U] ^ bit;
Hacl_Impl_Curve25519_Field64_Hacl_cswap2(sw, nq2, nq_p12);
Hacl_Curve25519_64_Slow_point_add_and_double(init1, p01_tmp12, tmp2);
swap2[0U] = bit;
}
uint64_t sw = swap1[0U];
Hacl_Impl_Curve25519_Field64_Hacl_cswap2(sw, nq1, nq_p11);
uint64_t *nq10 = p01_tmp1;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
Hacl_Curve25519_64_Slow_point_double(nq10, tmp1, tmp2);
Hacl_Curve25519_64_Slow_point_double(nq10, tmp1, tmp2);
Hacl_Curve25519_64_Slow_point_double(nq10, tmp1, tmp2);
memcpy(out, p0, (uint32_t)8U * sizeof p0[0U]);
}
static void
Hacl_Curve25519_64_Slow_fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n1)
{
Hacl_Impl_Curve25519_Field64_Hacl_fsqr(o, inp, tmp);
for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i = i + (uint32_t)1U)
{
Hacl_Impl_Curve25519_Field64_Hacl_fsqr(o, o, tmp);
}
}
static void Hacl_Curve25519_64_Slow_finv(uint64_t *o, uint64_t *i, uint64_t *tmp)
{
uint64_t t1[16U] = { 0U };
uint64_t *a = t1;
uint64_t *b = t1 + (uint32_t)4U;
uint64_t *c = t1 + (uint32_t)8U;
uint64_t *t00 = t1 + (uint32_t)12U;
uint64_t *tmp1 = tmp;
Hacl_Curve25519_64_Slow_fsquare_times(a, i, tmp1, (uint32_t)1U);
Hacl_Curve25519_64_Slow_fsquare_times(t00, a, tmp1, (uint32_t)2U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(b, t00, i, tmp);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(a, b, a, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, a, tmp1, (uint32_t)1U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(b, t00, b, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, b, tmp1, (uint32_t)5U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(b, t00, b, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, b, tmp1, (uint32_t)10U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(c, t00, b, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, c, tmp1, (uint32_t)20U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(t00, t00, c, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, t00, tmp1, (uint32_t)10U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(b, t00, b, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, b, tmp1, (uint32_t)50U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(c, t00, b, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, c, tmp1, (uint32_t)100U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(t00, t00, c, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, t00, tmp1, (uint32_t)50U);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(t00, t00, b, tmp);
Hacl_Curve25519_64_Slow_fsquare_times(t00, t00, tmp1, (uint32_t)5U);
uint64_t *a0 = t1;
uint64_t *t0 = t1 + (uint32_t)12U;
Hacl_Impl_Curve25519_Field64_Hacl_fmul(o, t0, a0, tmp);
}
static void Hacl_Curve25519_64_Slow_store_felem(uint64_t *b, uint64_t *f)
{
uint64_t f30 = f[3U];
uint64_t top_bit0 = f30 >> (uint32_t)63U;
f[3U] = f30 & (uint64_t)0x7fffffffffffffffU;
uint64_t carry = Hacl_Impl_Curve25519_Field64_Hacl_add1(f, f, (uint64_t)19U * top_bit0);
uint64_t f31 = f[3U];
uint64_t top_bit = f31 >> (uint32_t)63U;
f[3U] = f31 & (uint64_t)0x7fffffffffffffffU;
uint64_t carry0 = Hacl_Impl_Curve25519_Field64_Hacl_add1(f, f, (uint64_t)19U * top_bit);
uint64_t f0 = f[0U];
uint64_t f1 = f[1U];
uint64_t f2 = f[2U];
uint64_t f3 = f[3U];
uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU);
uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU);
uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU);
uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU);
uint64_t mask = ((m0 & m1) & m2) & m3;
uint64_t f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU);
uint64_t f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU);
uint64_t f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU);
uint64_t f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU);
uint64_t o0 = f0_;
uint64_t o1 = f1_;
uint64_t o2 = f2_;
uint64_t o3 = f3_;
b[0U] = o0;
b[1U] = o1;
b[2U] = o2;
b[3U] = o3;
}
static void Hacl_Curve25519_64_Slow_encode_point(uint8_t *o, uint64_t *i)
{
uint64_t *x = i;
uint64_t *z = i + (uint32_t)4U;
uint64_t tmp[4U] = { 0U };
uint64_t u64s[4U] = { 0U };
uint64_t tmp_w[16U] = { 0U };
Hacl_Curve25519_64_Slow_finv(tmp, z, tmp_w);
Hacl_Impl_Curve25519_Field64_Hacl_fmul(tmp, tmp, x, tmp_w);
Hacl_Curve25519_64_Slow_store_felem(u64s, tmp);
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U)
{
store64_le(o + i0 * (uint32_t)8U, u64s[i0]);
}
}
void Hacl_Curve25519_64_Slow_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint64_t init1[8U] = { 0U };
uint64_t tmp[4U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
{
uint64_t *os = tmp;
uint8_t *bj = pub + i * (uint32_t)8U;
uint64_t u = load64_le(bj);
uint64_t r = u;
uint64_t x = r;
os[i] = x;
}
uint64_t tmp3 = tmp[3U];
tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
uint64_t *x = init1;
uint64_t *z = init1 + (uint32_t)4U;
z[0U] = (uint64_t)1U;
z[1U] = (uint64_t)0U;
z[2U] = (uint64_t)0U;
z[3U] = (uint64_t)0U;
x[0U] = tmp[0U];
x[1U] = tmp[1U];
x[2U] = tmp[2U];
x[3U] = tmp[3U];
Hacl_Curve25519_64_Slow_montgomery_ladder(init1, priv, init1);
Hacl_Curve25519_64_Slow_encode_point(out, init1);
}
void Hacl_Curve25519_64_Slow_secret_to_public(uint8_t *pub, uint8_t *priv)
{
uint8_t basepoint[32U] = { 0U };
for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U)
{
uint8_t *os = basepoint;
uint8_t x = Hacl_Curve25519_64_Slow_g25519[i];
os[i] = x;
}
Hacl_Curve25519_64_Slow_scalarmult(pub, priv, basepoint);
}
bool Hacl_Curve25519_64_Slow_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint8_t zeros1[32U] = { 0U };
Hacl_Curve25519_64_Slow_scalarmult(out, priv, pub);
uint8_t res = (uint8_t)255U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U)
{
uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros1[i]);
res = uu____0 & res;
}
uint8_t z = res;
bool r = z == (uint8_t)255U;
return !r;
}
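
The portable field arithmetic above replaces the Vale primitives with explicit carry propagation; the addcarry helper near the top of this file is a plain 64-bit full adder. A small cross-check of that logic against a compiler-provided 128-bit integer (GCC/Clang only, illustrative and not part of the vendored file):

#include <assert.h>
#include <stdint.h>

/* Recompute x + y + cin the way Hacl_Spec_Curve25519_Field64_Core_addcarry
   does and compare against a native 128-bit addition. */
static void check_addcarry(uint64_t x, uint64_t y, uint64_t cin)
{
  unsigned __int128 wide = (unsigned __int128)x + y + cin;
  uint64_t res1 = x + cin;
  uint64_t c = res1 < cin ? (uint64_t)1U : (uint64_t)0U; /* carry out of x + cin */
  uint64_t res = res1 + y;
  uint64_t cout = res < res1 ? c + (uint64_t)1U : c;     /* carry out of res1 + y */
  assert(res == (uint64_t)wide);
  assert(cout == (uint64_t)(wide >> 64));
}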

43
3rdparty/hacl-star/evercrypt/Hacl_Curve25519_64_Slow.h vendored Normal file

@@ -0,0 +1,43 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Curve25519_64_Slow_H
#define __Hacl_Curve25519_64_Slow_H
#include "Hacl_Kremlib.h"
void Hacl_Curve25519_64_Slow_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub);
void Hacl_Curve25519_64_Slow_secret_to_public(uint8_t *pub, uint8_t *priv);
bool Hacl_Curve25519_64_Slow_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub);
#define __Hacl_Curve25519_64_Slow_H_DEFINED
#endif
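
Hacl_Curve25519_64 and Hacl_Curve25519_64_Slow expose the same three entry points, so a caller outside EverCrypt could dispatch between them along the following lines. cpu_has_adx_bmi2 is a hypothetical probe the application would have to supply; EverCrypt's autoconfig layer normally performs this detection itself.

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Curve25519_64.h"
#include "Hacl_Curve25519_64_Slow.h"

/* Hypothetical CPU-feature probe provided by the embedding application. */
extern bool cpu_has_adx_bmi2(void);

static bool x25519_ecdh_dispatch(uint8_t out[32], const uint8_t priv[32], const uint8_t pub[32])
{
  if (cpu_has_adx_bmi2())
    return Hacl_Curve25519_64_ecdh(out, (uint8_t *)priv, (uint8_t *)pub);
  return Hacl_Curve25519_64_Slow_ecdh(out, (uint8_t *)priv, (uint8_t *)pub);
}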

1949
3rdparty/hacl-star/evercrypt/Hacl_Ed25519.c vendored Normal file

File diff not shown because it is too large

49
3rdparty/hacl-star/evercrypt/Hacl_Ed25519.h vendored Normal file

@@ -0,0 +1,49 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Ed25519_H
#define __Hacl_Ed25519_H
#include "Hacl_Kremlib.h"
#include "Hacl_Hash.h"
#include "Hacl_Curve25519_51.h"
void Hacl_Ed25519_sign(uint8_t *signature, uint8_t *secret1, uint32_t len, uint8_t *msg);
bool Hacl_Ed25519_verify(uint8_t *output, uint32_t len, uint8_t *msg, uint8_t *signature);
void Hacl_Ed25519_secret_to_public(uint8_t *output, uint8_t *secret1);
void Hacl_Ed25519_expand_keys(uint8_t *ks, uint8_t *secret1);
void Hacl_Ed25519_sign_expanded(uint8_t *signature, uint8_t *ks, uint32_t len, uint8_t *msg);
#define __Hacl_Ed25519_H_DEFINED
#endif
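
An illustrative round trip through the Ed25519 API declared above; the helper and buffers are placeholders. Despite the extracted parameter name output, the first argument of Hacl_Ed25519_verify is the signer's 32-byte public key.

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Ed25519.h"

/* Sign a message with a 32-byte secret key, then verify it with the derived
   public key. */
static bool ed25519_roundtrip(const uint8_t *secret, uint8_t *msg, uint32_t len)
{
  uint8_t pub[32U] = { 0U };
  uint8_t sig[64U] = { 0U };
  Hacl_Ed25519_secret_to_public(pub, (uint8_t *)secret);
  Hacl_Ed25519_sign(sig, (uint8_t *)secret, len, msg);
  return Hacl_Ed25519_verify(pub, len, msg, sig);
}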

674
3rdparty/hacl-star/evercrypt/Hacl_Frodo_KEM.c vendored Normal file

@@ -0,0 +1,674 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Hacl_Frodo_KEM.h"
inline static void
Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
{
for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0 = i0 + (uint32_t)1U)
{
for (uint32_t i = (uint32_t)0U; i < n2; i = i + (uint32_t)1U)
{
a[i0 * n2 + i] = a[i0 * n2 + i] + b[i0 * n2 + i];
}
}
}
inline static void
Hacl_Impl_Matrix_matrix_sub(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
{
for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0 = i0 + (uint32_t)1U)
{
for (uint32_t i = (uint32_t)0U; i < n2; i = i + (uint32_t)1U)
{
b[i0 * n2 + i] = a[i0 * n2 + i] - b[i0 * n2 + i];
}
}
}
inline static void
Hacl_Impl_Matrix_matrix_mul(
uint32_t n1,
uint32_t n2,
uint32_t n3,
uint16_t *a,
uint16_t *b,
uint16_t *c
)
{
for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0 = i0 + (uint32_t)1U)
{
for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1 = i1 + (uint32_t)1U)
{
uint16_t res = (uint16_t)0U;
for (uint32_t i = (uint32_t)0U; i < n2; i = i + (uint32_t)1U)
{
uint16_t aij = a[i0 * n2 + i];
uint16_t bjk = b[i * n3 + i1];
uint16_t res0 = res;
res = res0 + aij * bjk;
}
c[i0 * n3 + i1] = res;
}
}
}
inline static void
Hacl_Impl_Matrix_matrix_mul_s(
uint32_t n1,
uint32_t n2,
uint32_t n3,
uint16_t *a,
uint16_t *b,
uint16_t *c
)
{
for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0 = i0 + (uint32_t)1U)
{
for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1 = i1 + (uint32_t)1U)
{
uint16_t res = (uint16_t)0U;
for (uint32_t i = (uint32_t)0U; i < n2; i = i + (uint32_t)1U)
{
uint16_t aij = a[i0 * n2 + i];
uint16_t bjk = b[i1 * n2 + i];
uint16_t res0 = res;
res = res0 + aij * bjk;
}
c[i0 * n3 + i1] = res;
}
}
}
inline static bool
Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint32_t m, uint16_t *a, uint16_t *b)
{
bool res = true;
uint32_t n3 = n1 * n2;
for (uint32_t i = (uint32_t)0U; i < n3; i = i + (uint32_t)1U)
{
uint16_t ai = a[i];
uint16_t bi = b[i];
bool a1 = res;
res =
a1
&&
((uint32_t)ai & (((uint32_t)1U << m) - (uint32_t)1U))
== ((uint32_t)bi & (((uint32_t)1U << m) - (uint32_t)1U));
}
return res;
}
inline static void
Hacl_Impl_Matrix_matrix_to_lbytes(uint32_t n1, uint32_t n2, uint16_t *m, uint8_t *res)
{
uint32_t n3 = n1 * n2;
for (uint32_t i = (uint32_t)0U; i < n3; i = i + (uint32_t)1U)
{
uint8_t *tmp = res + (uint32_t)2U * i;
store16_le(tmp, m[i]);
}
}
inline static void
Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16_t *res)
{
uint32_t n3 = n1 * n2;
for (uint32_t i = (uint32_t)0U; i < n3; i = i + (uint32_t)1U)
{
uint16_t u = load16_le(b + (uint32_t)2U * i);
res[i] = u;
}
}
inline static void
Hacl_Impl_Frodo_Gen_frodo_gen_matrix_cshake(
uint32_t n1,
uint32_t seed_len,
uint8_t *seed,
uint16_t *res
)
{
KRML_CHECK_SIZE(sizeof (uint8_t), (uint32_t)2U * n1);
uint8_t r[(uint32_t)2U * n1];
memset(r, 0U, (uint32_t)2U * n1 * sizeof r[0U]);
memset(res, 0U, n1 * n1 * sizeof res[0U]);
for (uint32_t i = (uint32_t)0U; i < n1; i = i + (uint32_t)1U)
{
uint32_t ctr = (uint32_t)256U + i;
uint64_t s[25U] = { 0U };
s[0U] = (uint64_t)0x10010001a801U | (uint64_t)(uint16_t)ctr << (uint32_t)48U;
Hacl_Impl_SHA3_state_permute(s);
Hacl_Impl_SHA3_absorb(s, (uint32_t)168U, seed_len, seed, (uint8_t)0x04U);
Hacl_Impl_SHA3_squeeze(s, (uint32_t)168U, (uint32_t)2U * n1, r);
for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0 = i0 + (uint32_t)1U)
{
uint8_t *resij = r + (uint32_t)2U * i0;
uint16_t u = load16_le(resij);
res[i * n1 + i0] = u;
}
}
}
static uint16_t
Hacl_Impl_Frodo_Sample_cdf_table[12U] =
{
(uint16_t)4727U, (uint16_t)13584U, (uint16_t)20864U, (uint16_t)26113U, (uint16_t)29434U,
(uint16_t)31278U, (uint16_t)32176U, (uint16_t)32560U, (uint16_t)32704U, (uint16_t)32751U,
(uint16_t)32764U, (uint16_t)32767U
};
inline static uint16_t Hacl_Impl_Frodo_Sample_frodo_sample(uint16_t r)
{
uint16_t prnd = r >> (uint32_t)1U;
uint16_t sign = r & (uint16_t)1U;
uint16_t sample = (uint16_t)0U;
uint32_t bound = (uint32_t)11U;
for (uint32_t i = (uint32_t)0U; i < bound; i = i + (uint32_t)1U)
{
uint16_t sample0 = sample;
uint16_t ti = Hacl_Impl_Frodo_Sample_cdf_table[i];
uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
sample = samplei + sample0;
}
uint16_t sample0 = sample;
return ((~sign + (uint16_t)1U) ^ sample0) + sign;
}
inline static void
Hacl_Impl_Frodo_Sample_frodo_sample_matrix(
uint32_t n1,
uint32_t n2,
uint32_t seed_len,
uint8_t *seed,
uint16_t ctr,
uint16_t *res
)
{
KRML_CHECK_SIZE(sizeof (uint8_t), (uint32_t)2U * n1 * n2);
uint8_t r[(uint32_t)2U * n1 * n2];
memset(r, 0U, (uint32_t)2U * n1 * n2 * sizeof r[0U]);
uint64_t s[25U] = { 0U };
s[0U] = (uint64_t)0x10010001a801U | (uint64_t)ctr << (uint32_t)48U;
Hacl_Impl_SHA3_state_permute(s);
Hacl_Impl_SHA3_absorb(s, (uint32_t)168U, seed_len, seed, (uint8_t)0x04U);
Hacl_Impl_SHA3_squeeze(s, (uint32_t)168U, (uint32_t)2U * n1 * n2, r);
memset(res, 0U, n1 * n2 * sizeof res[0U]);
for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0 = i0 + (uint32_t)1U)
{
for (uint32_t i = (uint32_t)0U; i < n2; i = i + (uint32_t)1U)
{
uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i);
uint16_t u = load16_le(resij);
res[i0 * n2 + i] = Hacl_Impl_Frodo_Sample_frodo_sample(u);
}
}
}
inline static void
Hacl_Impl_Frodo_Pack_frodo_pack(
uint32_t n1,
uint32_t n2,
uint32_t d,
uint16_t *a,
uint8_t *res
)
{
uint32_t n3 = n1 * n2 / (uint32_t)8U;
for (uint32_t i = (uint32_t)0U; i < n3; i = i + (uint32_t)1U)
{
uint16_t *a1 = a + (uint32_t)8U * i;
uint8_t *r = res + d * i;
uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
uint8_t v16[16U] = { 0U };
uint16_t a0 = a1[0U] & maskd;
uint16_t a11 = a1[1U] & maskd;
uint16_t a2 = a1[2U] & maskd;
uint16_t a3 = a1[3U] & maskd;
uint16_t a4 = a1[4U] & maskd;
uint16_t a5 = a1[5U] & maskd;
uint16_t a6 = a1[6U] & maskd;
uint16_t a7 = a1[7U] & maskd;
uint128_t
templong =
(((((((uint128_t)(uint64_t)a0
<< (uint32_t)7U * d
| (uint128_t)(uint64_t)a11 << (uint32_t)6U * d)
| (uint128_t)(uint64_t)a2 << (uint32_t)5U * d)
| (uint128_t)(uint64_t)a3 << (uint32_t)4U * d)
| (uint128_t)(uint64_t)a4 << (uint32_t)3U * d)
| (uint128_t)(uint64_t)a5 << (uint32_t)2U * d)
| (uint128_t)(uint64_t)a6 << (uint32_t)1U * d)
| (uint128_t)(uint64_t)a7 << (uint32_t)0U * d;
store128_be(v16, templong);
uint8_t *src = v16 + (uint32_t)16U - d;
memcpy(r, src, d * sizeof src[0U]);
}
}
inline static void
Hacl_Impl_Frodo_Pack_frodo_unpack(
uint32_t n1,
uint32_t n2,
uint32_t d,
uint8_t *b,
uint16_t *res
)
{
uint32_t n3 = n1 * n2 / (uint32_t)8U;
for (uint32_t i = (uint32_t)0U; i < n3; i = i + (uint32_t)1U)
{
uint8_t *b1 = b + d * i;
uint16_t *r = res + (uint32_t)8U * i;
uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
uint8_t src[16U] = { 0U };
memcpy(src + (uint32_t)16U - d, b1, d * sizeof b1[0U]);
uint128_t u = load128_be(src);
uint128_t templong = u;
r[0U] = (uint16_t)(uint64_t)(templong >> (uint32_t)7U * d) & maskd;
r[1U] = (uint16_t)(uint64_t)(templong >> (uint32_t)6U * d) & maskd;
r[2U] = (uint16_t)(uint64_t)(templong >> (uint32_t)5U * d) & maskd;
r[3U] = (uint16_t)(uint64_t)(templong >> (uint32_t)4U * d) & maskd;
r[4U] = (uint16_t)(uint64_t)(templong >> (uint32_t)3U * d) & maskd;
r[5U] = (uint16_t)(uint64_t)(templong >> (uint32_t)2U * d) & maskd;
r[6U] = (uint16_t)(uint64_t)(templong >> (uint32_t)1U * d) & maskd;
r[7U] = (uint16_t)(uint64_t)(templong >> (uint32_t)0U * d) & maskd;
}
}
static void randombytes_(uint32_t len, uint8_t *res)
{
bool b = Lib_RandomBuffer_System_randombytes(res, len);
}
static uint32_t Hacl_Impl_Frodo_KEM_bytes_mu = (uint32_t)16U;
static uint32_t Hacl_Impl_Frodo_KEM_crypto_publickeybytes = (uint32_t)976U;
static uint32_t Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes = (uint32_t)1096U;
inline static void
Hacl_Impl_Frodo_KEM_KeyGen_frodo_mul_add_as_plus_e_pack(
uint8_t *seed_a,
uint8_t *seed_e,
uint8_t *b,
uint8_t *s
)
{
uint16_t s_matrix[512U] = { 0U };
Hacl_Impl_Frodo_Sample_frodo_sample_matrix((uint32_t)64U,
(uint32_t)8U,
(uint32_t)16U,
seed_e,
(uint16_t)1U,
s_matrix);
Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)64U, (uint32_t)8U, s_matrix, s);
uint16_t b_matrix[512U] = { 0U };
uint16_t a_matrix[4096U] = { 0U };
uint16_t e_matrix[512U] = { 0U };
Hacl_Impl_Frodo_Gen_frodo_gen_matrix_cshake((uint32_t)64U, (uint32_t)16U, seed_a, a_matrix);
Hacl_Impl_Frodo_Sample_frodo_sample_matrix((uint32_t)64U,
(uint32_t)8U,
(uint32_t)16U,
seed_e,
(uint16_t)2U,
e_matrix);
Hacl_Impl_Matrix_matrix_mul_s((uint32_t)64U,
(uint32_t)64U,
(uint32_t)8U,
a_matrix,
s_matrix,
b_matrix);
Hacl_Impl_Matrix_matrix_add((uint32_t)64U, (uint32_t)8U, b_matrix, e_matrix);
Lib_Memzero_clear_words_u16((uint32_t)512U, e_matrix);
Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b_matrix, b);
Lib_Memzero_clear_words_u16((uint32_t)512U, s_matrix);
}
inline static void
Hacl_Impl_Frodo_Encode_frodo_key_encode(uint32_t b, uint8_t *a, uint16_t *res)
{
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8U; i0 = i0 + (uint32_t)1U)
{
uint8_t v8[8U] = { 0U };
uint8_t *chunk = a + i0 * b;
memcpy(v8, chunk, b * sizeof chunk[0U]);
uint64_t u = load64_le(v8);
uint64_t x = u;
uint64_t x0 = x;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U)
{
uint64_t rk = x0 >> b * i & (((uint64_t)1U << b) - (uint64_t)1U);
res[i0 * (uint32_t)8U + i] = (uint16_t)rk << ((uint32_t)15U - b);
}
}
}
inline static void
Hacl_Impl_Frodo_Encode_frodo_key_decode(uint32_t b, uint16_t *a, uint8_t *res)
{
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8U; i0 = i0 + (uint32_t)1U)
{
uint64_t templong = (uint64_t)0U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U)
{
uint16_t aik = a[i0 * (uint32_t)8U + i];
uint16_t
res1 = (aik + ((uint16_t)1U << ((uint32_t)15U - b - (uint32_t)1U))) >> ((uint32_t)15U - b);
templong = templong | (uint64_t)(res1 & (((uint16_t)1U << b) - (uint16_t)1U)) << b * i;
}
uint64_t templong0 = templong;
uint8_t v8[8U] = { 0U };
store64_le(v8, templong0);
uint8_t *tmp = v8;
memcpy(res + i0 * b, tmp, b * sizeof tmp[0U]);
}
}
inline static void
Hacl_Impl_Frodo_KEM_Encaps_frodo_mul_add_sb_plus_e_plus_mu(
uint8_t *b,
uint8_t *seed_e,
uint8_t *coins,
uint16_t *sp_matrix,
uint16_t *v_matrix
)
{
uint16_t b_matrix[512U] = { 0U };
uint16_t epp_matrix[64U] = { 0U };
Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
Hacl_Impl_Frodo_Sample_frodo_sample_matrix((uint32_t)8U,
(uint32_t)8U,
(uint32_t)16U,
seed_e,
(uint16_t)6U,
epp_matrix);
Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
(uint32_t)64U,
(uint32_t)8U,
sp_matrix,
b_matrix,
v_matrix);
Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
Lib_Memzero_clear_words_u16((uint32_t)64U, epp_matrix);
uint16_t mu_encode[64U] = { 0U };
Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)2U, coins, mu_encode);
Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
}
inline static void
Hacl_Impl_Frodo_KEM_Encaps_crypto_kem_enc_ct(
uint8_t *pk,
uint8_t *g,
uint8_t *coins,
uint8_t *ct
)
{
uint8_t *seed_a = pk;
uint8_t *b = pk + (uint32_t)16U;
uint8_t *seed_e = g;
uint8_t *d = g + (uint32_t)32U;
uint16_t sp_matrix[512U] = { 0U };
Hacl_Impl_Frodo_Sample_frodo_sample_matrix((uint32_t)8U,
(uint32_t)64U,
(uint32_t)16U,
seed_e,
(uint16_t)4U,
sp_matrix);
uint32_t c1Len = (uint32_t)960U;
uint32_t c2Len = (uint32_t)120U;
uint32_t c12Len = c1Len + c2Len;
uint8_t *c1 = ct;
uint16_t bp_matrix[512U] = { 0U };
uint16_t a_matrix[4096U] = { 0U };
uint16_t ep_matrix[512U] = { 0U };
Hacl_Impl_Frodo_Gen_frodo_gen_matrix_cshake((uint32_t)64U, (uint32_t)16U, seed_a, a_matrix);
Hacl_Impl_Frodo_Sample_frodo_sample_matrix((uint32_t)8U,
(uint32_t)64U,
(uint32_t)16U,
seed_e,
(uint16_t)5U,
ep_matrix);
Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
(uint32_t)64U,
(uint32_t)64U,
sp_matrix,
a_matrix,
bp_matrix);
Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bp_matrix, ep_matrix);
Lib_Memzero_clear_words_u16((uint32_t)512U, ep_matrix);
Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bp_matrix, c1);
uint8_t *c2 = ct + c1Len;
uint16_t v_matrix[64U] = { 0U };
Hacl_Impl_Frodo_KEM_Encaps_frodo_mul_add_sb_plus_e_plus_mu(b,
seed_e,
coins,
sp_matrix,
v_matrix);
Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
Lib_Memzero_clear_words_u16((uint32_t)64U, v_matrix);
memcpy(ct + c12Len, d, (uint32_t)16U * sizeof d[0U]);
Lib_Memzero_clear_words_u16((uint32_t)512U, sp_matrix);
}
inline static void
Hacl_Impl_Frodo_KEM_Encaps_crypto_kem_enc_ss(uint8_t *g, uint8_t *ct, uint8_t *ss)
{
uint32_t ss_init_len = Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes + (uint32_t)16U;
KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
uint8_t ss_init[ss_init_len];
memset(ss_init, 0U, ss_init_len * sizeof ss_init[0U]);
uint8_t *c12 = ct;
uint8_t *kd = g + (uint32_t)16U;
memcpy(ss_init,
c12,
(Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes - (uint32_t)16U) * sizeof c12[0U]);
memcpy(ss_init + Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes - (uint32_t)16U,
kd,
(uint32_t)32U * sizeof kd[0U]);
uint64_t s[25U] = { 0U };
s[0U] = (uint64_t)0x10010001a801U | (uint64_t)(uint16_t)7U << (uint32_t)48U;
Hacl_Impl_SHA3_state_permute(s);
Hacl_Impl_SHA3_absorb(s, (uint32_t)168U, ss_init_len, ss_init, (uint8_t)0x04U);
Hacl_Impl_SHA3_squeeze(s, (uint32_t)168U, (uint32_t)16U, ss);
}
uint32_t Hacl_Frodo_KEM_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
{
uint8_t coins[48U] = { 0U };
randombytes_((uint32_t)48U, coins);
uint8_t *s = coins;
uint8_t *seed_e = coins + (uint32_t)16U;
uint8_t *z = coins + (uint32_t)32U;
uint8_t *seed_a = pk;
uint64_t s1[25U] = { 0U };
s1[0U] = (uint64_t)0x10010001a801U | (uint64_t)(uint16_t)0U << (uint32_t)48U;
Hacl_Impl_SHA3_state_permute(s1);
Hacl_Impl_SHA3_absorb(s1, (uint32_t)168U, (uint32_t)16U, z, (uint8_t)0x04U);
Hacl_Impl_SHA3_squeeze(s1, (uint32_t)168U, (uint32_t)16U, seed_a);
uint8_t *b = pk + (uint32_t)16U;
uint8_t *s_bytes = sk + (uint32_t)16U + Hacl_Impl_Frodo_KEM_crypto_publickeybytes;
Hacl_Impl_Frodo_KEM_KeyGen_frodo_mul_add_as_plus_e_pack(seed_a, seed_e, b, s_bytes);
memcpy(sk, s, (uint32_t)16U * sizeof s[0U]);
memcpy(sk + (uint32_t)16U, pk, Hacl_Impl_Frodo_KEM_crypto_publickeybytes * sizeof pk[0U]);
return (uint32_t)0U;
}
uint32_t Hacl_Frodo_KEM_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
{
uint8_t coins[16U] = { 0U };
randombytes_(Hacl_Impl_Frodo_KEM_bytes_mu, coins);
uint8_t g[48U] = { 0U };
uint8_t pk_coins[992U] = { 0U };
memcpy(pk_coins, pk, Hacl_Impl_Frodo_KEM_crypto_publickeybytes * sizeof pk[0U]);
memcpy(pk_coins + Hacl_Impl_Frodo_KEM_crypto_publickeybytes,
coins,
Hacl_Impl_Frodo_KEM_bytes_mu * sizeof coins[0U]);
uint64_t s[25U] = { 0U };
s[0U] = (uint64_t)0x10010001a801U | (uint64_t)(uint16_t)3U << (uint32_t)48U;
Hacl_Impl_SHA3_state_permute(s);
Hacl_Impl_SHA3_absorb(s,
(uint32_t)168U,
Hacl_Impl_Frodo_KEM_crypto_publickeybytes + Hacl_Impl_Frodo_KEM_bytes_mu,
pk_coins,
(uint8_t)0x04U);
Hacl_Impl_SHA3_squeeze(s, (uint32_t)168U, (uint32_t)48U, g);
Hacl_Impl_Frodo_KEM_Encaps_crypto_kem_enc_ct(pk, g, coins, ct);
Hacl_Impl_Frodo_KEM_Encaps_crypto_kem_enc_ss(g, ct, ss);
Lib_Memzero_clear_words_u8((uint32_t)32U, g);
return (uint32_t)0U;
}
uint32_t Hacl_Frodo_KEM_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
{
uint16_t bp_matrix[512U] = { 0U };
uint16_t c_matrix[64U] = { 0U };
uint8_t mu_decode[16U] = { 0U };
uint32_t c1Len = (uint32_t)960U;
uint8_t *c1 = ct;
uint8_t *c2 = ct + c1Len;
Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, c1, bp_matrix);
Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
uint8_t *s_bytes = sk + (uint32_t)16U + Hacl_Impl_Frodo_KEM_crypto_publickeybytes;
uint8_t mu_decode1[16U] = { 0U };
uint16_t s_matrix[512U] = { 0U };
uint16_t m_matrix[64U] = { 0U };
Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)64U, (uint32_t)8U, s_bytes, s_matrix);
Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
(uint32_t)64U,
(uint32_t)8U,
bp_matrix,
s_matrix,
m_matrix);
Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)2U, m_matrix, mu_decode1);
uint8_t g[48U] = { 0U };
uint32_t
pk_mu_decode_len = Hacl_Impl_Frodo_KEM_crypto_publickeybytes + Hacl_Impl_Frodo_KEM_bytes_mu;
KRML_CHECK_SIZE(sizeof (uint8_t), pk_mu_decode_len);
uint8_t pk_mu_decode[pk_mu_decode_len];
memset(pk_mu_decode, 0U, pk_mu_decode_len * sizeof pk_mu_decode[0U]);
uint8_t *pk0 = sk + (uint32_t)16U;
memcpy(pk_mu_decode, pk0, Hacl_Impl_Frodo_KEM_crypto_publickeybytes * sizeof pk0[0U]);
memcpy(pk_mu_decode + Hacl_Impl_Frodo_KEM_crypto_publickeybytes,
mu_decode1,
Hacl_Impl_Frodo_KEM_bytes_mu * sizeof mu_decode1[0U]);
uint64_t s0[25U] = { 0U };
s0[0U] = (uint64_t)0x10010001a801U | (uint64_t)(uint16_t)3U << (uint32_t)48U;
Hacl_Impl_SHA3_state_permute(s0);
Hacl_Impl_SHA3_absorb(s0, (uint32_t)168U, pk_mu_decode_len, pk_mu_decode, (uint8_t)0x04U);
Hacl_Impl_SHA3_squeeze(s0, (uint32_t)168U, (uint32_t)48U, g);
uint8_t *dp = g + (uint32_t)32U;
uint8_t *d0 = ct + Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes - (uint32_t)16U;
uint16_t bpp_matrix[512U] = { 0U };
uint16_t cp_matrix[64U] = { 0U };
uint8_t *pk = sk + (uint32_t)16U;
uint8_t *seed_a = pk;
uint8_t *b = pk + (uint32_t)16U;
uint8_t *seed_ep = g;
uint16_t sp_matrix[512U] = { 0U };
Hacl_Impl_Frodo_Sample_frodo_sample_matrix((uint32_t)8U,
(uint32_t)64U,
(uint32_t)16U,
seed_ep,
(uint16_t)4U,
sp_matrix);
uint16_t a_matrix[4096U] = { 0U };
uint16_t ep_matrix[512U] = { 0U };
Hacl_Impl_Frodo_Gen_frodo_gen_matrix_cshake((uint32_t)64U, (uint32_t)16U, seed_a, a_matrix);
Hacl_Impl_Frodo_Sample_frodo_sample_matrix((uint32_t)8U,
(uint32_t)64U,
(uint32_t)16U,
seed_ep,
(uint16_t)5U,
ep_matrix);
Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
(uint32_t)64U,
(uint32_t)64U,
sp_matrix,
a_matrix,
bpp_matrix);
Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bpp_matrix, ep_matrix);
Lib_Memzero_clear_words_u16((uint32_t)512U, ep_matrix);
Hacl_Impl_Frodo_KEM_Encaps_frodo_mul_add_sb_plus_e_plus_mu(b,
seed_ep,
mu_decode1,
sp_matrix,
cp_matrix);
Lib_Memzero_clear_words_u16((uint32_t)512U, sp_matrix);
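  /* Descriptive note: bpp_matrix and cp_matrix were just recomputed from the
   * decoded message mu_decode1. Below, the ciphertext's d field, bp_matrix and
   * c_matrix are compared against the recomputed values, and the shared secret
   * is derived from kp on success or from the stored secret s on failure, in
   * the style of a Fujisaki-Okamoto transform. */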
uint8_t res = (uint8_t)255U;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U)
{
uint8_t uu____0 = FStar_UInt8_eq_mask(d0[i], dp[i]);
res = uu____0 & res;
}
uint8_t z = res;
bool b1 = z == (uint8_t)255U;
bool
b2 =
Hacl_Impl_Matrix_matrix_eq((uint32_t)8U,
(uint32_t)64U,
(uint32_t)15U,
bp_matrix,
bpp_matrix);
bool
b3 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c_matrix, cp_matrix);
bool b0 = b1 && b2 && b3;
bool b4 = b0;
uint8_t *kp = g + (uint32_t)16U;
uint8_t *s = sk;
uint8_t *kp_s;
if (b4)
{
kp_s = kp;
}
else
{
kp_s = s;
}
uint8_t *c12 = ct;
uint8_t *d = ct + Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes - (uint32_t)16U;
uint32_t ss_init_len = Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes + (uint32_t)16U;
KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
uint8_t ss_init[ss_init_len];
memset(ss_init, 0U, ss_init_len * sizeof ss_init[0U]);
memcpy(ss_init,
c12,
(Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes - (uint32_t)16U) * sizeof c12[0U]);
memcpy(ss_init + Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes - (uint32_t)16U,
kp_s,
(uint32_t)16U * sizeof kp_s[0U]);
memcpy(ss_init + Hacl_Impl_Frodo_KEM_crypto_ciphertextbytes - (uint32_t)16U + (uint32_t)16U,
d,
(uint32_t)16U * sizeof d[0U]);
uint64_t s1[25U] = { 0U };
s1[0U] = (uint64_t)0x10010001a801U | (uint64_t)(uint16_t)7U << (uint32_t)48U;
Hacl_Impl_SHA3_state_permute(s1);
Hacl_Impl_SHA3_absorb(s1, (uint32_t)168U, ss_init_len, ss_init, (uint8_t)0x04U);
Hacl_Impl_SHA3_squeeze(s1, (uint32_t)168U, (uint32_t)16U, ss);
Lib_Memzero_clear_words_u8((uint32_t)32U, g);
return (uint32_t)0U;
}

46
3rdparty/hacl-star/evercrypt/Hacl_Frodo_KEM.h vendored Normal file

@@ -0,0 +1,46 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Frodo_KEM_H
#define __Hacl_Frodo_KEM_H
#include "Hacl_Kremlib.h"
#include "Lib_RandomBuffer_System.h"
#include "Hacl_SHA3.h"
#include "Hacl_Lib.h"
uint32_t Hacl_Frodo_KEM_crypto_kem_keypair(uint8_t *pk, uint8_t *sk);
uint32_t Hacl_Frodo_KEM_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk);
uint32_t Hacl_Frodo_KEM_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk);
#define __Hacl_Frodo_KEM_H_DEFINED
#endif
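/* Minimal usage sketch (illustrative only, not part of the extracted API; the
 * example function name is hypothetical). The three entry points above compose
 * as a standard KEM; buffer sizes assume this extraction's Frodo parameter set
 * (pk 976, ct 1096, sk 2016, ss 16 bytes). Guarded by #if 0 so it is never
 * compiled. */
#if 0
static void example_frodo_kem_roundtrip(void)
{
  uint8_t pk[976U] = { 0U };
  uint8_t sk[2016U] = { 0U };
  uint8_t ct[1096U] = { 0U };
  uint8_t ss_enc[16U] = { 0U };
  uint8_t ss_dec[16U] = { 0U };
  Hacl_Frodo_KEM_crypto_kem_keypair(pk, sk);     /* sample a key pair     */
  Hacl_Frodo_KEM_crypto_kem_enc(ct, ss_enc, pk); /* encapsulate under pk  */
  Hacl_Frodo_KEM_crypto_kem_dec(ss_dec, ct, sk); /* decapsulate with sk   */
  /* ss_enc and ss_dec agree for an honestly generated ciphertext. */
}
#endif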

File diff suppressed because one or more lines are too long

161
3rdparty/hacl-star/evercrypt/Hacl_Hash.h vendored Normal file

@@ -0,0 +1,161 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Hash_H
#define __Hacl_Hash_H
#include "Hacl_Kremlib.h"
void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
void
Hacl_Hash_MD5_legacy_update_last(
uint32_t *s,
uint64_t prev_len,
uint8_t *input,
uint32_t input_len
);
void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s);
void Hacl_Hash_Core_MD5_legacy_update(uint32_t *abcd, uint8_t *x);
void Hacl_Hash_Core_MD5_legacy_pad(uint64_t len, uint8_t *dst);
void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst);
void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
void
Hacl_Hash_SHA1_legacy_update_last(
uint32_t *s,
uint64_t prev_len,
uint8_t *input,
uint32_t input_len
);
void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s);
void Hacl_Hash_Core_SHA1_legacy_update(uint32_t *h, uint8_t *l);
void Hacl_Hash_Core_SHA1_legacy_pad(uint64_t len, uint8_t *dst);
void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst);
void Hacl_Hash_SHA2_update_multi_224(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
void Hacl_Hash_SHA2_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
void Hacl_Hash_SHA2_update_multi_384(uint64_t *s, uint8_t *blocks, uint32_t n_blocks);
void Hacl_Hash_SHA2_update_multi_512(uint64_t *s, uint8_t *blocks, uint32_t n_blocks);
void
Hacl_Hash_SHA2_update_last_224(
uint32_t *s,
uint64_t prev_len,
uint8_t *input,
uint32_t input_len
);
void
Hacl_Hash_SHA2_update_last_256(
uint32_t *s,
uint64_t prev_len,
uint8_t *input,
uint32_t input_len
);
void
Hacl_Hash_SHA2_update_last_384(
uint64_t *s,
uint128_t prev_len,
uint8_t *input,
uint32_t input_len
);
void
Hacl_Hash_SHA2_update_last_512(
uint64_t *s,
uint128_t prev_len,
uint8_t *input,
uint32_t input_len
);
void Hacl_Hash_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst);
void Hacl_Hash_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
void Hacl_Hash_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst);
void Hacl_Hash_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst);
void Hacl_Hash_Core_SHA2_init_224(uint32_t *s);
void Hacl_Hash_Core_SHA2_init_256(uint32_t *s);
void Hacl_Hash_Core_SHA2_init_384(uint64_t *s);
void Hacl_Hash_Core_SHA2_init_512(uint64_t *s);
void Hacl_Hash_Core_SHA2_update_224(uint32_t *hash1, uint8_t *block);
void Hacl_Hash_Core_SHA2_update_256(uint32_t *hash1, uint8_t *block);
void Hacl_Hash_Core_SHA2_update_384(uint64_t *hash1, uint8_t *block);
void Hacl_Hash_Core_SHA2_update_512(uint64_t *hash1, uint8_t *block);
void Hacl_Hash_Core_SHA2_pad_224(uint64_t len, uint8_t *dst);
void Hacl_Hash_Core_SHA2_pad_256(uint64_t len, uint8_t *dst);
void Hacl_Hash_Core_SHA2_pad_384(uint128_t len, uint8_t *dst);
void Hacl_Hash_Core_SHA2_pad_512(uint128_t len, uint8_t *dst);
void Hacl_Hash_Core_SHA2_finish_224(uint32_t *s, uint8_t *dst);
void Hacl_Hash_Core_SHA2_finish_256(uint32_t *s, uint8_t *dst);
void Hacl_Hash_Core_SHA2_finish_384(uint64_t *s, uint8_t *dst);
void Hacl_Hash_Core_SHA2_finish_512(uint64_t *s, uint8_t *dst);
extern uint32_t Hacl_Hash_Core_SHA2_Constants_k224_256[64U];
extern uint64_t Hacl_Hash_Core_SHA2_Constants_k384_512[80U];
#define __Hacl_Hash_H_DEFINED
#endif
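/* Minimal usage sketch (illustrative only; the example function name is
 * hypothetical). The one-shot entry points above hash a complete buffer in a
 * single call; dst must hold the digest, 32 bytes for SHA-256. Guarded by
 * #if 0 so it is never compiled. */
#if 0
static void example_sha256_oneshot(uint8_t *msg, uint32_t msg_len)
{
  uint8_t digest[32U] = { 0U };
  Hacl_Hash_SHA2_hash_256(msg, msg_len, digest);
}
#endif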

39
3rdparty/hacl-star/evercrypt/Hacl_Kremlib.c vendored Normal file

@@ -0,0 +1,39 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Hacl_Kremlib.h"
static uint32_t LowStar_Vector_max_uint32 = (uint32_t)4294967295U;
static uint32_t LowStar_Vector_resize_ratio = (uint32_t)2U;
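/* Doubles the given capacity, saturating at UINT32_MAX to avoid 32-bit
 * overflow. */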
uint32_t LowStar_Vector_new_capacity(uint32_t cap)
{
if (cap >= LowStar_Vector_max_uint32 / LowStar_Vector_resize_ratio)
{
return LowStar_Vector_max_uint32;
}
return cap * LowStar_Vector_resize_ratio;
}

67
3rdparty/hacl-star/evercrypt/Hacl_Kremlib.h vendored Normal file

@@ -0,0 +1,67 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Kremlib_H
#define __Hacl_Kremlib_H
inline static uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);
inline static uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b);
inline static uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b);
inline static uint128_t FStar_UInt128_add(uint128_t a, uint128_t b);
inline static uint128_t FStar_UInt128_add_mod(uint128_t a, uint128_t b);
inline static uint128_t FStar_UInt128_logor(uint128_t a, uint128_t b);
inline static uint128_t FStar_UInt128_shift_left(uint128_t a, uint32_t s);
inline static uint128_t FStar_UInt128_shift_right(uint128_t a, uint32_t s);
inline static uint128_t FStar_UInt128_uint64_to_uint128(uint64_t a);
inline static uint64_t FStar_UInt128_uint128_to_uint64(uint128_t a);
inline static uint128_t FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
inline static void store128_le(uint8_t *x0, uint128_t x1);
inline static void store128_be(uint8_t *x0, uint128_t x1);
inline static uint128_t load128_be(uint8_t *x0);
uint32_t LowStar_Vector_new_capacity(uint32_t cap);
#define __Hacl_Kremlib_H_DEFINED
#endif

62
3rdparty/hacl-star/evercrypt/Hacl_Leftovers.c vendored Normal file

@@ -0,0 +1,62 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Hacl_Leftovers.h"
uint32_t Hacl_HMAC_DRBG_reseed_interval = (uint32_t)1024U;
uint32_t Hacl_HMAC_DRBG_max_output_length = (uint32_t)65536U;
uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = (uint32_t)65536U;
uint32_t Hacl_HMAC_DRBG_max_additional_input_length = (uint32_t)65536U;
uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
{
switch (a)
{
case Spec_Hash_Definitions_SHA1:
{
return (uint32_t)16U;
}
case Spec_Hash_Definitions_SHA2_256:
{
return (uint32_t)32U;
}
case Spec_Hash_Definitions_SHA2_384:
{
return (uint32_t)32U;
}
case Spec_Hash_Definitions_SHA2_512:
{
return (uint32_t)32U;
}
default:
{
KRML_HOST_EPRINTF("KreMLin incomplete match at %s:%d\n", __FILE__, __LINE__);
KRML_HOST_EXIT(253U);
}
}
}

55
3rdparty/hacl-star/evercrypt/Hacl_Leftovers.h vendored Normal file

@@ -0,0 +1,55 @@
/* MIT License
*
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "evercrypt_targetconfig.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include "kremlin/internal/target.h"
#ifndef __Hacl_Leftovers_H
#define __Hacl_Leftovers_H
#include "Hacl_Spec.h"
extern uint32_t Hacl_HMAC_DRBG_reseed_interval;
extern uint32_t Hacl_HMAC_DRBG_max_output_length;
extern uint32_t Hacl_HMAC_DRBG_max_personalization_string_length;
extern uint32_t Hacl_HMAC_DRBG_max_additional_input_length;
uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a);
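/* Working state of HMAC-DRBG: the key K, the value V and the reseed counter,
 * as in NIST SP 800-90A. */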
typedef struct Hacl_HMAC_DRBG_state_s
{
uint8_t *k;
uint8_t *v;
uint32_t *reseed_counter;
}
Hacl_HMAC_DRBG_state;
#define __Hacl_Leftovers_H_DEFINED
#endif

Some files were not shown because too many files have changed in this diff