Mirror of https://github.com/microsoft/CCF.git
Upgrade hacl-star (#1155)
This commit is contained in:
Parent
fb3306c8db
Commit
1ef5ab6c5a
@@ -0,0 +1,28 @@
#ifndef __KREMLIB_H
#define __KREMLIB_H

/******************************************************************************/
/* The all-in-one kremlib.h header */
/******************************************************************************/

/* This is a meta-header that is included by default in KreMLin generated
 * programs. If you wish to have a more lightweight set of headers, or are
 * targeting an environment where controlling these macros yourself is
 * important, consider using:
 *
 *   krml -minimal
 *
 * to disable the inclusion of this file (note: this also disables the default
 * argument "-bundle FStar.*"). You can then include the headers of your choice
 * one by one, using -add-early-include. */

#include "kremlin/internal/target.h"
#include "kremlin/internal/callconv.h"
#include "kremlin/internal/builtin.h"
#include "kremlin/internal/debug.h"
#include "kremlin/internal/types.h"

#include "kremlin/lowstar_endianness.h"
#include "kremlin/fstar_int.h"

#endif /* __KREMLIB_H */
@@ -0,0 +1,13 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

#ifndef __KREMLIN_ENDIAN_H
#define __KREMLIN_ENDIAN_H

#ifdef __GNUC__
#warning "c_endianness.h is deprecated, include lowstar_endianness.h instead"
#endif

#include "lowstar_endianness.h"

#endif
@@ -0,0 +1,81 @@
#ifndef __FSTAR_INT_H
#define __FSTAR_INT_H

#include "internal/types.h"

/*
 * Arithmetic Shift Right operator
 *
 * In all C standards, a >> b is implementation-defined when a has a signed
 * type and a negative value. See e.g. 6.5.7 in
 * http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2310.pdf
 *
 * GCC, MSVC, and Clang implement a >> b as an arithmetic shift.
 *
 * GCC: https://gcc.gnu.org/onlinedocs/gcc-9.1.0/gcc/Integers-implementation.html#Integers-implementation
 * MSVC: https://docs.microsoft.com/en-us/cpp/cpp/left-shift-and-right-shift-operators-input-and-output?view=vs-2019#right-shifts
 * Clang: tested that Clang 7, 8 and 9 compile this to an arithmetic shift
 *
 * We implement arithmetic shift right simply as >> in these compilers
 * and bail out in others.
 */

#if !(defined(_MSC_VER) || defined(__GNUC__) || (defined(__clang__) && (__clang_major__ >= 7)))

static inline
int8_t FStar_Int8_shift_arithmetic_right(int8_t a, uint32_t b) {
  do {
    KRML_HOST_EPRINTF("Could not identify compiler so could not provide an implementation of signed arithmetic shift right.\n");
    KRML_HOST_EXIT(255);
  } while (0);
}

static inline
int16_t FStar_Int16_shift_arithmetic_right(int16_t a, uint32_t b) {
  do {
    KRML_HOST_EPRINTF("Could not identify compiler so could not provide an implementation of signed arithmetic shift right.\n");
    KRML_HOST_EXIT(255);
  } while (0);
}

static inline
int32_t FStar_Int32_shift_arithmetic_right(int32_t a, uint32_t b) {
  do {
    KRML_HOST_EPRINTF("Could not identify compiler so could not provide an implementation of signed arithmetic shift right.\n");
    KRML_HOST_EXIT(255);
  } while (0);
}

static inline
int64_t FStar_Int64_shift_arithmetic_right(int64_t a, uint32_t b) {
  do {
    KRML_HOST_EPRINTF("Could not identify compiler so could not provide an implementation of signed arithmetic shift right.\n");
    KRML_HOST_EXIT(255);
  } while (0);
}

#else

static inline
int8_t FStar_Int8_shift_arithmetic_right(int8_t a, uint32_t b) {
  return (a >> b);
}

static inline
int16_t FStar_Int16_shift_arithmetic_right(int16_t a, uint32_t b) {
  return (a >> b);
}

static inline
int32_t FStar_Int32_shift_arithmetic_right(int32_t a, uint32_t b) {
  return (a >> b);
}

static inline
int64_t FStar_Int64_shift_arithmetic_right(int64_t a, uint32_t b) {
  return (a >> b);
}

#endif /* !(defined(_MSC_VER) ... ) */

#endif /* __FSTAR_INT_H */
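A minimal usage sketch, not part of the commit: on the compilers the header supports, the helpers reduce to a plain >> that preserves the sign bit. The include path is an assumption (it must expose the vendored kremlib headers).

#include <stdio.h>
#include "kremlib.h"

int main(void) {
  /* Arithmetic shift keeps the sign: -8 >> 1 yields -4 on GCC/Clang/MSVC. */
  printf("%d\n", (int)FStar_Int32_shift_arithmetic_right(-8, 1U));
  return 0;
}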
@@ -0,0 +1,16 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

#ifndef __KREMLIN_BUILTIN_H
#define __KREMLIN_BUILTIN_H

/* For alloca, when using KreMLin's -falloca */
#if (defined(_WIN32) || defined(_WIN64))
# include <malloc.h>
#endif

/* If some globals need to be initialized before the main, then kremlin will
 * generate and try to link last a function with this type: */
void kremlinit_globals(void);

#endif
46
3rdparty/hacl-star/evercrypt/kremlin/include/kremlin/internal/callconv.h
vendored
Normal file
@@ -0,0 +1,46 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

#ifndef __KREMLIN_CALLCONV_H
#define __KREMLIN_CALLCONV_H

/******************************************************************************/
/* Some macros to ease compatibility */
/******************************************************************************/

/* We want to generate __cdecl safely without worrying about it being undefined.
 * When using MSVC, these are always defined. When using MinGW, these are
 * defined too. They have no meaning for other platforms, so we define them to
 * be empty macros in other situations. */
#ifndef _MSC_VER
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif

/* Since KreMLin emits the inline keyword unconditionally, we follow the
 * guidelines at https://gcc.gnu.org/onlinedocs/gcc/Inline.html and make this
 * __inline__ to ensure the code compiles with -std=c90 and earlier. */
#ifdef __GNUC__
# define inline __inline__
#endif

/* GCC-specific attribute syntax; everyone else gets the standard C inline
 * attribute. */
#ifdef __GNU_C__
# ifndef __clang__
#   define force_inline inline __attribute__((always_inline))
# else
#   define force_inline inline
# endif
#else
# define force_inline inline
#endif

#endif
@@ -0,0 +1,32 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

#ifndef KRML_COMPAT_H
#define KRML_COMPAT_H

#include <inttypes.h>

/* A series of macros that define C implementations of types that are not Low*,
 * to facilitate porting programs to Low*. */

typedef struct {
  uint32_t length;
  const char *data;
} FStar_Bytes_bytes;

typedef int32_t Prims_pos, Prims_nat, Prims_nonzero, Prims_int,
    krml_checked_int_t;

#define RETURN_OR(x)                                                           \
  do {                                                                         \
    int64_t __ret = x;                                                         \
    if (__ret < INT32_MIN || INT32_MAX < __ret) {                              \
      KRML_HOST_PRINTF(                                                        \
          "Prims.{int,nat,pos} integer overflow at %s:%d\n", __FILE__,         \
          __LINE__);                                                           \
      KRML_HOST_EXIT(252);                                                     \
    }                                                                          \
    return (int32_t)__ret;                                                     \
  } while (0)

#endif
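For context, a sketch of how RETURN_OR is meant to be consumed (illustrative helper, not from the commit; assumes kremlin/internal/target.h is in scope for KRML_HOST_PRINTF and KRML_HOST_EXIT):

/* Compute in 64 bits, then narrow to the 32-bit Prims integer model,
 * aborting with exit code 252 if the value does not fit in int32_t. */
static krml_checked_int_t add_checked(krml_checked_int_t a, krml_checked_int_t b) {
  RETURN_OR((int64_t)a + (int64_t)b);
}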
@@ -0,0 +1,57 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

#ifndef __KREMLIN_DEBUG_H
#define __KREMLIN_DEBUG_H

#include <inttypes.h>

#include "kremlin/internal/target.h"

/******************************************************************************/
/* Debugging helpers - intended only for KreMLin developers */
/******************************************************************************/

/* In support of "-wasm -d force-c": we might need this function to be
 * forward-declared, because the dependency on WasmSupport appears very late,
 * after SimplifyWasm, and sadly, after the topological order has been done. */
void WasmSupport_check_buffer_size(uint32_t s);

/* A series of GCC atrocities to trace function calls (kremlin's [-d c-calls]
 * option). Useful when trying to debug, say, Wasm, to compare traces. */
/* clang-format off */
#ifdef __GNUC__
#define KRML_FORMAT(X) _Generic((X),                                          \
  uint8_t : "0x%08" PRIx8,                                                    \
  uint16_t: "0x%08" PRIx16,                                                   \
  uint32_t: "0x%08" PRIx32,                                                   \
  uint64_t: "0x%08" PRIx64,                                                   \
  int8_t  : "0x%08" PRIx8,                                                    \
  int16_t : "0x%08" PRIx16,                                                   \
  int32_t : "0x%08" PRIx32,                                                   \
  int64_t : "0x%08" PRIx64,                                                   \
  default : "%s")

#define KRML_FORMAT_ARG(X) _Generic((X),                                      \
  uint8_t : X,                                                                \
  uint16_t: X,                                                                \
  uint32_t: X,                                                                \
  uint64_t: X,                                                                \
  int8_t  : X,                                                                \
  int16_t : X,                                                                \
  int32_t : X,                                                                \
  int64_t : X,                                                                \
  default : "unknown")
/* clang-format on */

# define KRML_DEBUG_RETURN(X)                                                 \
  ({                                                                          \
    __auto_type _ret = (X);                                                   \
    KRML_HOST_PRINTF("returning: ");                                          \
    KRML_HOST_PRINTF(KRML_FORMAT(_ret), KRML_FORMAT_ARG(_ret));               \
    KRML_HOST_PRINTF(" \n");                                                  \
    _ret;                                                                     \
  })
#endif

#endif
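An illustration of how the tracing macro is intended to be used (GCC statement expressions; the wrapper below is hypothetical, not from the commit):

#ifdef __GNUC__
/* Prints "returning: 0x0000002a" before yielding the value unchanged. */
static int32_t traced_answer(void) {
  return KRML_DEBUG_RETURN((int32_t)42);
}
#endif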
@@ -0,0 +1,111 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License. */
|
||||
|
||||
#ifndef __KREMLIN_TARGET_H
|
||||
#define __KREMLIN_TARGET_H
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <stdbool.h>
|
||||
#include <inttypes.h>
|
||||
#include <limits.h>
|
||||
|
||||
#include "kremlin/internal/callconv.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Macros that KreMLin will generate. */
|
||||
/******************************************************************************/
|
||||
|
||||
/* For "bare" targets that do not have a C stdlib, the user might want to use
|
||||
* [-add-early-include '"mydefinitions.h"'] and override these. */
|
||||
#ifndef KRML_HOST_PRINTF
|
||||
# define KRML_HOST_PRINTF printf
|
||||
#endif
|
||||
|
||||
#if ( \
|
||||
(defined __STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
|
||||
(!(defined KRML_HOST_EPRINTF)))
|
||||
# define KRML_HOST_EPRINTF(...) fprintf(stderr, __VA_ARGS__)
|
||||
#endif
|
||||
|
||||
#ifndef KRML_HOST_EXIT
|
||||
# define KRML_HOST_EXIT exit
|
||||
#endif
|
||||
|
||||
#ifndef KRML_HOST_MALLOC
|
||||
# define KRML_HOST_MALLOC malloc
|
||||
#endif
|
||||
|
||||
#ifndef KRML_HOST_CALLOC
|
||||
# define KRML_HOST_CALLOC calloc
|
||||
#endif
|
||||
|
||||
#ifndef KRML_HOST_FREE
|
||||
# define KRML_HOST_FREE free
|
||||
#endif
|
||||
|
||||
#ifndef KRML_HOST_TIME
|
||||
|
||||
# include <time.h>
|
||||
|
||||
/* Prims_nat not yet in scope */
|
||||
inline static int32_t krml_time() {
|
||||
return (int32_t)time(NULL);
|
||||
}
|
||||
|
||||
# define KRML_HOST_TIME krml_time
|
||||
#endif
|
||||
|
||||
/* In statement position, exiting is easy. */
|
||||
#define KRML_EXIT \
|
||||
do { \
|
||||
KRML_HOST_PRINTF("Unimplemented function at %s:%d\n", __FILE__, __LINE__); \
|
||||
KRML_HOST_EXIT(254); \
|
||||
} while (0)
|
||||
|
||||
/* In expression position, use the comma-operator and a malloc to return an
|
||||
* expression of the right size. KreMLin passes t as the parameter to the macro.
|
||||
*/
|
||||
#define KRML_EABORT(t, msg) \
|
||||
(KRML_HOST_PRINTF("KreMLin abort at %s:%d\n%s\n", __FILE__, __LINE__, msg), \
|
||||
KRML_HOST_EXIT(255), *((t *)KRML_HOST_MALLOC(sizeof(t))))
|
||||
|
||||
/* In FStar.Buffer.fst, the size of arrays is uint32_t, but it's a number of
|
||||
* *elements*. Do an ugly, run-time check (some of which KreMLin can eliminate).
|
||||
*/
|
||||
|
||||
#ifdef __GNUC__
|
||||
# define _KRML_CHECK_SIZE_PRAGMA \
|
||||
_Pragma("GCC diagnostic ignored \"-Wtype-limits\"")
|
||||
#else
|
||||
# define _KRML_CHECK_SIZE_PRAGMA
|
||||
#endif
|
||||
|
||||
#define KRML_CHECK_SIZE(size_elt, sz) \
|
||||
do { \
|
||||
_KRML_CHECK_SIZE_PRAGMA \
|
||||
if (((size_t)(sz)) > ((size_t)(SIZE_MAX / (size_elt)))) { \
|
||||
KRML_HOST_PRINTF( \
|
||||
"Maximum allocatable size exceeded, aborting before overflow at " \
|
||||
"%s:%d\n", \
|
||||
__FILE__, __LINE__); \
|
||||
KRML_HOST_EXIT(253); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#if defined(_MSC_VER) && _MSC_VER < 1900
|
||||
# define KRML_HOST_SNPRINTF(buf, sz, fmt, arg) _snprintf_s(buf, sz, _TRUNCATE, fmt, arg)
|
||||
#else
|
||||
# define KRML_HOST_SNPRINTF(buf, sz, fmt, arg) snprintf(buf, sz, fmt, arg)
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ > 4
|
||||
# define KRML_DEPRECATED(x) __attribute__((deprecated(x)))
|
||||
#elif defined(__GNUC__)
|
||||
/* deprecated attribute is not defined in GCC < 4.5. */
|
||||
# define KRML_DEPRECATED(x)
|
||||
#elif defined(_MSC_VER)
|
||||
# define KRML_DEPRECATED(x) __declspec(deprecated(x))
|
||||
#endif
|
||||
|
||||
#endif
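A short sketch of how KreMLin-generated code combines these macros when allocating arrays (the helper name is illustrative, not from the commit):

/* Guard an allocation of n 64-bit elements: KRML_CHECK_SIZE aborts (exit 253)
 * if n * sizeof(uint64_t) would overflow size_t; then the host allocator runs. */
static uint64_t *alloc_u64(uint32_t n) {
  KRML_CHECK_SIZE(sizeof(uint64_t), n);
  return (uint64_t *)KRML_HOST_CALLOC(n, sizeof(uint64_t));
}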
@@ -0,0 +1,99 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License. */
|
||||
|
||||
#ifndef KRML_TYPES_H
|
||||
#define KRML_TYPES_H
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
|
||||
/* Types which are either abstract, meaning that have to be implemented in C, or
|
||||
* which are models, meaning that they are swapped out at compile-time for
|
||||
* hand-written C types (in which case they're marked as noextract). */
|
||||
|
||||
typedef uint64_t FStar_UInt64_t, FStar_UInt64_t_;
|
||||
typedef int64_t FStar_Int64_t, FStar_Int64_t_;
|
||||
typedef uint32_t FStar_UInt32_t, FStar_UInt32_t_;
|
||||
typedef int32_t FStar_Int32_t, FStar_Int32_t_;
|
||||
typedef uint16_t FStar_UInt16_t, FStar_UInt16_t_;
|
||||
typedef int16_t FStar_Int16_t, FStar_Int16_t_;
|
||||
typedef uint8_t FStar_UInt8_t, FStar_UInt8_t_;
|
||||
typedef int8_t FStar_Int8_t, FStar_Int8_t_;
|
||||
|
||||
/* Only useful when building Kremlib, because it's in the dependency graph of
|
||||
* FStar.Int.Cast. */
|
||||
typedef uint64_t FStar_UInt63_t, FStar_UInt63_t_;
|
||||
typedef int64_t FStar_Int63_t, FStar_Int63_t_;
|
||||
|
||||
typedef double FStar_Float_float;
|
||||
typedef uint32_t FStar_Char_char;
|
||||
typedef FILE *FStar_IO_fd_read, *FStar_IO_fd_write;
|
||||
|
||||
typedef void *FStar_Dyn_dyn;
|
||||
|
||||
typedef const char *C_String_t, *C_String_t_, *C_Compat_String_t, *C_Compat_String_t_;
|
||||
|
||||
typedef int exit_code;
|
||||
typedef FILE *channel;
|
||||
|
||||
typedef unsigned long long TestLib_cycles;
|
||||
|
||||
typedef uint64_t FStar_Date_dateTime, FStar_Date_timeSpan;
|
||||
|
||||
/* Now Prims.string is no longer illegal with the new model in LowStar.Printf;
|
||||
* it's operations that produce Prims_string which are illegal. Bring the
|
||||
* definition into scope by default. */
|
||||
typedef const char *Prims_string;
|
||||
|
||||
#if (defined(_MSC_VER) && defined(_M_X64) && !defined(__clang__))
|
||||
#define IS_MSVC64 1
|
||||
#endif
|
||||
|
||||
/* This code makes a number of assumptions and should be refined. In particular,
|
||||
* it assumes that: any non-MSVC amd64 compiler supports int128. Maybe it would
|
||||
* be easier to just test for defined(__SIZEOF_INT128__) only? */
|
||||
#if (defined(__x86_64__) || \
|
||||
defined(__x86_64) || \
|
||||
defined(__aarch64__) || \
|
||||
(defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)) || \
|
||||
defined(__s390x__) || \
|
||||
(defined(_MSC_VER) && !defined(_M_X64) && defined(__clang__)) || \
|
||||
(defined(__mips__) && defined(__LP64__)) || \
|
||||
(defined(__riscv) && __riscv_xlen == 64) || \
|
||||
defined(__SIZEOF_INT128__))
|
||||
#define HAS_INT128 1
|
||||
#endif
|
||||
|
||||
/* The uint128 type is a special case since we offer several implementations of
|
||||
* it, depending on the compiler and whether the user wants the verified
|
||||
* implementation or not. */
|
||||
#if !defined(KRML_VERIFIED_UINT128) && defined(IS_MSVC64)
|
||||
# include <emmintrin.h>
|
||||
typedef __m128i FStar_UInt128_uint128;
|
||||
#elif !defined(KRML_VERIFIED_UINT128) && defined(HAS_INT128)
|
||||
typedef unsigned __int128 FStar_UInt128_uint128;
|
||||
#else
|
||||
typedef struct FStar_UInt128_uint128_s {
|
||||
uint64_t low;
|
||||
uint64_t high;
|
||||
} FStar_UInt128_uint128;
|
||||
#endif
|
||||
|
||||
/* The former is defined once, here (otherwise, conflicts for test-c89). The
|
||||
* latter is for internal use. */
|
||||
typedef FStar_UInt128_uint128 FStar_UInt128_t, uint128_t;
|
||||
|
||||
#include "kremlin/lowstar_endianness.h"
|
||||
|
||||
#if !defined(KRML_VERIFIED_UINT128) && defined(IS_MSVC64)
|
||||
#include "fstar_uint128_msvc.h"
|
||||
#elif !defined(KRML_VERIFIED_UINT128) && defined(HAS_INT128)
|
||||
#include "fstar_uint128_gcc64.h"
|
||||
#else
|
||||
#include "FStar_UInt128_Verified.h"
|
||||
#include "fstar_uint128_struct_endianness.h"
|
||||
#endif
|
||||
|
||||
#endif
5
3rdparty/hacl-star/evercrypt/kremlin/include/kremlin/internal/wasmsupport.h
vendored
Normal file
@@ -0,0 +1,5 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License. */
|
||||
|
||||
/* This file is automatically included when compiling with -wasm -d force-c */
|
||||
#define WasmSupport_check_buffer_size(X)
230
3rdparty/hacl-star/evercrypt/kremlin/include/kremlin/lowstar_endianness.h
vendored
Normal file
@@ -0,0 +1,230 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License. */
|
||||
|
||||
#ifndef __LOWSTAR_ENDIANNESS_H
|
||||
#define __LOWSTAR_ENDIANNESS_H
|
||||
|
||||
#include <string.h>
|
||||
#include <inttypes.h>
|
||||
|
||||
/******************************************************************************/
|
||||
/* Implementing C.fst (part 2: endian-ness macros) */
|
||||
/******************************************************************************/
|
||||
|
||||
/* ... for Linux */
|
||||
#if defined(__linux__) || defined(__CYGWIN__) || defined (__USE_SYSTEM_ENDIAN_H__)
|
||||
# include <endian.h>
|
||||
|
||||
/* ... for OSX */
|
||||
#elif defined(__APPLE__)
|
||||
# include <libkern/OSByteOrder.h>
|
||||
# define htole64(x) OSSwapHostToLittleInt64(x)
|
||||
# define le64toh(x) OSSwapLittleToHostInt64(x)
|
||||
# define htobe64(x) OSSwapHostToBigInt64(x)
|
||||
# define be64toh(x) OSSwapBigToHostInt64(x)
|
||||
|
||||
# define htole16(x) OSSwapHostToLittleInt16(x)
|
||||
# define le16toh(x) OSSwapLittleToHostInt16(x)
|
||||
# define htobe16(x) OSSwapHostToBigInt16(x)
|
||||
# define be16toh(x) OSSwapBigToHostInt16(x)
|
||||
|
||||
# define htole32(x) OSSwapHostToLittleInt32(x)
|
||||
# define le32toh(x) OSSwapLittleToHostInt32(x)
|
||||
# define htobe32(x) OSSwapHostToBigInt32(x)
|
||||
# define be32toh(x) OSSwapBigToHostInt32(x)
|
||||
|
||||
/* ... for Solaris */
|
||||
#elif defined(__sun__)
|
||||
# include <sys/byteorder.h>
|
||||
# define htole64(x) LE_64(x)
|
||||
# define le64toh(x) LE_64(x)
|
||||
# define htobe64(x) BE_64(x)
|
||||
# define be64toh(x) BE_64(x)
|
||||
|
||||
# define htole16(x) LE_16(x)
|
||||
# define le16toh(x) LE_16(x)
|
||||
# define htobe16(x) BE_16(x)
|
||||
# define be16toh(x) BE_16(x)
|
||||
|
||||
# define htole32(x) LE_32(x)
|
||||
# define le32toh(x) LE_32(x)
|
||||
# define htobe32(x) BE_32(x)
|
||||
# define be32toh(x) BE_32(x)
|
||||
|
||||
/* ... for the BSDs */
|
||||
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
|
||||
# include <sys/endian.h>
|
||||
#elif defined(__OpenBSD__)
|
||||
# include <endian.h>
|
||||
|
||||
/* ... for Windows (MSVC)... not targeting XBOX 360! */
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
# include <stdlib.h>
|
||||
# define htobe16(x) _byteswap_ushort(x)
|
||||
# define htole16(x) (x)
|
||||
# define be16toh(x) _byteswap_ushort(x)
|
||||
# define le16toh(x) (x)
|
||||
|
||||
# define htobe32(x) _byteswap_ulong(x)
|
||||
# define htole32(x) (x)
|
||||
# define be32toh(x) _byteswap_ulong(x)
|
||||
# define le32toh(x) (x)
|
||||
|
||||
# define htobe64(x) _byteswap_uint64(x)
|
||||
# define htole64(x) (x)
|
||||
# define be64toh(x) _byteswap_uint64(x)
|
||||
# define le64toh(x) (x)
|
||||
|
||||
/* ... for Windows (GCC-like, e.g. mingw or clang) */
|
||||
#elif (defined(_WIN32) || defined(_WIN64)) && \
|
||||
(defined(__GNUC__) || defined(__clang__))
|
||||
|
||||
# define htobe16(x) __builtin_bswap16(x)
|
||||
# define htole16(x) (x)
|
||||
# define be16toh(x) __builtin_bswap16(x)
|
||||
# define le16toh(x) (x)
|
||||
|
||||
# define htobe32(x) __builtin_bswap32(x)
|
||||
# define htole32(x) (x)
|
||||
# define be32toh(x) __builtin_bswap32(x)
|
||||
# define le32toh(x) (x)
|
||||
|
||||
# define htobe64(x) __builtin_bswap64(x)
|
||||
# define htole64(x) (x)
|
||||
# define be64toh(x) __builtin_bswap64(x)
|
||||
# define le64toh(x) (x)
|
||||
|
||||
/* ... generic big-endian fallback code */
|
||||
#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
|
||||
/* byte swapping code inspired by:
|
||||
* https://github.com/rweather/arduinolibs/blob/master/libraries/Crypto/utility/EndianUtil.h
|
||||
* */
|
||||
|
||||
# define htobe32(x) (x)
|
||||
# define be32toh(x) (x)
|
||||
# define htole32(x) \
|
||||
(__extension__({ \
|
||||
uint32_t _temp = (x); \
|
||||
((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
|
||||
((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
|
||||
}))
|
||||
# define le32toh(x) (htole32((x)))
|
||||
|
||||
# define htobe64(x) (x)
|
||||
# define be64toh(x) (x)
|
||||
# define htole64(x) \
|
||||
(__extension__({ \
|
||||
uint64_t __temp = (x); \
|
||||
uint32_t __low = htobe32((uint32_t)__temp); \
|
||||
uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
|
||||
(((uint64_t)__low) << 32) | __high; \
|
||||
}))
|
||||
# define le64toh(x) (htole64((x)))
|
||||
|
||||
/* ... generic little-endian fallback code */
|
||||
#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
|
||||
# define htole32(x) (x)
|
||||
# define le32toh(x) (x)
|
||||
# define htobe32(x) \
|
||||
(__extension__({ \
|
||||
uint32_t _temp = (x); \
|
||||
((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
|
||||
((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
|
||||
}))
|
||||
# define be32toh(x) (htobe32((x)))
|
||||
|
||||
# define htole64(x) (x)
|
||||
# define le64toh(x) (x)
|
||||
# define htobe64(x) \
|
||||
(__extension__({ \
|
||||
uint64_t __temp = (x); \
|
||||
uint32_t __low = htobe32((uint32_t)__temp); \
|
||||
uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
|
||||
(((uint64_t)__low) << 32) | __high; \
|
||||
}))
|
||||
# define be64toh(x) (htobe64((x)))
|
||||
|
||||
/* ... couldn't determine endian-ness of the target platform */
|
||||
#else
|
||||
# error "Please define __BYTE_ORDER__!"
|
||||
|
||||
#endif /* defined(__linux__) || ... */
|
||||
|
||||
/* Loads and stores. These avoid undefined behavior due to unaligned memory
|
||||
* accesses, via memcpy. */
|
||||
|
||||
inline static uint16_t load16(uint8_t *b) {
|
||||
uint16_t x;
|
||||
memcpy(&x, b, 2);
|
||||
return x;
|
||||
}
|
||||
|
||||
inline static uint32_t load32(uint8_t *b) {
|
||||
uint32_t x;
|
||||
memcpy(&x, b, 4);
|
||||
return x;
|
||||
}
|
||||
|
||||
inline static uint64_t load64(uint8_t *b) {
|
||||
uint64_t x;
|
||||
memcpy(&x, b, 8);
|
||||
return x;
|
||||
}
|
||||
|
||||
inline static void store16(uint8_t *b, uint16_t i) {
|
||||
memcpy(b, &i, 2);
|
||||
}
|
||||
|
||||
inline static void store32(uint8_t *b, uint32_t i) {
|
||||
memcpy(b, &i, 4);
|
||||
}
|
||||
|
||||
inline static void store64(uint8_t *b, uint64_t i) {
|
||||
memcpy(b, &i, 8);
|
||||
}
|
||||
|
||||
/* Legacy accessors so that this header can serve as an implementation of
|
||||
* C.Endianness */
|
||||
#define load16_le(b) (le16toh(load16(b)))
|
||||
#define store16_le(b, i) (store16(b, htole16(i)))
|
||||
#define load16_be(b) (be16toh(load16(b)))
|
||||
#define store16_be(b, i) (store16(b, htobe16(i)))
|
||||
|
||||
#define load32_le(b) (le32toh(load32(b)))
|
||||
#define store32_le(b, i) (store32(b, htole32(i)))
|
||||
#define load32_be(b) (be32toh(load32(b)))
|
||||
#define store32_be(b, i) (store32(b, htobe32(i)))
|
||||
|
||||
#define load64_le(b) (le64toh(load64(b)))
|
||||
#define store64_le(b, i) (store64(b, htole64(i)))
|
||||
#define load64_be(b) (be64toh(load64(b)))
|
||||
#define store64_be(b, i) (store64(b, htobe64(i)))
|
||||
|
||||
/* Co-existence of LowStar.Endianness and FStar.Endianness generates name
|
||||
* conflicts, because of course both insist on having no prefixes. Until a
|
||||
* prefix is added, or until we truly retire FStar.Endianness, solve this issue
|
||||
* in an elegant way. */
|
||||
#define load16_le0 load16_le
|
||||
#define store16_le0 store16_le
|
||||
#define load16_be0 load16_be
|
||||
#define store16_be0 store16_be
|
||||
|
||||
#define load32_le0 load32_le
|
||||
#define store32_le0 store32_le
|
||||
#define load32_be0 load32_be
|
||||
#define store32_be0 store32_be
|
||||
|
||||
#define load64_le0 load64_le
|
||||
#define store64_le0 store64_le
|
||||
#define load64_be0 load64_be
|
||||
#define store64_be0 store64_be
|
||||
|
||||
#define load128_le0 load128_le
|
||||
#define store128_le0 store128_le
|
||||
#define load128_be0 load128_be
|
||||
#define store128_be0 store128_be
|
||||
|
||||
#endif
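A sketch of typical use, not part of the commit: the load/store macros give byte-order-explicit, alignment-safe access on any host. The helper name is illustrative.

/* Read a 32-bit big-endian value from a wire buffer and re-emit it
 * little-endian; the memcpy-based loads avoid unaligned-access UB. */
static uint32_t be_to_le32(uint8_t in[4], uint8_t out[4]) {
  uint32_t v = load32_be(in);
  store32_le(out, v);
  return v;
}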
@@ -0,0 +1,79 @@
/*
|
||||
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License.
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdbool.h>
|
||||
#include "kremlin/internal/compat.h"
|
||||
#include "kremlin/lowstar_endianness.h"
|
||||
#include "kremlin/internal/types.h"
|
||||
#include "kremlin/internal/target.h"
|
||||
|
||||
#ifndef __FStar_UInt128_H
|
||||
#define __FStar_UInt128_H
|
||||
|
||||
|
||||
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);
|
||||
|
||||
static inline bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
|
||||
|
||||
static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y);
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
|
||||
|
||||
#define __FStar_UInt128_H_DEFINED
|
||||
#endif
346
3rdparty/hacl-star/evercrypt/kremlin/kremlib/minimal/FStar_UInt128_Verified.h
vendored
Normal file
@@ -0,0 +1,346 @@
/*
|
||||
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License.
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdbool.h>
|
||||
#include "kremlin/internal/types.h"
|
||||
#include "kremlin/internal/target.h"
|
||||
|
||||
#ifndef __FStar_UInt128_Verified_H
|
||||
#define __FStar_UInt128_Verified_H
|
||||
|
||||
#include "FStar_UInt_8_16_32_64.h"
|
||||
|
||||
|
||||
static inline uint64_t FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
|
||||
{
|
||||
return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
|
||||
}
|
||||
|
||||
static inline uint64_t FStar_UInt128_carry(uint64_t a, uint64_t b)
|
||||
{
|
||||
return FStar_UInt128_constant_time_carry(a, b);
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low + b.low;
|
||||
lit.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low + b.low;
|
||||
lit.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low + b.low;
|
||||
lit.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low - b.low;
|
||||
lit.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low - b.low;
|
||||
lit.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low - b.low;
|
||||
lit.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
return FStar_UInt128_sub_mod_impl(a, b);
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low & b.low;
|
||||
lit.high = a.high & b.high;
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low ^ b.low;
|
||||
lit.high = a.high ^ b.high;
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low | b.low;
|
||||
lit.high = a.high | b.high;
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = ~a.low;
|
||||
lit.high = ~a.high;
|
||||
return lit;
|
||||
}
|
||||
|
||||
static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
|
||||
|
||||
static inline uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
|
||||
{
|
||||
return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
|
||||
{
|
||||
return FStar_UInt128_add_u64_shift_left(hi, lo, s);
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
|
||||
{
|
||||
if (s == (uint32_t)0U)
|
||||
{
|
||||
return a;
|
||||
}
|
||||
else
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.low << s;
|
||||
lit.high = FStar_UInt128_add_u64_shift_left_respec(a.high, a.low, s);
|
||||
return lit;
|
||||
}
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = (uint64_t)0U;
|
||||
lit.high = a.low << (s - FStar_UInt128_u32_64);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
|
||||
{
|
||||
if (s < FStar_UInt128_u32_64)
|
||||
{
|
||||
return FStar_UInt128_shift_left_small(a, s);
|
||||
}
|
||||
else
|
||||
{
|
||||
return FStar_UInt128_shift_left_large(a, s);
|
||||
}
|
||||
}
|
||||
|
||||
static inline uint64_t FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
|
||||
{
|
||||
return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
|
||||
{
|
||||
return FStar_UInt128_add_u64_shift_right(hi, lo, s);
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
|
||||
{
|
||||
if (s == (uint32_t)0U)
|
||||
{
|
||||
return a;
|
||||
}
|
||||
else
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = FStar_UInt128_add_u64_shift_right_respec(a.high, a.low, s);
|
||||
lit.high = a.high >> s;
|
||||
return lit;
|
||||
}
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a.high >> (s - FStar_UInt128_u32_64);
|
||||
lit.high = (uint64_t)0U;
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
|
||||
{
|
||||
if (s < FStar_UInt128_u32_64)
|
||||
{
|
||||
return FStar_UInt128_shift_right_small(a, s);
|
||||
}
|
||||
else
|
||||
{
|
||||
return FStar_UInt128_shift_right_large(a, s);
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
return a.low == b.low && a.high == b.high;
|
||||
}
|
||||
|
||||
static inline bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
return a.high > b.high || (a.high == b.high && a.low > b.low);
|
||||
}
|
||||
|
||||
static inline bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
return a.high < b.high || (a.high == b.high && a.low < b.low);
|
||||
}
|
||||
|
||||
static inline bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
return a.high > b.high || (a.high == b.high && a.low >= b.low);
|
||||
}
|
||||
|
||||
static inline bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
return a.high < b.high || (a.high == b.high && a.low <= b.low);
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high);
|
||||
lit.high = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high);
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128
|
||||
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low =
|
||||
(FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high))
|
||||
| (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low));
|
||||
lit.high =
|
||||
(FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high))
|
||||
| (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low));
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low = a;
|
||||
lit.high = (uint64_t)0U;
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
|
||||
{
|
||||
return a.low;
|
||||
}
|
||||
|
||||
static inline uint64_t FStar_UInt128_u64_mod_32(uint64_t a)
|
||||
{
|
||||
return a & (uint64_t)0xffffffffU;
|
||||
}
|
||||
|
||||
static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
|
||||
|
||||
static inline uint64_t FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo)
|
||||
{
|
||||
return lo + (hi << FStar_UInt128_u32_32);
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low =
|
||||
FStar_UInt128_u32_combine((x >> FStar_UInt128_u32_32)
|
||||
* (uint64_t)y
|
||||
+ (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >> FStar_UInt128_u32_32),
|
||||
FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * (uint64_t)y));
|
||||
lit.high =
|
||||
((x >> FStar_UInt128_u32_32)
|
||||
* (uint64_t)y
|
||||
+ (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >> FStar_UInt128_u32_32))
|
||||
>> FStar_UInt128_u32_32;
|
||||
return lit;
|
||||
}
|
||||
|
||||
static inline uint64_t FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
|
||||
{
|
||||
return lo + (hi << FStar_UInt128_u32_32);
|
||||
}
|
||||
|
||||
static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
|
||||
{
|
||||
FStar_UInt128_uint128 lit;
|
||||
lit.low =
|
||||
FStar_UInt128_u32_combine_(FStar_UInt128_u64_mod_32(x)
|
||||
* (y >> FStar_UInt128_u32_32)
|
||||
+
|
||||
FStar_UInt128_u64_mod_32((x >> FStar_UInt128_u32_32)
|
||||
* FStar_UInt128_u64_mod_32(y)
|
||||
+ (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32)),
|
||||
FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y)));
|
||||
lit.high =
|
||||
(x >> FStar_UInt128_u32_32)
|
||||
* (y >> FStar_UInt128_u32_32)
|
||||
+
|
||||
(((x >> FStar_UInt128_u32_32)
|
||||
* FStar_UInt128_u64_mod_32(y)
|
||||
+ (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32))
|
||||
>> FStar_UInt128_u32_32)
|
||||
+
|
||||
((FStar_UInt128_u64_mod_32(x)
|
||||
* (y >> FStar_UInt128_u32_32)
|
||||
+
|
||||
FStar_UInt128_u64_mod_32((x >> FStar_UInt128_u32_32)
|
||||
* FStar_UInt128_u64_mod_32(y)
|
||||
+ (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32)))
|
||||
>> FStar_UInt128_u32_32);
|
||||
return lit;
|
||||
}
|
||||
|
||||
#define __FStar_UInt128_Verified_H_DEFINED
|
||||
#endif
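The constant-time carry above is the core trick: it evaluates to 1 exactly when a < b (the borrow of a - b) without a data-dependent branch. A sketch of using it to recover the carry of a 64-bit addition, the same way FStar_UInt128_add does (illustrative helper, not from the commit):

static uint64_t add64_carry(uint64_t x, uint64_t y, uint64_t *carry_out) {
  uint64_t sum = x + y;
  /* sum < y exactly when the addition wrapped, so this is the carry bit. */
  *carry_out = FStar_UInt128_constant_time_carry(sum, y);
  return sum;
}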
198
3rdparty/hacl-star/evercrypt/kremlin/kremlib/minimal/FStar_UInt_8_16_32_64.h
vendored
Normal file
@@ -0,0 +1,198 @@
/*
|
||||
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License.
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdbool.h>
|
||||
#include "kremlin/internal/compat.h"
|
||||
#include "kremlin/lowstar_endianness.h"
|
||||
#include "kremlin/internal/types.h"
|
||||
#include "kremlin/internal/target.h"
|
||||
|
||||
#ifndef __FStar_UInt_8_16_32_64_H
|
||||
#define __FStar_UInt_8_16_32_64_H
|
||||
|
||||
|
||||
|
||||
|
||||
extern Prims_int FStar_UInt64_n;
|
||||
|
||||
extern bool FStar_UInt64_uu___is_Mk(uint64_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt64___proj__Mk__item__v(uint64_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt64_v(uint64_t x);
|
||||
|
||||
extern uint64_t FStar_UInt64_uint_to_t(Prims_int x);
|
||||
|
||||
extern uint64_t FStar_UInt64_minus(uint64_t a);
|
||||
|
||||
extern uint32_t FStar_UInt64_n_minus_one;
|
||||
|
||||
static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
|
||||
{
|
||||
uint64_t x = a ^ b;
|
||||
uint64_t minus_x = ~x + (uint64_t)1U;
|
||||
uint64_t x_or_minus_x = x | minus_x;
|
||||
uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
|
||||
return xnx - (uint64_t)1U;
|
||||
}
|
||||
|
||||
static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
|
||||
{
|
||||
uint64_t x = a;
|
||||
uint64_t y = b;
|
||||
uint64_t x_xor_y = x ^ y;
|
||||
uint64_t x_sub_y = x - y;
|
||||
uint64_t x_sub_y_xor_y = x_sub_y ^ y;
|
||||
uint64_t q = x_xor_y | x_sub_y_xor_y;
|
||||
uint64_t x_xor_q = x ^ q;
|
||||
uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
|
||||
return x_xor_q_ - (uint64_t)1U;
|
||||
}
|
||||
|
||||
extern Prims_string FStar_UInt64_to_string(uint64_t uu____888);
|
||||
|
||||
extern Prims_string FStar_UInt64_to_string_hex(uint64_t uu____899);
|
||||
|
||||
extern Prims_string FStar_UInt64_to_string_hex_pad(uint64_t uu____910);
|
||||
|
||||
extern uint64_t FStar_UInt64_of_string(Prims_string uu____921);
|
||||
|
||||
extern Prims_int FStar_UInt32_n;
|
||||
|
||||
extern bool FStar_UInt32_uu___is_Mk(uint32_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt32___proj__Mk__item__v(uint32_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt32_v(uint32_t x);
|
||||
|
||||
extern uint32_t FStar_UInt32_uint_to_t(Prims_int x);
|
||||
|
||||
extern uint32_t FStar_UInt32_minus(uint32_t a);
|
||||
|
||||
extern uint32_t FStar_UInt32_n_minus_one;
|
||||
|
||||
static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
|
||||
{
|
||||
uint32_t x = a ^ b;
|
||||
uint32_t minus_x = ~x + (uint32_t)1U;
|
||||
uint32_t x_or_minus_x = x | minus_x;
|
||||
uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
|
||||
return xnx - (uint32_t)1U;
|
||||
}
|
||||
|
||||
static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
|
||||
{
|
||||
uint32_t x = a;
|
||||
uint32_t y = b;
|
||||
uint32_t x_xor_y = x ^ y;
|
||||
uint32_t x_sub_y = x - y;
|
||||
uint32_t x_sub_y_xor_y = x_sub_y ^ y;
|
||||
uint32_t q = x_xor_y | x_sub_y_xor_y;
|
||||
uint32_t x_xor_q = x ^ q;
|
||||
uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
|
||||
return x_xor_q_ - (uint32_t)1U;
|
||||
}
|
||||
|
||||
extern Prims_string FStar_UInt32_to_string(uint32_t uu____888);
|
||||
|
||||
extern Prims_string FStar_UInt32_to_string_hex(uint32_t uu____899);
|
||||
|
||||
extern Prims_string FStar_UInt32_to_string_hex_pad(uint32_t uu____910);
|
||||
|
||||
extern uint32_t FStar_UInt32_of_string(Prims_string uu____921);
|
||||
|
||||
extern Prims_int FStar_UInt16_n;
|
||||
|
||||
extern bool FStar_UInt16_uu___is_Mk(uint16_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt16___proj__Mk__item__v(uint16_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt16_v(uint16_t x);
|
||||
|
||||
extern uint16_t FStar_UInt16_uint_to_t(Prims_int x);
|
||||
|
||||
extern uint16_t FStar_UInt16_minus(uint16_t a);
|
||||
|
||||
extern uint32_t FStar_UInt16_n_minus_one;
|
||||
|
||||
static inline uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
|
||||
{
|
||||
uint16_t x = a ^ b;
|
||||
uint16_t minus_x = ~x + (uint16_t)1U;
|
||||
uint16_t x_or_minus_x = x | minus_x;
|
||||
uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
|
||||
return xnx - (uint16_t)1U;
|
||||
}
|
||||
|
||||
static inline uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
|
||||
{
|
||||
uint16_t x = a;
|
||||
uint16_t y = b;
|
||||
uint16_t x_xor_y = x ^ y;
|
||||
uint16_t x_sub_y = x - y;
|
||||
uint16_t x_sub_y_xor_y = x_sub_y ^ y;
|
||||
uint16_t q = x_xor_y | x_sub_y_xor_y;
|
||||
uint16_t x_xor_q = x ^ q;
|
||||
uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
|
||||
return x_xor_q_ - (uint16_t)1U;
|
||||
}
|
||||
|
||||
extern Prims_string FStar_UInt16_to_string(uint16_t uu____888);
|
||||
|
||||
extern Prims_string FStar_UInt16_to_string_hex(uint16_t uu____899);
|
||||
|
||||
extern Prims_string FStar_UInt16_to_string_hex_pad(uint16_t uu____910);
|
||||
|
||||
extern uint16_t FStar_UInt16_of_string(Prims_string uu____921);
|
||||
|
||||
extern Prims_int FStar_UInt8_n;
|
||||
|
||||
extern bool FStar_UInt8_uu___is_Mk(uint8_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt8___proj__Mk__item__v(uint8_t projectee);
|
||||
|
||||
extern Prims_int FStar_UInt8_v(uint8_t x);
|
||||
|
||||
extern uint8_t FStar_UInt8_uint_to_t(Prims_int x);
|
||||
|
||||
extern uint8_t FStar_UInt8_minus(uint8_t a);
|
||||
|
||||
extern uint32_t FStar_UInt8_n_minus_one;
|
||||
|
||||
static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
|
||||
{
|
||||
uint8_t x = a ^ b;
|
||||
uint8_t minus_x = ~x + (uint8_t)1U;
|
||||
uint8_t x_or_minus_x = x | minus_x;
|
||||
uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
|
||||
return xnx - (uint8_t)1U;
|
||||
}
|
||||
|
||||
static inline uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
|
||||
{
|
||||
uint8_t x = a;
|
||||
uint8_t y = b;
|
||||
uint8_t x_xor_y = x ^ y;
|
||||
uint8_t x_sub_y = x - y;
|
||||
uint8_t x_sub_y_xor_y = x_sub_y ^ y;
|
||||
uint8_t q = x_xor_y | x_sub_y_xor_y;
|
||||
uint8_t x_xor_q = x ^ q;
|
||||
uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
|
||||
return x_xor_q_ - (uint8_t)1U;
|
||||
}
|
||||
|
||||
extern Prims_string FStar_UInt8_to_string(uint8_t uu____888);
|
||||
|
||||
extern Prims_string FStar_UInt8_to_string_hex(uint8_t uu____899);
|
||||
|
||||
extern Prims_string FStar_UInt8_to_string_hex_pad(uint8_t uu____910);
|
||||
|
||||
extern uint8_t FStar_UInt8_of_string(Prims_string uu____921);
|
||||
|
||||
typedef uint8_t FStar_UInt8_byte;
|
||||
|
||||
#define __FStar_UInt_8_16_32_64_H_DEFINED
|
||||
#endif
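These masks exist to enable branch-free selection on secret data; a sketch with an illustrative helper (not from the commit):

/* eq_mask returns all-ones when a == b and zero otherwise, so this picks
 * `yes` or `no` without a secret-dependent branch. */
static uint64_t select_if_eq(uint64_t a, uint64_t b, uint64_t yes, uint64_t no) {
  uint64_t mask = FStar_UInt64_eq_mask(a, b);
  return (yes & mask) | (no & ~mask);
}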
28
3rdparty/hacl-star/evercrypt/kremlin/kremlib/minimal/LowStar_Endianness.h
vendored
Normal file
@@ -0,0 +1,28 @@
/*
|
||||
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License.
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdbool.h>
|
||||
#include "kremlin/internal/compat.h"
|
||||
#include "kremlin/lowstar_endianness.h"
|
||||
#include "kremlin/internal/types.h"
|
||||
#include "kremlin/internal/target.h"
|
||||
|
||||
#ifndef __LowStar_Endianness_H
|
||||
#define __LowStar_Endianness_H
|
||||
|
||||
#include "FStar_UInt128.h"
|
||||
|
||||
|
||||
static inline void store128_le(uint8_t *x0, FStar_UInt128_uint128 x1);
|
||||
|
||||
static inline FStar_UInt128_uint128 load128_le(uint8_t *x0);
|
||||
|
||||
static inline void store128_be(uint8_t *x0, FStar_UInt128_uint128 x1);
|
||||
|
||||
static inline FStar_UInt128_uint128 load128_be(uint8_t *x0);
|
||||
|
||||
#define __LowStar_Endianness_H_DEFINED
|
||||
#endif
@@ -0,0 +1,56 @@
# A basic Makefile that KreMLin copies in the output directory; this is not
|
||||
# guaranteed to work and will only work well for very simple projects. This
|
||||
# Makefile uses:
|
||||
# - the custom C files passed to your krml invocation
|
||||
# - the custom C flags passed to your krml invocation
|
||||
# - the -o option passed to your krml invocation
|
||||
|
||||
include Makefile.include
|
||||
|
||||
ifeq (,$(KREMLIN_HOME))
|
||||
$(error please define KREMLIN_HOME to point to the root of your KReMLin git checkout)
|
||||
endif
|
||||
|
||||
CFLAGS += -I. -I $(KREMLIN_HOME)/include -I $(KREMLIN_HOME)/kremlib/dist/minimal
|
||||
CFLAGS += -Wall -Wextra -Werror -std=c11 -Wno-unused-variable \
|
||||
-Wno-unknown-warning-option -Wno-unused-but-set-variable \
|
||||
-Wno-unused-parameter -Wno-infinite-recursion \
|
||||
-g -fwrapv -D_BSD_SOURCE -D_DEFAULT_SOURCE
|
||||
ifeq ($(OS),Windows_NT)
|
||||
CFLAGS += -D__USE_MINGW_ANSI_STDIO
|
||||
else
|
||||
CFLAGS += -fPIC
|
||||
endif
|
||||
CFLAGS += $(USER_CFLAGS)
|
||||
|
||||
SOURCES += $(ALL_C_FILES) $(USER_C_FILES)
|
||||
ifneq (,$(BLACKLIST))
|
||||
SOURCES := $(filter-out $(BLACKLIST),$(SOURCES))
|
||||
endif
|
||||
OBJS += $(patsubst %.c,%.o,$(SOURCES))
|
||||
|
||||
all: $(USER_TARGET)
|
||||
|
||||
$(USER_TARGET): $(OBJS)
|
||||
|
||||
AR ?= ar
|
||||
|
||||
%.a:
|
||||
$(AR) cr $@ $^
|
||||
|
||||
%.exe:
|
||||
$(CC) $(CFLAGS) -o $@ $^ $(KREMLIN_HOME)/kremlib/dist/generic/libkremlib.a
|
||||
|
||||
%.so:
|
||||
$(CC) $(CFLAGS) -shared -o $@ $^
|
||||
|
||||
%.d: %.c
|
||||
@set -e; rm -f $@; \
|
||||
$(CC) -MM $(CFLAGS) $< > $@.$$$$; \
|
||||
sed 's,\($(notdir $*)\)\.o[ :]*,$(dir $@)\1.o $@ : ,g' < $@.$$$$ > $@; \
|
||||
rm -f $@.$$$$
|
||||
|
||||
include $(patsubst %.c,%.d,$(SOURCES))
|
||||
|
||||
clean:
|
||||
rm -rf *.o *.d $(USER_TARGET)
@@ -0,0 +1,5 @@
USER_TARGET=libkremlib.a
|
||||
USER_CFLAGS=
|
||||
USER_C_FILES=fstar_uint128.c
|
||||
ALL_C_FILES=
|
||||
ALL_H_FILES=FStar_UInt_8_16_32_64.h FStar_UInt128.h LowStar_Endianness.h
160
3rdparty/hacl-star/evercrypt/kremlin/kremlib/minimal/fstar_uint128_gcc64.h
vendored
Normal file
@@ -0,0 +1,160 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
|
||||
Licensed under the Apache 2.0 License. */
|
||||
|
||||
/******************************************************************************/
|
||||
/* Machine integers (128-bit arithmetic) */
|
||||
/******************************************************************************/
|
||||
|
||||
/* This header contains two things.
|
||||
*
|
||||
* First, an implementation of 128-bit arithmetic suitable for 64-bit GCC and
|
||||
* Clang, i.e. all the operations from FStar.UInt128.
|
||||
*
|
||||
* Second, 128-bit operations from C.Endianness (or LowStar.Endianness),
|
||||
* suitable for any compiler and platform (via a series of ifdefs). This second
|
||||
* part is unfortunate, and should be fixed by moving {load,store}128_{be,le} to
|
||||
* FStar.UInt128 to avoid a maze of preprocessor guards and hand-written code.
|
||||
* */
|
||||
|
||||
/* This file is used for both the minimal and generic kremlib distributions. As
|
||||
* such, it assumes that the machine integers have been bundled the exact same
|
||||
* way in both cases. */
|
||||
|
||||
#include "FStar_UInt128.h"
|
||||
#include "FStar_UInt_8_16_32_64.h"
|
||||
#include "LowStar_Endianness.h"
|
||||
|
||||
/* GCC + using native unsigned __int128 support */
|
||||
|
||||
inline static uint128_t load128_le(uint8_t *b) {
|
||||
uint128_t l = (uint128_t)load64_le(b);
|
||||
uint128_t h = (uint128_t)load64_le(b + 8);
|
||||
return (h << 64 | l);
|
||||
}
|
||||
|
||||
inline static void store128_le(uint8_t *b, uint128_t n) {
|
||||
store64_le(b, (uint64_t)n);
|
||||
store64_le(b + 8, (uint64_t)(n >> 64));
|
||||
}
|
||||
|
||||
inline static uint128_t load128_be(uint8_t *b) {
|
||||
uint128_t h = (uint128_t)load64_be(b);
|
||||
uint128_t l = (uint128_t)load64_be(b + 8);
|
||||
return (h << 64 | l);
|
||||
}
|
||||
|
||||
inline static void store128_be(uint8_t *b, uint128_t n) {
|
||||
store64_be(b, (uint64_t)(n >> 64));
|
||||
store64_be(b + 8, (uint64_t)n);
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_add(uint128_t x, uint128_t y) {
|
||||
return x + y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_mul(uint128_t x, uint128_t y) {
|
||||
return x * y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_add_mod(uint128_t x, uint128_t y) {
|
||||
return x + y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_sub(uint128_t x, uint128_t y) {
|
||||
return x - y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_sub_mod(uint128_t x, uint128_t y) {
|
||||
return x - y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_logand(uint128_t x, uint128_t y) {
|
||||
return x & y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_logor(uint128_t x, uint128_t y) {
|
||||
return x | y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_logxor(uint128_t x, uint128_t y) {
|
||||
return x ^ y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_lognot(uint128_t x) {
|
||||
return ~x;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_shift_left(uint128_t x, uint32_t y) {
|
||||
return x << y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_shift_right(uint128_t x, uint32_t y) {
|
||||
return x >> y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_uint64_to_uint128(uint64_t x) {
|
||||
return (uint128_t)x;
|
||||
}
|
||||
|
||||
inline static uint64_t FStar_UInt128_uint128_to_uint64(uint128_t x) {
|
||||
return (uint64_t)x;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_mul_wide(uint64_t x, uint64_t y) {
|
||||
return ((uint128_t) x) * y;
|
||||
}
|
||||
|
||||
inline static uint128_t FStar_UInt128_eq_mask(uint128_t x, uint128_t y) {
  uint64_t mask =
      FStar_UInt64_eq_mask((uint64_t)(x >> 64), (uint64_t)(y >> 64)) &
      FStar_UInt64_eq_mask(x, y);
  return ((uint128_t)mask) << 64 | mask;
}

inline static uint128_t FStar_UInt128_gte_mask(uint128_t x, uint128_t y) {
  uint64_t mask =
      (FStar_UInt64_gte_mask(x >> 64, y >> 64) &
       ~(FStar_UInt64_eq_mask(x >> 64, y >> 64))) |
      (FStar_UInt64_eq_mask(x >> 64, y >> 64) & FStar_UInt64_gte_mask(x, y));
  return ((uint128_t)mask) << 64 | mask;
}

inline static uint64_t FStar_UInt128___proj__Mkuint128__item__low(uint128_t x) {
  return (uint64_t) x;
}

inline static uint64_t FStar_UInt128___proj__Mkuint128__item__high(uint128_t x) {
  return (uint64_t) (x >> 64);
}

inline static uint128_t FStar_UInt128_add_underspec(uint128_t x, uint128_t y) {
  return x + y;
}

inline static uint128_t FStar_UInt128_sub_underspec(uint128_t x, uint128_t y) {
  return x - y;
}

inline static bool FStar_UInt128_eq(uint128_t x, uint128_t y) {
  return x == y;
}

inline static bool FStar_UInt128_gt(uint128_t x, uint128_t y) {
  return x > y;
}

inline static bool FStar_UInt128_lt(uint128_t x, uint128_t y) {
  return x < y;
}

inline static bool FStar_UInt128_gte(uint128_t x, uint128_t y) {
  return x >= y;
}

inline static bool FStar_UInt128_lte(uint128_t x, uint128_t y) {
  return x <= y;
}

inline static uint128_t FStar_UInt128_mul32(uint64_t x, uint32_t y) {
  return (uint128_t) x * (uint128_t) y;
}

467 3rdparty/hacl-star/evercrypt/kremlin/kremlib/minimal/fstar_uint128_msvc.h vendored Normal file

@@ -0,0 +1,467 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

/* This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
 * then hand-edited to use MSVC intrinsics. KreMLin invocation:
 * C:\users\barrybo\mitls2c\kremlin\_build\src\Kremlin.native -minimal -fnouint128 C:/users/barrybo/mitls2c/FStar/ulib/FStar.UInt128.fst -tmpdir ../secure_api/out/runtime_switch/uint128 -skip-compilation -add-include "kremlib0.h" -drop FStar.Int.Cast.Full -bundle FStar.UInt128=FStar.*,Prims
 * F* version: 15104ff8
 * KreMLin version: 318b7fa8
 */
#include "kremlin/internal/types.h"
#include "FStar_UInt128.h"
#include "FStar_UInt_8_16_32_64.h"

#ifndef _MSC_VER
# error This file only works with the MSVC compiler
#endif

/* JP: need to rip out HAS_OPTIMIZED since the header guards in types.h are now
 * done properly and only include this file when we know for sure we are on
 * 64-bit MSVC. */

#if defined(_M_X64) && !defined(KRML_VERIFIED_UINT128)
#define HAS_OPTIMIZED 1
#else
#define HAS_OPTIMIZED 0
#endif

// Define .low and .high in terms of the __m128i fields, to reduce
// the amount of churn in this file.
#if HAS_OPTIMIZED
#include <intrin.h>
#include <immintrin.h>
#define low m128i_u64[0]
#define high m128i_u64[1]
#endif
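/* Under HAS_OPTIMIZED, FStar_UInt128_uint128 is expected to be MSVC's __m128i
 * (see kremlin/internal/types.h); its m128i_u64 union member exposes the two
 * 64-bit lanes, so the macros above let the struct-style .low/.high accesses
 * below keep working on the vector type. */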

inline static FStar_UInt128_uint128 load128_le(uint8_t *b) {
#if HAS_OPTIMIZED
  return _mm_loadu_si128((__m128i *)b);
#else
  return (
      (FStar_UInt128_uint128){ .low = load64_le(b), .high = load64_le(b + 8) });
#endif
}

inline static void store128_le(uint8_t *b, FStar_UInt128_uint128 n) {
  store64_le(b, n.low);
  store64_le(b + 8, n.high);
}

inline static FStar_UInt128_uint128 load128_be(uint8_t *b) {
  uint64_t l = load64_be(b + 8);
  uint64_t h = load64_be(b);
#if HAS_OPTIMIZED
  return _mm_set_epi64x(h, l);
#else
  return ((FStar_UInt128_uint128){ .low = l, .high = h });
#endif
}

inline static void store128_be(uint8_t *b, uint128_t n) {
  store64_be(b, n.high);
  store64_be(b + 8, n.low);
}

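/* Branch-free unsigned comparison: the expression below evaluates to 1 exactly
 * when a < b and to 0 otherwise. FStar_UInt128_add uses it as
 * carry(a.low + b.low, b.low) to detect wrap-around of the low limb without a
 * data-dependent branch. */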
inline static uint64_t FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b) {
  return (a ^ (a ^ b | a - b ^ b)) >> (uint32_t)63U;
}

inline static uint64_t FStar_UInt128_carry(uint64_t a, uint64_t b) {
  return FStar_UInt128_constant_time_carry(a, b);
}

inline static FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  uint64_t l, h;

  unsigned char carry =
      _addcarry_u64(0, a.low, b.low, &l); // low/CF = a.low+b.low+0
  _addcarry_u64(carry, a.high, b.high, &h); // high = a.high+b.high+CF
  return _mm_set_epi64x(h, l);
#else
  return ((FStar_UInt128_uint128){
      .low = a.low + b.low,
      .high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) });
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  return FStar_UInt128_add(a, b);
#else
  return ((FStar_UInt128_uint128){
      .low = a.low + b.low,
      .high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low)});
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  return FStar_UInt128_add(a, b);
#else
  return ((FStar_UInt128_uint128){
      .low = a.low + b.low,
      .high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) });
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  uint64_t l, h;

  unsigned char borrow = _subborrow_u64(0, a.low, b.low, &l);
  _subborrow_u64(borrow, a.high, b.high, &h);
  return _mm_set_epi64x(h, l);
#else
  return ((FStar_UInt128_uint128){
      .low = a.low - b.low,
      .high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) });
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  return FStar_UInt128_sub(a, b);
#else
  return ((FStar_UInt128_uint128){
      .low = a.low - b.low,
      .high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low)});
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
  return ((FStar_UInt128_uint128){
      .low = a.low - b.low,
      .high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) });
}

inline static FStar_UInt128_uint128
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  return FStar_UInt128_sub(a, b);
#else
  return FStar_UInt128_sub_mod_impl(a, b);
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  return _mm_and_si128(a, b);
#else
  return (
      (FStar_UInt128_uint128){ .low = a.low & b.low, .high = a.high & b.high });
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  return _mm_xor_si128(a, b);
#else
  return (
      (FStar_UInt128_uint128){ .low = a.low ^ b.low, .high = a.high ^ b.high });
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  return _mm_or_si128(a, b);
#else
  return (
      (FStar_UInt128_uint128){ .low = a.low | b.low, .high = a.high | b.high });
#endif
}

inline static FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a) {
#if HAS_OPTIMIZED
  /* Bitwise complement of all 128 bits, computed as a XOR all-ones. */
  return _mm_xor_si128(a, _mm_set1_epi32(-1));
#else
  return ((FStar_UInt128_uint128){ .low = ~a.low, .high = ~a.high });
#endif
}

static const uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;

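/* Shift helpers for counts in 1..63: they produce the word that receives the
 * bits crossing the 64-bit boundary. `lo >> FStar_UInt128_u32_64 - s` parses
 * as `lo >> (64 - s)` (additive operators bind tighter than shifts), and the
 * callers below only invoke these helpers with 0 < s < 64, so every shift
 * count stays in range. */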
inline static uint64_t
FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s) {
  return (hi << s) + (lo >> FStar_UInt128_u32_64 - s);
}

inline static uint64_t
FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s) {
  return FStar_UInt128_add_u64_shift_left(hi, lo, s);
}

inline static FStar_UInt128_uint128
FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s) {
  if (s == (uint32_t)0U)
    return a;
  else
    return ((FStar_UInt128_uint128){
        .low = a.low << s,
        .high = FStar_UInt128_add_u64_shift_left_respec(a.high, a.low, s) });
}

inline static FStar_UInt128_uint128
FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s) {
  return ((FStar_UInt128_uint128){ .low = (uint64_t)0U,
                                   .high = a.low << s - FStar_UInt128_u32_64 });
}

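/* In the optimized path the full shifts lean on MSVC's __shiftleft128 /
 * __shiftright128, which shift the 128-bit value formed by (high:low) and
 * return the resulting high (respectively low) 64-bit half; they are only
 * meaningful for counts below 64, which the branches below guarantee. */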
inline static FStar_UInt128_uint128
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s) {
#if HAS_OPTIMIZED
  if (s == 0) {
    return a;
  } else if (s < FStar_UInt128_u32_64) {
    uint64_t l = a.low << s;
    uint64_t h = __shiftleft128(a.low, a.high, (unsigned char)s);
    return _mm_set_epi64x(h, l);
  } else {
    return _mm_set_epi64x(a.low << (s - FStar_UInt128_u32_64), 0);
  }
#else
  if (s < FStar_UInt128_u32_64)
    return FStar_UInt128_shift_left_small(a, s);
  else
    return FStar_UInt128_shift_left_large(a, s);
#endif
}

inline static uint64_t
FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s) {
  return (lo >> s) + (hi << FStar_UInt128_u32_64 - s);
}

inline static uint64_t
FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s) {
  return FStar_UInt128_add_u64_shift_right(hi, lo, s);
}

inline static FStar_UInt128_uint128
FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s) {
  if (s == (uint32_t)0U)
    return a;
  else
    return ((FStar_UInt128_uint128){
        .low = FStar_UInt128_add_u64_shift_right_respec(a.high, a.low, s),
        .high = a.high >> s });
}

inline static FStar_UInt128_uint128
FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s) {
  return ((FStar_UInt128_uint128){ .low = a.high >> s - FStar_UInt128_u32_64,
                                   .high = (uint64_t)0U });
}

inline static FStar_UInt128_uint128
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s) {
#if HAS_OPTIMIZED
  if (s == 0) {
    return a;
  } else if (s < FStar_UInt128_u32_64) {
    uint64_t l = __shiftright128(a.low, a.high, (unsigned char)s);
    uint64_t h = a.high >> s;
    return _mm_set_epi64x(h, l);
  } else {
    return _mm_set_epi64x(0, a.high >> (s - FStar_UInt128_u32_64));
  }
#else
  if (s < FStar_UInt128_u32_64)
    return FStar_UInt128_shift_right_small(a, s);
  else
    return FStar_UInt128_shift_right_large(a, s);
#endif
}

inline static bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
  return a.low == b.low && a.high == b.high;
}

inline static bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
  return a.high > b.high || a.high == b.high && a.low > b.low;
}

inline static bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
  return a.high < b.high || a.high == b.high && a.low < b.low;
}

inline static bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
  return a.high > b.high || a.high == b.high && a.low >= b.low;
}

inline static bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
  return a.high < b.high || a.high == b.high && a.low <= b.low;
}

inline static FStar_UInt128_uint128
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED
  // PCMPW to produce 4 32-bit values, all either 0x0 or 0xffffffff
  __m128i r32 = _mm_cmpeq_epi32(a, b);
  // Shuffle 3,2,1,0 into 2,3,0,1 (swapping dwords inside each half)
  __m128i s32 = _mm_shuffle_epi32(r32, _MM_SHUFFLE(2, 3, 0, 1));
  // Bitwise and to compute (3&2),(2&3),(1&0),(0&1)
  __m128i ret64 = _mm_and_si128(r32, s32);
  // Swap the two 64-bit values to form s64
  __m128i s64 =
      _mm_shuffle_epi32(ret64, _MM_SHUFFLE(1, 0, 3, 2)); // 3,2,1,0 -> 1,0,3,2
  // And them together
  return _mm_and_si128(ret64, s64);
#else
  return (
      (FStar_UInt128_uint128){ .low = FStar_UInt64_eq_mask(a.low, b.low) &
                                      FStar_UInt64_eq_mask(a.high, b.high),
                               .high = FStar_UInt64_eq_mask(a.low, b.low) &
                                       FStar_UInt64_eq_mask(a.high, b.high) });
#endif
}

inline static FStar_UInt128_uint128
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b) {
#if HAS_OPTIMIZED && 0
  // ge - compare 3,2,1,0 for >= and generating 0 or 0xffffffff for each
  // eq - compare 3,2,1,0 for == and generating 0 or 0xffffffff for each
  // slot 0 = ge0 | (eq0 & ge1) | (eq0 & eq1 & ge2) | (eq0 & eq1 & eq2 & ge3)
  // then splat slot 0 to 3,2,1,0
  __m128i gt = _mm_cmpgt_epi32(a, b);
  __m128i eq = _mm_cmpeq_epi32(a, b);
  __m128i ge = _mm_or_si128(gt, eq);
  __m128i ge0 = ge;
  __m128i eq0 = eq;
  __m128i ge1 = _mm_srli_si128(ge, 4); // shift ge from 3,2,1,0 to 0x0,3,2,1
  __m128i t1 = _mm_and_si128(eq0, ge1);
  __m128i ret = _mm_or_si128(ge, t1); // ge0 | (eq0 & ge1) is now in 0
  __m128i eq1 = _mm_srli_si128(eq, 4); // shift eq from 3,2,1,0 to 0x0,3,2,1
  __m128i ge2 =
      _mm_srli_si128(ge1, 4); // shift original ge from 3,2,1,0 to 0x0,0x0,3,2
  __m128i t2 =
      _mm_and_si128(eq0, _mm_and_si128(eq1, ge2)); // t2 = (eq0 & eq1 & ge2)
  ret = _mm_or_si128(ret, t2);
  __m128i eq2 = _mm_srli_si128(eq1, 4); // shift eq from 3,2,1,0 to 0x0,00,00,3
  __m128i ge3 =
      _mm_srli_si128(ge2, 4); // shift original ge from 3,2,1,0 to 0x0,0x0,0x0,3
  __m128i t3 = _mm_and_si128(
      eq0, _mm_and_si128(
               eq1, _mm_and_si128(eq2, ge3))); // t3 = (eq0 & eq1 & eq2 & ge3)
  ret = _mm_or_si128(ret, t3);
  return _mm_shuffle_epi32(
      ret,
      _MM_SHUFFLE(0, 0, 0, 0)); // the result is in 0. Shuffle into all dwords.
#else
  return ((FStar_UInt128_uint128){
      .low = FStar_UInt64_gte_mask(a.high, b.high) &
                 ~FStar_UInt64_eq_mask(a.high, b.high) |
             FStar_UInt64_eq_mask(a.high, b.high) &
                 FStar_UInt64_gte_mask(a.low, b.low),
      .high = FStar_UInt64_gte_mask(a.high, b.high) &
                  ~FStar_UInt64_eq_mask(a.high, b.high) |
              FStar_UInt64_eq_mask(a.high, b.high) &
                  FStar_UInt64_gte_mask(a.low, b.low) });
#endif
}

inline static FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a) {
#if HAS_OPTIMIZED
  return _mm_set_epi64x(0, a);
#else
  return ((FStar_UInt128_uint128){ .low = a, .high = (uint64_t)0U });
#endif
}

inline static uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a) {
  return a.low;
}

inline static uint64_t FStar_UInt128_u64_mod_32(uint64_t a) {
  return a & (uint64_t)0xffffffffU;
}

static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;

inline static uint64_t FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo) {
  return lo + (hi << FStar_UInt128_u32_32);
}

inline static FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y) {
#if HAS_OPTIMIZED
  uint64_t l, h;
  l = _umul128(x, (uint64_t)y, &h);
  return _mm_set_epi64x(h, l);
#else
  return ((FStar_UInt128_uint128){
      .low = FStar_UInt128_u32_combine(
          (x >> FStar_UInt128_u32_32) * (uint64_t)y +
              (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >>
               FStar_UInt128_u32_32),
          FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * (uint64_t)y)),
      .high = (x >> FStar_UInt128_u32_32) * (uint64_t)y +
                  (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >>
                   FStar_UInt128_u32_32) >>
              FStar_UInt128_u32_32 });
#endif
}

/* Note: static headers bring scope collision issues when they define types!
 * Because now client (kremlin-generated) code will include this header and
 * there might be type collisions if the client code uses quadruples of uint64s.
 * So, we cannot use the kremlin-generated name. */
typedef struct K_quad_s {
  uint64_t fst;
  uint64_t snd;
  uint64_t thd;
  uint64_t f3;
} K_quad;

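/* Portable 64x64 -> 128 multiplication, schoolbook style: split x and y into
 * 32-bit halves (x = x1*2^32 + x0, y = y1*2^32 + y0), form the four partial
 * products x0*y0, x1*y0, x0*y1, x1*y1, and propagate carries explicitly so
 * that no intermediate value exceeds 64 bits. K_quad carries x0, the low 32
 * bits of x0*y0, x1, and the running sum x1*y0 + (x0*y0 >> 32). */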
inline static K_quad
FStar_UInt128_mul_wide_impl_t_(uint64_t x, uint64_t y) {
  return ((K_quad){
      .fst = FStar_UInt128_u64_mod_32(x),
      .snd = FStar_UInt128_u64_mod_32(
          FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y)),
      .thd = x >> FStar_UInt128_u32_32,
      .f3 = (x >> FStar_UInt128_u32_32) * FStar_UInt128_u64_mod_32(y) +
            (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >>
             FStar_UInt128_u32_32) });
}

static uint64_t FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo) {
  return lo + (hi << FStar_UInt128_u32_32);
}

inline static FStar_UInt128_uint128
FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y) {
  K_quad scrut =
      FStar_UInt128_mul_wide_impl_t_(x, y);
  uint64_t u1 = scrut.fst;
  uint64_t w3 = scrut.snd;
  uint64_t x_ = scrut.thd;
  uint64_t t_ = scrut.f3;
  return ((FStar_UInt128_uint128){
      .low = FStar_UInt128_u32_combine_(
          u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_), w3),
      .high =
          x_ * (y >> FStar_UInt128_u32_32) + (t_ >> FStar_UInt128_u32_32) +
          (u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_) >>
           FStar_UInt128_u32_32) });
}

inline static
FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y) {
#if HAS_OPTIMIZED
  uint64_t l, h;
  l = _umul128(x, y, &h);
  return _mm_set_epi64x(h, l);
#else
  return FStar_UInt128_mul_wide_impl(x, y);
#endif
}

68 3rdparty/hacl-star/evercrypt/kremlin/kremlib/minimal/fstar_uint128_struct_endianness.h vendored Normal file

@@ -0,0 +1,68 @@
/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

#ifndef FSTAR_UINT128_STRUCT_ENDIANNESS_H
#define FSTAR_UINT128_STRUCT_ENDIANNESS_H

/* Hand-written implementation of endianness-related uint128 functions
 * for the extracted uint128 implementation */

/* Access 64-bit fields within the int128. */
#define HIGH64_OF(x) ((x)->high)
#define LOW64_OF(x) ((x)->low)

/* A series of definitions written using pointers. */

inline static void load128_le_(uint8_t *b, uint128_t *r) {
  LOW64_OF(r) = load64_le(b);
  HIGH64_OF(r) = load64_le(b + 8);
}

inline static void store128_le_(uint8_t *b, uint128_t *n) {
  store64_le(b, LOW64_OF(n));
  store64_le(b + 8, HIGH64_OF(n));
}

inline static void load128_be_(uint8_t *b, uint128_t *r) {
  HIGH64_OF(r) = load64_be(b);
  LOW64_OF(r) = load64_be(b + 8);
}

inline static void store128_be_(uint8_t *b, uint128_t *n) {
  store64_be(b, HIGH64_OF(n));
  store64_be(b + 8, LOW64_OF(n));
}

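/* By default the public load/store names below take uint128_t by value and
 * forward to the pointer-based helpers above; when KRML_NOSTRUCT_PASSING is
 * defined they are instead #define'd straight to the pointer-taking variants,
 * presumably for toolchains where passing structs by value is undesirable. */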
#ifndef KRML_NOSTRUCT_PASSING

inline static uint128_t load128_le(uint8_t *b) {
  uint128_t r;
  load128_le_(b, &r);
  return r;
}

inline static void store128_le(uint8_t *b, uint128_t n) {
  store128_le_(b, &n);
}

inline static uint128_t load128_be(uint8_t *b) {
  uint128_t r;
  load128_be_(b, &r);
  return r;
}

inline static void store128_be(uint8_t *b, uint128_t n) {
  store128_be_(b, &n);
}

#else /* defined(KRML_NOSTRUCT_PASSING) */

# define print128 print128_
# define load128_le load128_le_
# define store128_le store128_le_
# define load128_be load128_be_
# define store128_be store128_be_

#endif /* KRML_NOSTRUCT_PASSING */

#endif

@@ -0,0 +1,11 @@
LIBRARY libkremlib

EXPORTS
FStar_UInt64_eq_mask
FStar_UInt64_gte_mask
FStar_UInt32_eq_mask
FStar_UInt32_gte_mask
FStar_UInt16_eq_mask
FStar_UInt16_gte_mask
FStar_UInt8_eq_mask
FStar_UInt8_gte_mask

@@ -105,7 +105,7 @@
      "type": "git",
      "git": {
        "repositoryUrl": "https://github.com/project-everest/hacl-star",
        "commitHash": "9f592178e2e8e4b884f5605a3747ece2bb42663d"
        "commitHash": "ed8c28dcbc1c4b6846145686f81b063e80f601b1"
      }
    }
  },