// Mirror of https://github.com/microsoft/STL.git
// xcharconv_ryu.h internal header
|
|
|
|
// Copyright (c) Microsoft Corporation.
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
|
|
// Copyright 2018 Ulf Adams
|
|
// Copyright (c) Microsoft Corporation. All rights reserved.
|
|
|
|
// Boost Software License - Version 1.0 - August 17th, 2003
|
|
|
|
// Permission is hereby granted, free of charge, to any person or organization
|
|
// obtaining a copy of the software and accompanying documentation covered by
|
|
// this license (the "Software") to use, reproduce, display, distribute,
|
|
// execute, and transmit the Software, and to prepare derivative works of the
|
|
// Software, and to permit third-parties to whom the Software is furnished to
|
|
// do so, all subject to the following:
|
|
|
|
// The copyright notices in the Software and this entire statement, including
|
|
// the above license grant, this restriction and the following disclaimer,
|
|
// must be included in all copies of the Software, in whole or in part, and
|
|
// all derivative works of the Software, unless such copies or derivative
|
|
// works are solely in the form of machine-executable object code generated by
|
|
// a source language processor.
|
|
|
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
|
|
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
|
|
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
|
|
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
// DEALINGS IN THE SOFTWARE.
|
|
|
|
|
|
#pragma once
|
|
#ifndef _XCHARCONV_RYU_H
|
|
#define _XCHARCONV_RYU_H
|
|
#include <yvals_core.h>
|
|
#if _STL_COMPILER_PREPROCESSOR
|
|
|
|
#include <string.h>
|
|
#include <xcharconv.h>
|
|
#include <xcharconv_ryu_tables.h>
|
|
|
|
#ifdef _M_X64
|
|
#include <intrin0.h> // for _umul128() and __shiftright128()
|
|
#endif // _M_X64
|
|
|
|
#if !_HAS_CXX17
|
|
#error The contents of <charconv> are only available with C++17. (Also, you should not include this internal header.)
|
|
#endif // !_HAS_CXX17
|
|
|
|
#pragma pack(push, _CRT_PACKING)
|
|
#pragma warning(push, _STL_WARNING_LEVEL)
|
|
#pragma warning(disable : _STL_DISABLED_WARNINGS)
|
|
_STL_DISABLE_CLANG_WARNINGS
|
|
#pragma push_macro("new")
|
|
#undef new
|
|
|
|
_STD_BEGIN
|
|
|
|
// https://github.com/ulfjack/ryu/tree/59661c3/ryu
|
|
// (Keep the cgmanifest.json commitHash in sync.)
|
|
|
|
// clang-format off
|
|
|
|
// vvvvvvvvvv DERIVED FROM common.h vvvvvvvvvv
|
|
|
|
// Returns the number of decimal digits in __v, which must fit in 9 digits.
// (f2s: 9 digits are sufficient for round-tripping.)
// (d2fixed: We print 9-digit blocks.)
_NODISCARD inline uint32_t __decimalLength9(const uint32_t __v) {
    // Function precondition: __v is not a 10-digit number.
    _STL_INTERNAL_CHECK(__v < 1000000000);
    uint32_t __length = 1;
    uint32_t __bound  = 10;
    // Add one digit for every power of ten that __v reaches, up to 10^8.
    while (__length < 9 && __v >= __bound) {
        ++__length;
        __bound *= 10;
    }
    return __length;
}
|
|
|
|
// Returns __e == 0 ? 1 : ceil(log_2(5^__e)).
|
|
_NODISCARD inline int32_t __pow5bits(const int32_t __e) {
|
|
// This approximation works up to the point that the multiplication overflows at __e = 3529.
|
|
// If the multiplication were done in 64 bits, it would fail at 5^4004 which is just greater
|
|
// than 2^9297.
|
|
_STL_INTERNAL_CHECK(__e >= 0);
|
|
_STL_INTERNAL_CHECK(__e <= 3528);
|
|
return static_cast<int32_t>(((static_cast<uint32_t>(__e) * 1217359) >> 19) + 1);
|
|
}
|
|
|
|
// Returns floor(log_10(2^__e)).
|
|
_NODISCARD inline uint32_t __log10Pow2(const int32_t __e) {
|
|
// The first value this approximation fails for is 2^1651 which is just greater than 10^297.
|
|
_STL_INTERNAL_CHECK(__e >= 0);
|
|
_STL_INTERNAL_CHECK(__e <= 1650);
|
|
return (static_cast<uint32_t>(__e) * 78913) >> 18;
|
|
}
|
|
|
|
// Returns floor(log_10(5^__e)).
|
|
_NODISCARD inline uint32_t __log10Pow5(const int32_t __e) {
|
|
// The first value this approximation fails for is 5^2621 which is just greater than 10^1832.
|
|
_STL_INTERNAL_CHECK(__e >= 0);
|
|
_STL_INTERNAL_CHECK(__e <= 2620);
|
|
return (static_cast<uint32_t>(__e) * 732923) >> 20;
|
|
}
|
|
|
|
// Returns the IEEE-754 bit pattern of __f, type-punned portably via memcpy.
_NODISCARD inline uint32_t __float_to_bits(const float __f) {
    uint32_t __result{0};
    _CSTD memcpy(&__result, &__f, sizeof(float));
    return __result;
}
|
|
|
|
// Returns the IEEE-754 bit pattern of __d, type-punned portably via memcpy.
_NODISCARD inline uint64_t __double_to_bits(const double __d) {
    uint64_t __result{0};
    _CSTD memcpy(&__result, &__d, sizeof(double));
    return __result;
}
|
|
|
|
// ^^^^^^^^^^ DERIVED FROM common.h ^^^^^^^^^^
|
|
|
|
// vvvvvvvvvv DERIVED FROM d2s.h vvvvvvvvvv
|
|
|
|
// IEEE-754 binary64 layout: 52 explicit mantissa bits, 11 exponent bits, bias 1023.
inline constexpr int __DOUBLE_MANTISSA_BITS = 52;
inline constexpr int __DOUBLE_EXPONENT_BITS = 11;
inline constexpr int __DOUBLE_BIAS = 1023;

// Bit widths of entries in the power-of-5 / inverse-power-of-5 tables (from d2s.h).
inline constexpr int __DOUBLE_POW5_INV_BITCOUNT = 122;
inline constexpr int __DOUBLE_POW5_BITCOUNT = 121;
|
|
|
|
// ^^^^^^^^^^ DERIVED FROM d2s.h ^^^^^^^^^^
|
|
|
|
// vvvvvvvvvv DERIVED FROM d2s_intrinsics.h vvvvvvvvvv
|
|
|
|
#ifdef _M_X64
|
|
|
|
// Computes the full 128-bit product of __a and __b: returns the low 64 bits
// and stores the high 64 bits in *__productHi. (x64: hardware intrinsic.)
_NODISCARD inline uint64_t __ryu_umul128(const uint64_t __a, const uint64_t __b, uint64_t* const __productHi) {
    return _umul128(__a, __b, __productHi);
}
|
|
|
|
// Returns the low 64 bits of ((__hi << 64) | __lo) >> __dist. (x64: hardware intrinsic.)
_NODISCARD inline uint64_t __ryu_shiftright128(const uint64_t __lo, const uint64_t __hi, const uint32_t __dist) {
    // For the __shiftright128 intrinsic, the shift value is always
    // modulo 64.
    // In the current implementation of the double-precision version
    // of Ryu, the shift value is always < 64.
    // (The shift value is in the range [49, 58].)
    // Check this here in case a future change requires larger shift
    // values. In this case this function needs to be adjusted.
    _STL_INTERNAL_CHECK(__dist < 64);
    return __shiftright128(__lo, __hi, static_cast<unsigned char>(__dist));
}
|
|
|
|
#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv
|
|
|
|
// Portable fallback for _umul128: computes the full 128-bit product of __a and __b,
// returning the low 64 bits and storing the high 64 bits in *__productHi.
_NODISCARD __forceinline uint64_t __ryu_umul128(const uint64_t __a, const uint64_t __b, uint64_t* const __productHi) {
    // TRANSITION, VSO-634761
    // The casts here help MSVC to avoid calls to the __allmul library function.
    const uint32_t __aLo = static_cast<uint32_t>(__a);
    const uint32_t __aHi = static_cast<uint32_t>(__a >> 32);
    const uint32_t __bLo = static_cast<uint32_t>(__b);
    const uint32_t __bHi = static_cast<uint32_t>(__b >> 32);

    // Schoolbook multiplication: four 32x32 -> 64-bit partial products.
    const uint64_t __b00 = static_cast<uint64_t>(__aLo) * __bLo;
    const uint64_t __b01 = static_cast<uint64_t>(__aLo) * __bHi;
    const uint64_t __b10 = static_cast<uint64_t>(__aHi) * __bLo;
    const uint64_t __b11 = static_cast<uint64_t>(__aHi) * __bHi;

    const uint32_t __b00Lo = static_cast<uint32_t>(__b00);
    const uint32_t __b00Hi = static_cast<uint32_t>(__b00 >> 32);

    // Accumulate the middle partial products in 64-bit sums; splitting each sum
    // into 32-bit halves propagates carries without needing a 128-bit type.
    const uint64_t __mid1 = __b10 + __b00Hi;
    const uint32_t __mid1Lo = static_cast<uint32_t>(__mid1);
    const uint32_t __mid1Hi = static_cast<uint32_t>(__mid1 >> 32);

    const uint64_t __mid2 = __b01 + __mid1Lo;
    const uint32_t __mid2Lo = static_cast<uint32_t>(__mid2);
    const uint32_t __mid2Hi = static_cast<uint32_t>(__mid2 >> 32);

    // High word: top partial product plus the carries out of both middle sums.
    const uint64_t __pHi = __b11 + __mid1Hi + __mid2Hi;
    const uint64_t __pLo = (static_cast<uint64_t>(__mid2Lo) << 32) | __b00Lo;

    *__productHi = __pHi;
    return __pLo;
}
|
|
|
|
// Portable fallback for __shiftright128: returns the low 64 bits of
// ((__hi << 64) | __lo) >> __dist.
_NODISCARD inline uint64_t __ryu_shiftright128(const uint64_t __lo, const uint64_t __hi, const uint32_t __dist) {
    // We don't need to handle the case __dist >= 64 here (see above).
    _STL_INTERNAL_CHECK(__dist < 64);
#ifdef _WIN64
    // __dist > 0 keeps (64 - __dist) < 64, avoiding an out-of-range shift.
    _STL_INTERNAL_CHECK(__dist > 0);
    return (__hi << (64 - __dist)) | (__lo >> __dist);
#else // ^^^ 64-bit ^^^ / vvv 32-bit vvv
    // Avoid a 64-bit shift by taking advantage of the range of shift values.
    // With __dist >= 32, only the high half of __lo can contribute to the result.
    _STL_INTERNAL_CHECK(__dist >= 32);
    return (__hi << (64 - __dist)) | (static_cast<uint32_t>(__lo >> 32) >> (__dist - 32));
#endif // ^^^ 32-bit ^^^
}
|
|
|
|
#endif // ^^^ intrinsics unavailable ^^^
|
|
|
|
#ifndef _WIN64
|
|
|
|
// Returns the high 64 bits of the 128-bit product of __a and __b.
_NODISCARD inline uint64_t __umulh(const uint64_t __a, const uint64_t __b) {
    // Reuse the __ryu_umul128 implementation.
    // Optimizers will likely eliminate the instructions used to compute the
    // low part of the product.
    uint64_t __hi;
    (void) __ryu_umul128(__a, __b, &__hi); // low half intentionally discarded
    return __hi;
}
|
|
|
|
// On 32-bit platforms, compilers typically generate calls to library
|
|
// functions for 64-bit divisions, even if the divisor is a constant.
|
|
//
|
|
// TRANSITION, LLVM-37932
|
|
//
|
|
// The functions here perform division-by-constant using multiplications
|
|
// in the same way as 64-bit compilers would do.
|
|
//
|
|
// NB:
|
|
// The multipliers and shift values are the ones generated by clang x64
|
|
// for expressions like x/5, x/10, etc.
|
|
|
|
// __x / 5, via multiply-high with the magic constant and shift pair that
// 64-bit compilers generate for this division (see NB above).
_NODISCARD inline uint64_t __div5(const uint64_t __x) {
    return __umulh(__x, 0xCCCCCCCCCCCCCCCDu) >> 2;
}
|
|
|
|
// __x / 10, via the same magic multiplier as __div5 with one extra shift.
_NODISCARD inline uint64_t __div10(const uint64_t __x) {
    return __umulh(__x, 0xCCCCCCCCCCCCCCCDu) >> 3;
}
|
|
|
|
// __x / 100: pre-shift divides by 4, then multiply-high + shift divides by 25.
_NODISCARD inline uint64_t __div100(const uint64_t __x) {
    return __umulh(__x >> 2, 0x28F5C28F5C28F5C3u) >> 2;
}
|
|
|
|
// __x / 100000000, via multiply-high with a compiler-generated magic constant.
_NODISCARD inline uint64_t __div1e8(const uint64_t __x) {
    return __umulh(__x, 0xABCC77118461CEFDu) >> 26;
}
|
|
|
|
// __x / 1000000000, via pre-shift plus multiply-high with a magic constant.
_NODISCARD inline uint64_t __div1e9(const uint64_t __x) {
    return __umulh(__x >> 9, 0x44B82FA09B5A53u) >> 11;
}
|
|
|
|
// Returns __x % 1000000000.
_NODISCARD inline uint32_t __mod1e9(const uint64_t __x) {
    // Avoid 64-bit math as much as possible.
    // Returning static_cast<uint32_t>(__x - 1000000000 * __div1e9(__x)) would
    // perform 32x64-bit multiplication and 64-bit subtraction.
    // __x and 1000000000 * __div1e9(__x) are guaranteed to differ by
    // less than 10^9, so their highest 32 bits must be identical,
    // so we can truncate both sides to uint32_t before subtracting.
    // We can also simplify static_cast<uint32_t>(1000000000 * __div1e9(__x)).
    // We can truncate before multiplying instead of after, as multiplying
    // the highest 32 bits of __div1e9(__x) can't affect the lowest 32 bits.
    return static_cast<uint32_t>(__x) - 1000000000 * static_cast<uint32_t>(__div1e9(__x));
}
|
|
|
|
#else // ^^^ 32-bit ^^^ / vvv 64-bit vvv
|
|
|
|
// __x / 5. (64-bit targets: the compiler emits the multiply-shift itself.)
_NODISCARD inline uint64_t __div5(const uint64_t __x) {
    return __x / 5;
}
|
|
|
|
// __x / 10.
_NODISCARD inline uint64_t __div10(const uint64_t __x) {
    return __x / 10;
}
|
|
|
|
// __x / 100.
_NODISCARD inline uint64_t __div100(const uint64_t __x) {
    return __x / 100;
}
|
|
|
|
// __x / 10^8.
_NODISCARD inline uint64_t __div1e8(const uint64_t __x) {
    return __x / 100000000;
}
|
|
|
|
// __x / 10^9.
_NODISCARD inline uint64_t __div1e9(const uint64_t __x) {
    return __x / 1000000000;
}
|
|
|
|
// Returns __x % 1000000000. The difference fits in 32 bits, so the truncation is exact.
_NODISCARD inline uint32_t __mod1e9(const uint64_t __x) {
    return static_cast<uint32_t>(__x - 1000000000 * __div1e9(__x));
}
|
|
|
|
#endif // ^^^ 64-bit ^^^
|
|
|
|
// Returns the largest __k such that 5^__k divides __value (which must be nonzero).
_NODISCARD inline uint32_t __pow5Factor(uint64_t __value) {
    uint32_t __count = 0;
    while (true) {
        _STL_INTERNAL_CHECK(__value != 0);
        const uint64_t __quotient = __div5(__value);
        // Recover the remainder from the quotient in 32-bit arithmetic.
        const uint32_t __remainder = static_cast<uint32_t>(__value) - 5 * static_cast<uint32_t>(__quotient);
        if (__remainder != 0) {
            return __count;
        }
        __value = __quotient;
        ++__count;
    }
}
|
|
|
|
// Returns true if __value is divisible by 5^__p.
|
|
_NODISCARD inline bool __multipleOfPowerOf5(const uint64_t __value, const uint32_t __p) {
|
|
// I tried a case distinction on __p, but there was no performance difference.
|
|
return __pow5Factor(__value) >= __p;
|
|
}
|
|
|
|
// Returns true if __value is divisible by 2^__p.
|
|
_NODISCARD inline bool __multipleOfPowerOf2(const uint64_t __value, const uint32_t __p) {
|
|
_STL_INTERNAL_CHECK(__value != 0);
|
|
_STL_INTERNAL_CHECK(__p < 64);
|
|
// return __builtin_ctzll(__value) >= __p;
|
|
return (__value & ((1ull << __p) - 1)) == 0;
|
|
}
|
|
|
|
// ^^^^^^^^^^ DERIVED FROM d2s_intrinsics.h ^^^^^^^^^^
|
|
|
|
// vvvvvvvvvv DERIVED FROM d2fixed.c vvvvvvvvvv
|
|
|
|
// Extra precision bits carried by the d2fixed power-of-10 table entries (see __pow10BitsForIndex).
inline constexpr int __POW10_ADDITIONAL_BITS = 120;
|
|
|
|
#ifdef _M_X64
|
|
// Returns the low 64 bits of the high 128 bits of the 256-bit product of a and b.
// That is, bits [128, 192) of (__aHi:__aLo) * (__bHi:__bLo).
_NODISCARD inline uint64_t __umul256_hi128_lo64(
    const uint64_t __aHi, const uint64_t __aLo, const uint64_t __bHi, const uint64_t __bLo) {
    // Four 64x64 -> 128-bit partial products.
    uint64_t __b00Hi;
    const uint64_t __b00Lo = __ryu_umul128(__aLo, __bLo, &__b00Hi);
    uint64_t __b01Hi;
    const uint64_t __b01Lo = __ryu_umul128(__aLo, __bHi, &__b01Hi);
    uint64_t __b10Hi;
    const uint64_t __b10Lo = __ryu_umul128(__aHi, __bLo, &__b10Hi);
    uint64_t __b11Hi;
    const uint64_t __b11Lo = __ryu_umul128(__aHi, __bHi, &__b11Hi);
    (void) __b00Lo; // unused: below bit 64, can't reach bits [128, 192)
    (void) __b11Hi; // unused: contributes only to bits [192, 256)
    // (__sum < __addend) detects carry-out of an unsigned 64-bit addition.
    const uint64_t __temp1Lo = __b10Lo + __b00Hi;
    const uint64_t __temp1Hi = __b10Hi + (__temp1Lo < __b10Lo);
    const uint64_t __temp2Lo = __b01Lo + __temp1Lo;
    const uint64_t __temp2Hi = __b01Hi + (__temp2Lo < __b01Lo);
    return __b11Lo + __temp1Hi + __temp2Hi;
}
|
|
|
|
// Returns the 128-bit value (__vHi:__vLo) modulo 1000000000.
_NODISCARD inline uint32_t __uint128_mod1e9(const uint64_t __vHi, const uint64_t __vLo) {
    // After multiplying, we're going to shift right by 29, then truncate to uint32_t.
    // This means that we need only 29 + 32 = 61 bits, so we can truncate to uint64_t before shifting.
    const uint64_t __multiplied = __umul256_hi128_lo64(__vHi, __vLo, 0x89705F4136B4A597u, 0x31680A88F8953031u);

    // For uint32_t truncation, see the __mod1e9() comment in d2s_intrinsics.h.
    const uint32_t __shifted = static_cast<uint32_t>(__multiplied >> 29);

    return static_cast<uint32_t>(__vLo) - 1000000000 * __shifted;
}
|
|
#endif // ^^^ intrinsics available ^^^
|
|
|
|
// Computes ((__m * __mul) >> __j) % 10^9, where __mul points to a 192-bit
// multiplier stored as three little-endian 64-bit words.
// The trailing "// N" comments give each partial value's bit offset in the 256-bit product.
_NODISCARD inline uint32_t __mulShift_mod1e9(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
    uint64_t __high0; // 64
    const uint64_t __low0 = __ryu_umul128(__m, __mul[0], &__high0); // 0
    uint64_t __high1; // 128
    const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
    uint64_t __high2; // 192
    const uint64_t __low2 = __ryu_umul128(__m, __mul[2], &__high2); // 128
    const uint64_t __s0low = __low0; // 0
    (void) __s0low; // unused: shifted out entirely, since __j >= 128
    const uint64_t __s0high = __low1 + __high0; // 64
    const uint32_t __c1 = __s0high < __low1; // carry out of the 64-bit addition
    const uint64_t __s1low = __low2 + __high1 + __c1; // 128
    const uint32_t __c2 = __s1low < __low2; // __high1 + __c1 can't overflow, so compare against __low2
    const uint64_t __s1high = __high2 + __c2; // 192
    _STL_INTERNAL_CHECK(__j >= 128);
    _STL_INTERNAL_CHECK(__j <= 180);
#ifdef _M_X64
    const uint32_t __dist = static_cast<uint32_t>(__j - 128); // __dist: [0, 52]
    const uint64_t __shiftedhigh = __s1high >> __dist;
    const uint64_t __shiftedlow = __ryu_shiftright128(__s1low, __s1high, __dist);
    return __uint128_mod1e9(__shiftedhigh, __shiftedlow);
#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv
    // Reduce mod 10^9 in 32-bit chunks before shifting, to avoid 128-bit math.
    if (__j < 160) { // __j: [128, 160)
        const uint64_t __r0 = __mod1e9(__s1high);
        const uint64_t __r1 = __mod1e9((__r0 << 32) | (__s1low >> 32));
        const uint64_t __r2 = ((__r1 << 32) | (__s1low & 0xffffffff));
        return __mod1e9(__r2 >> (__j - 128));
    } else { // __j: [160, 192)
        const uint64_t __r0 = __mod1e9(__s1high);
        const uint64_t __r1 = ((__r0 << 32) | (__s1low >> 32));
        return __mod1e9(__r1 >> (__j - 160));
    }
#endif // ^^^ intrinsics unavailable ^^^
}
|
|
|
|
// Writes the __olength decimal digits of __digits to __result, most significant
// first, with no terminator. Precondition (implied): __digits has __olength digits.
inline void __append_n_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
    uint32_t __i = 0; // number of digits already written (from the right)
    // Emit four digits at a time using the two-digit lookup table.
    while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
        const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
        const uint32_t __c = __digits % 10000;
#endif
        __digits /= 10000;
        const uint32_t __c0 = (__c % 100) << 1; // table offsets are 2 chars per pair
        const uint32_t __c1 = (__c / 100) << 1;
        _CSTD memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c0, 2);
        _CSTD memcpy(__result + __olength - __i - 4, __DIGIT_TABLE + __c1, 2);
        __i += 4;
    }
    if (__digits >= 100) {
        const uint32_t __c = (__digits % 100) << 1;
        __digits /= 100;
        _CSTD memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
        __i += 2;
    }
    // At most two digits remain.
    if (__digits >= 10) {
        const uint32_t __c = __digits << 1;
        _CSTD memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
    } else {
        __result[0] = static_cast<char>('0' + __digits);
    }
}
|
|
|
|
// Writes __digits as "D.DDD..." to __result: __olength digits total with a decimal
// point after the first digit, occupying __olength + 1 characters.
inline void __append_d_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
    uint32_t __i = 0; // number of digits already written (from the right)
    // Emit four digits at a time; "+ 1" in the offsets reserves room for the '.'.
    while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
        const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
        const uint32_t __c = __digits % 10000;
#endif
        __digits /= 10000;
        const uint32_t __c0 = (__c % 100) << 1;
        const uint32_t __c1 = (__c / 100) << 1;
        _CSTD memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c0, 2);
        _CSTD memcpy(__result + __olength + 1 - __i - 4, __DIGIT_TABLE + __c1, 2);
        __i += 4;
    }
    if (__digits >= 100) {
        const uint32_t __c = (__digits % 100) << 1;
        __digits /= 100;
        _CSTD memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c, 2);
        __i += 2;
    }
    // Write the leading digit(s) and the decimal point.
    if (__digits >= 10) {
        const uint32_t __c = __digits << 1;
        __result[2] = __DIGIT_TABLE[__c + 1];
        __result[1] = '.';
        __result[0] = __DIGIT_TABLE[__c];
    } else {
        __result[1] = '.';
        __result[0] = static_cast<char>('0' + __digits);
    }
}
|
|
|
|
// Writes exactly __count characters to __result: the low __count decimal digits
// of __digits, most significant first (zero-padded if __digits is shorter).
inline void __append_c_digits(const uint32_t __count, uint32_t __digits, char* const __result) {
    uint32_t __i = 0;
    // Emit pairs of digits from the right.
    for (; __i < __count - 1; __i += 2) {
        const uint32_t __c = (__digits % 100) << 1;
        __digits /= 100;
        _CSTD memcpy(__result + __count - __i - 2, __DIGIT_TABLE + __c, 2);
    }
    // If __count is odd, one leading digit remains.
    if (__i < __count) {
        const char __c = static_cast<char>('0' + (__digits % 10));
        __result[__count - __i - 1] = __c;
    }
}
|
|
|
|
// Writes exactly 9 decimal digits (zero-padded) of __digits to __result.
inline void __append_nine_digits(uint32_t __digits, char* const __result) {
    if (__digits == 0) {
        _CSTD memset(__result, '0', 9);
        return;
    }

    // Two iterations (__i = 0 and __i = 4) write the low 8 digits in 4-digit groups.
    for (uint32_t __i = 0; __i < 5; __i += 4) {
#ifdef __clang__ // TRANSITION, LLVM-38217
        const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
        const uint32_t __c = __digits % 10000;
#endif
        __digits /= 10000;
        const uint32_t __c0 = (__c % 100) << 1;
        const uint32_t __c1 = (__c / 100) << 1;
        _CSTD memcpy(__result + 7 - __i, __DIGIT_TABLE + __c0, 2);
        _CSTD memcpy(__result + 5 - __i, __DIGIT_TABLE + __c1, 2);
    }
    // The ninth (most significant) digit.
    __result[0] = static_cast<char>('0' + __digits);
}
|
|
|
|
// Maps binary exponent __e to its power-of-10 table index: ceil(__e / 16).
_NODISCARD inline uint32_t __indexForExponent(const uint32_t __e) {
    const uint32_t __rounded_up = __e + 15;
    return __rounded_up / 16;
}
|
|
|
|
// Bit count of the power-of-10 table entry for index __idx: 16 bits per index
// step plus the table's extra precision bits.
_NODISCARD inline uint32_t __pow10BitsForIndex(const uint32_t __idx) {
    const uint32_t __base_bits = 16 * __idx;
    return __base_bits + __POW10_ADDITIONAL_BITS;
}
|
|
|
|
// Returns the number of 9-digit blocks needed for the integer part at table index __idx.
_NODISCARD inline uint32_t __lengthForIndex(const uint32_t __idx) {
    // +1 for ceil, +16 for mantissa, +8 to round up when dividing by 9
    return (__log10Pow2(16 * static_cast<int32_t>(__idx)) + 1 + 16 + 8) / 9;
}
|
|
|
|
// Formats __d in fixed notation with __precision digits after the decimal point,
// writing into [_First, _Last). Returns the end of the written output on success,
// or { _Last, errc::value_too_large } if the buffer is too small.
// NOTE(review): sign/infinity/NaN handling is not visible here — presumably done
// by the caller before invoking this; verify against the call site.
_NODISCARD inline to_chars_result __d2fixed_buffered_n(char* _First, char* const _Last, const double __d,
    const uint32_t __precision) {
    char* const _Original_first = _First;

    const uint64_t __bits = __double_to_bits(__d);

    // Case distinction; exit early for the easy cases.
    // __bits == 0 is exactly +0.0: print "0" or "0.000...0".
    if (__bits == 0) {
        const int32_t _Total_zero_length = 1 // leading zero
            + static_cast<int32_t>(__precision != 0) // possible decimal point
            + static_cast<int32_t>(__precision); // zeroes after decimal point

        if (_Last - _First < _Total_zero_length) {
            return { _Last, errc::value_too_large };
        }

        *_First++ = '0';
        if (__precision > 0) {
            *_First++ = '.';
            _CSTD memset(_First, '0', __precision);
            _First += __precision;
        }
        return { _First, errc{} };
    }

    // Decode __bits into mantissa and exponent.
    const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
    const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

    // Normalize to __m2 * 2^__e2; subnormals (__ieeeExponent == 0) have no implicit bit.
    int32_t __e2;
    uint64_t __m2;
    if (__ieeeExponent == 0) {
        __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
        __m2 = __ieeeMantissa;
    } else {
        __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
        __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
    }

    // Integer part: emit 9-digit blocks from most significant to least,
    // skipping leading all-zero blocks (__nonzero tracks whether we've started).
    bool __nonzero = false;
    if (__e2 >= -52) {
        const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
        const uint32_t __p10bits = __pow10BitsForIndex(__idx);
        const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
        for (int32_t __i = __len - 1; __i >= 0; --__i) {
            const uint32_t __j = __p10bits - __e2;
            // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
            // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
            const uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
                static_cast<int32_t>(__j + 8));
            if (__nonzero) {
                if (_Last - _First < 9) {
                    return { _Last, errc::value_too_large };
                }
                __append_nine_digits(__digits, _First);
                _First += 9;
            } else if (__digits != 0) {
                // First nonzero block: print without leading zeros.
                const uint32_t __olength = __decimalLength9(__digits);
                if (_Last - _First < static_cast<ptrdiff_t>(__olength)) {
                    return { _Last, errc::value_too_large };
                }
                __append_n_digits(__olength, __digits, _First);
                _First += __olength;
                __nonzero = true;
            }
        }
    }
    if (!__nonzero) {
        // Integer part is zero.
        if (_First == _Last) {
            return { _Last, errc::value_too_large };
        }
        *_First++ = '0';
    }
    if (__precision > 0) {
        if (_First == _Last) {
            return { _Last, errc::value_too_large };
        }
        *_First++ = '.';
    }
    // Fractional part exists only when __e2 < 0.
    if (__e2 < 0) {
        const int32_t __idx = -__e2 / 16;
        const uint32_t __blocks = __precision / 9 + 1;
        // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
        int __roundUp = 0;
        uint32_t __i = 0;
        if (__blocks <= __MIN_BLOCK_2[__idx]) {
            // All requested digits precede the first nonzero block: all zeros.
            __i = __blocks;
            if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
                return { _Last, errc::value_too_large };
            }
            _CSTD memset(_First, '0', __precision);
            _First += __precision;
        } else if (__i < __MIN_BLOCK_2[__idx]) {
            // Leading blocks before the table's first entry are all zeros.
            __i = __MIN_BLOCK_2[__idx];
            if (_Last - _First < static_cast<ptrdiff_t>(9 * __i)) {
                return { _Last, errc::value_too_large };
            }
            _CSTD memset(_First, '0', 9 * __i);
            _First += 9 * __i;
        }
        for (; __i < __blocks; ++__i) {
            const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
            const uint32_t __p = __POW10_OFFSET_2[__idx] + __i - __MIN_BLOCK_2[__idx];
            if (__p >= __POW10_OFFSET_2[__idx + 1]) {
                // If the remaining digits are all 0, then we might as well use memset.
                // No rounding required in this case.
                const uint32_t __fill = __precision - 9 * __i;
                if (_Last - _First < static_cast<ptrdiff_t>(__fill)) {
                    return { _Last, errc::value_too_large };
                }
                _CSTD memset(_First, '0', __fill);
                _First += __fill;
                break;
            }
            // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
            // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
            uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
            if (__i < __blocks - 1) {
                if (_Last - _First < 9) {
                    return { _Last, errc::value_too_large };
                }
                __append_nine_digits(__digits, _First);
                _First += 9;
            } else {
                // Final (possibly partial) block: trim to __maximum digits and
                // determine the rounding direction from the first dropped digit.
                const uint32_t __maximum = __precision - 9 * __i;
                uint32_t __lastDigit = 0;
                for (uint32_t __k = 0; __k < 9 - __maximum; ++__k) {
                    __lastDigit = __digits % 10;
                    __digits /= 10;
                }
                if (__lastDigit != 5) {
                    __roundUp = __lastDigit > 5;
                } else {
                    // Exactly-half case: round-half-to-even needs to know whether
                    // the dropped tail is all zeros.
                    // Is m * 10^(additionalDigits + 1) / 2^(-__e2) integer?
                    const int32_t __requiredTwos = -__e2 - static_cast<int32_t>(__precision) - 1;
                    const bool __trailingZeros = __requiredTwos <= 0
                        || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
                    __roundUp = __trailingZeros ? 2 : 1;
                }
                if (__maximum > 0) {
                    if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
                        return { _Last, errc::value_too_large };
                    }
                    __append_c_digits(__maximum, __digits, _First);
                    _First += __maximum;
                }
                break;
            }
        }
        // Propagate the round-up backwards through the written characters.
        if (__roundUp != 0) {
            char* _Round = _First;
            char* _Dot = _Last; // sentinel: _Last means "no '.' seen yet"
            while (true) {
                if (_Round == _Original_first) {
                    // Carried past the first character: output was all 9s,
                    // e.g. "9.99" + 1 ulp -> "10.00" (shift the '.' right).
                    _Round[0] = '1';
                    if (_Dot != _Last) {
                        _Dot[0] = '0';
                        _Dot[1] = '.';
                    }
                    if (_First == _Last) {
                        return { _Last, errc::value_too_large };
                    }
                    *_First++ = '0';
                    break;
                }
                --_Round;
                const char __c = _Round[0];
                if (__c == '.') {
                    _Dot = _Round; // skip over the '.' and keep carrying
                } else if (__c == '9') {
                    _Round[0] = '0';
                    __roundUp = 1; // once a 9 wraps, any further bump is unconditional
                } else {
                    // __roundUp == 2: round-half-to-even, bump only if digit is odd.
                    if (__roundUp == 1 || __c % 2 != 0) {
                        _Round[0] = __c + 1;
                    }
                    break;
                }
            }
        }
    } else {
        // __e2 >= 0: the value is an integer; the fractional digits are all zero.
        if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
            return { _Last, errc::value_too_large };
        }
        _CSTD memset(_First, '0', __precision);
        _First += __precision;
    }
    return { _First, errc{} };
}
|
|
|
|
_NODISCARD inline to_chars_result __d2exp_buffered_n(char* _First, char* const _Last, const double __d,
|
|
uint32_t __precision) {
|
|
char* const _Original_first = _First;
|
|
|
|
const uint64_t __bits = __double_to_bits(__d);
|
|
|
|
// Case distinction; exit early for the easy cases.
|
|
if (__bits == 0) {
|
|
const int32_t _Total_zero_length = 1 // leading zero
|
|
+ static_cast<int32_t>(__precision != 0) // possible decimal point
|
|
+ static_cast<int32_t>(__precision) // zeroes after decimal point
|
|
+ 4; // "e+00"
|
|
if (_Last - _First < _Total_zero_length) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
*_First++ = '0';
|
|
if (__precision > 0) {
|
|
*_First++ = '.';
|
|
_CSTD memset(_First, '0', __precision);
|
|
_First += __precision;
|
|
}
|
|
_CSTD memcpy(_First, "e+00", 4);
|
|
_First += 4;
|
|
return { _First, errc{} };
|
|
}
|
|
|
|
// Decode __bits into mantissa and exponent.
|
|
const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
|
|
const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);
|
|
|
|
int32_t __e2;
|
|
uint64_t __m2;
|
|
if (__ieeeExponent == 0) {
|
|
__e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
|
|
__m2 = __ieeeMantissa;
|
|
} else {
|
|
__e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
|
|
__m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
|
|
}
|
|
|
|
const bool __printDecimalPoint = __precision > 0;
|
|
++__precision;
|
|
uint32_t __digits = 0;
|
|
uint32_t __printedDigits = 0;
|
|
uint32_t __availableDigits = 0;
|
|
int32_t __exp = 0;
|
|
if (__e2 >= -52) {
|
|
const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
|
|
const uint32_t __p10bits = __pow10BitsForIndex(__idx);
|
|
const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
|
|
for (int32_t __i = __len - 1; __i >= 0; --__i) {
|
|
const uint32_t __j = __p10bits - __e2;
|
|
// Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
|
|
// a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
|
|
__digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
|
|
static_cast<int32_t>(__j + 8));
|
|
if (__printedDigits != 0) {
|
|
if (__printedDigits + 9 > __precision) {
|
|
__availableDigits = 9;
|
|
break;
|
|
}
|
|
if (_Last - _First < 9) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
__append_nine_digits(__digits, _First);
|
|
_First += 9;
|
|
__printedDigits += 9;
|
|
} else if (__digits != 0) {
|
|
__availableDigits = __decimalLength9(__digits);
|
|
__exp = __i * 9 + static_cast<int32_t>(__availableDigits) - 1;
|
|
if (__availableDigits > __precision) {
|
|
break;
|
|
}
|
|
if (__printDecimalPoint) {
|
|
if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
__append_d_digits(__availableDigits, __digits, _First);
|
|
_First += __availableDigits + 1; // +1 for decimal point
|
|
} else {
|
|
if (_First == _Last) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
*_First++ = static_cast<char>('0' + __digits);
|
|
}
|
|
__printedDigits = __availableDigits;
|
|
__availableDigits = 0;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (__e2 < 0 && __availableDigits == 0) {
|
|
const int32_t __idx = -__e2 / 16;
|
|
for (int32_t __i = __MIN_BLOCK_2[__idx]; __i < 200; ++__i) {
|
|
const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
|
|
const uint32_t __p = __POW10_OFFSET_2[__idx] + static_cast<uint32_t>(__i) - __MIN_BLOCK_2[__idx];
|
|
// Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
|
|
// a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
|
|
__digits = (__p >= __POW10_OFFSET_2[__idx + 1]) ? 0 : __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
|
|
if (__printedDigits != 0) {
|
|
if (__printedDigits + 9 > __precision) {
|
|
__availableDigits = 9;
|
|
break;
|
|
}
|
|
if (_Last - _First < 9) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
__append_nine_digits(__digits, _First);
|
|
_First += 9;
|
|
__printedDigits += 9;
|
|
} else if (__digits != 0) {
|
|
__availableDigits = __decimalLength9(__digits);
|
|
__exp = -(__i + 1) * 9 + static_cast<int32_t>(__availableDigits) - 1;
|
|
if (__availableDigits > __precision) {
|
|
break;
|
|
}
|
|
if (__printDecimalPoint) {
|
|
if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
__append_d_digits(__availableDigits, __digits, _First);
|
|
_First += __availableDigits + 1; // +1 for decimal point
|
|
} else {
|
|
if (_First == _Last) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
*_First++ = static_cast<char>('0' + __digits);
|
|
}
|
|
__printedDigits = __availableDigits;
|
|
__availableDigits = 0;
|
|
}
|
|
}
|
|
}
|
|
|
|
const uint32_t __maximum = __precision - __printedDigits;
|
|
if (__availableDigits == 0) {
|
|
__digits = 0;
|
|
}
|
|
uint32_t __lastDigit = 0;
|
|
if (__availableDigits > __maximum) {
|
|
for (uint32_t __k = 0; __k < __availableDigits - __maximum; ++__k) {
|
|
__lastDigit = __digits % 10;
|
|
__digits /= 10;
|
|
}
|
|
}
|
|
// 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
|
|
int __roundUp = 0;
|
|
if (__lastDigit != 5) {
|
|
__roundUp = __lastDigit > 5;
|
|
} else {
|
|
// Is m * 2^__e2 * 10^(__precision + 1 - __exp) integer?
|
|
// __precision was already increased by 1, so we don't need to write + 1 here.
|
|
const int32_t __rexp = static_cast<int32_t>(__precision) - __exp;
|
|
const int32_t __requiredTwos = -__e2 - __rexp;
|
|
bool __trailingZeros = __requiredTwos <= 0
|
|
|| (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
|
|
if (__rexp < 0) {
|
|
const int32_t __requiredFives = -__rexp;
|
|
__trailingZeros = __trailingZeros && __multipleOfPowerOf5(__m2, static_cast<uint32_t>(__requiredFives));
|
|
}
|
|
__roundUp = __trailingZeros ? 2 : 1;
|
|
}
|
|
if (__printedDigits != 0) {
|
|
if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
if (__digits == 0) {
|
|
_CSTD memset(_First, '0', __maximum);
|
|
} else {
|
|
__append_c_digits(__maximum, __digits, _First);
|
|
}
|
|
_First += __maximum;
|
|
} else {
|
|
if (__printDecimalPoint) {
|
|
if (_Last - _First < static_cast<ptrdiff_t>(__maximum + 1)) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
__append_d_digits(__maximum, __digits, _First);
|
|
_First += __maximum + 1; // +1 for decimal point
|
|
} else {
|
|
if (_First == _Last) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
*_First++ = static_cast<char>('0' + __digits);
|
|
}
|
|
}
|
|
if (__roundUp != 0) {
|
|
char* _Round = _First;
|
|
while (true) {
|
|
if (_Round == _Original_first) {
|
|
_Round[0] = '1';
|
|
++__exp;
|
|
break;
|
|
}
|
|
--_Round;
|
|
const char __c = _Round[0];
|
|
if (__c == '.') {
|
|
// Keep going.
|
|
} else if (__c == '9') {
|
|
_Round[0] = '0';
|
|
__roundUp = 1;
|
|
} else {
|
|
if (__roundUp == 1 || __c % 2 != 0) {
|
|
_Round[0] = __c + 1;
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
char _Sign_character;
|
|
|
|
if (__exp < 0) {
|
|
_Sign_character = '-';
|
|
__exp = -__exp;
|
|
} else {
|
|
_Sign_character = '+';
|
|
}
|
|
|
|
const int _Exponent_part_length = __exp >= 100
|
|
? 5 // "e+NNN"
|
|
: 4; // "e+NN"
|
|
|
|
if (_Last - _First < _Exponent_part_length) {
|
|
return { _Last, errc::value_too_large };
|
|
}
|
|
|
|
*_First++ = 'e';
|
|
*_First++ = _Sign_character;
|
|
|
|
if (__exp >= 100) {
|
|
const int32_t __c = __exp % 10;
|
|
_CSTD memcpy(_First, __DIGIT_TABLE + 2 * (__exp / 10), 2);
|
|
_First[2] = static_cast<char>('0' + __c);
|
|
_First += 3;
|
|
} else {
|
|
_CSTD memcpy(_First, __DIGIT_TABLE + 2 * __exp, 2);
|
|
_First += 2;
|
|
}
|
|
|
|
return { _First, errc{} };
|
|
}
|
|
|
|
// ^^^^^^^^^^ DERIVED FROM d2fixed.c ^^^^^^^^^^
|
|
|
|
// vvvvvvvvvv DERIVED FROM f2s.c vvvvvvvvvv
|
|
|
|
// IEEE-754 binary32 layout parameters used throughout the float (f2s) code path.
inline constexpr int __FLOAT_MANTISSA_BITS = 23; // explicit significand bits (implicit leading bit not counted)
inline constexpr int __FLOAT_EXPONENT_BITS = 8; // biased exponent field width
inline constexpr int __FLOAT_BIAS = 127; // exponent bias

// This table is generated by PrintFloatLookupTable.
// __FLOAT_POW5_INV_SPLIT[i] approximates 2^k / 5^(i+1) for the inverse-power-of-5 multiply in __mulPow5InvDivPow2;
// each entry has __FLOAT_POW5_INV_BITCOUNT significant bits.
inline constexpr int __FLOAT_POW5_INV_BITCOUNT = 59;
inline constexpr uint64_t __FLOAT_POW5_INV_SPLIT[31] = {
    576460752303423489u, 461168601842738791u, 368934881474191033u, 295147905179352826u,
    472236648286964522u, 377789318629571618u, 302231454903657294u, 483570327845851670u,
    386856262276681336u, 309485009821345069u, 495176015714152110u, 396140812571321688u,
    316912650057057351u, 507060240091291761u, 405648192073033409u, 324518553658426727u,
    519229685853482763u, 415383748682786211u, 332306998946228969u, 531691198313966350u,
    425352958651173080u, 340282366920938464u, 544451787073501542u, 435561429658801234u,
    348449143727040987u, 557518629963265579u, 446014903970612463u, 356811923176489971u,
    570899077082383953u, 456719261665907162u, 365375409332725730u
};
// __FLOAT_POW5_SPLIT[i] approximates 5^i scaled to __FLOAT_POW5_BITCOUNT significant bits,
// consumed by __mulPow5divPow2.
inline constexpr int __FLOAT_POW5_BITCOUNT = 61;
inline constexpr uint64_t __FLOAT_POW5_SPLIT[47] = {
    1152921504606846976u, 1441151880758558720u, 1801439850948198400u, 2251799813685248000u,
    1407374883553280000u, 1759218604441600000u, 2199023255552000000u, 1374389534720000000u,
    1717986918400000000u, 2147483648000000000u, 1342177280000000000u, 1677721600000000000u,
    2097152000000000000u, 1310720000000000000u, 1638400000000000000u, 2048000000000000000u,
    1280000000000000000u, 1600000000000000000u, 2000000000000000000u, 1250000000000000000u,
    1562500000000000000u, 1953125000000000000u, 1220703125000000000u, 1525878906250000000u,
    1907348632812500000u, 1192092895507812500u, 1490116119384765625u, 1862645149230957031u,
    1164153218269348144u, 1455191522836685180u, 1818989403545856475u, 2273736754432320594u,
    1421085471520200371u, 1776356839400250464u, 2220446049250313080u, 1387778780781445675u,
    1734723475976807094u, 2168404344971008868u, 1355252715606880542u, 1694065894508600678u,
    2117582368135750847u, 1323488980084844279u, 1654361225106055349u, 2067951531382569187u,
    1292469707114105741u, 1615587133892632177u, 2019483917365790221u
};
|
|
|
|
// Returns the largest __count such that 5^__count divides __value.
// Precondition: __value != 0 (otherwise the loop below would not terminate).
_NODISCARD inline uint32_t __pow5Factor(uint32_t __value) {
    uint32_t __factors = 0;
    _STL_INTERNAL_CHECK(__value != 0);
    while (__value % 5 == 0) {
        __value /= 5;
        ++__factors;
        _STL_INTERNAL_CHECK(__value != 0);
    }
    return __factors;
}
|
|
|
|
// Returns true if __value is divisible by 5^__p.
|
|
// Returns true if __value is divisible by 5^__p.
_NODISCARD inline bool __multipleOfPowerOf5(const uint32_t __value, const uint32_t __p) {
    const uint32_t __pow5_exponent = __pow5Factor(__value);
    return __pow5_exponent >= __p;
}
|
|
|
|
// Returns true if __value is divisible by 2^__p.
|
|
// Returns true if __value is divisible by 2^__p, i.e. its low __p bits are all zero.
_NODISCARD inline bool __multipleOfPowerOf2(const uint32_t __value, const uint32_t __p) {
    _STL_INTERNAL_CHECK(__value != 0);
    _STL_INTERNAL_CHECK(__p < 32);
    // Equivalent to __builtin_ctz(__value) >= __p, without requiring a ctz intrinsic.
    const uint32_t __low_bit_mask = (1u << __p) - 1;
    return (__value & __low_bit_mask) == 0;
}
|
|
|
|
// Computes (__m * __factor) >> __shift as a 32-bit value, where __m is 32 bits,
// __factor is 64 bits, and __shift > 32 (so the result fits in 32 bits for the
// inputs this file supplies — see the _STL_INTERNAL_CHECK on the 64-bit path).
_NODISCARD inline uint32_t __mulShift(const uint32_t __m, const uint64_t __factor, const int32_t __shift) {
    _STL_INTERNAL_CHECK(__shift > 32);

    // The casts here help MSVC to avoid calls to the __allmul library
    // function.
    const uint32_t __factorLo = static_cast<uint32_t>(__factor);
    const uint32_t __factorHi = static_cast<uint32_t>(__factor >> 32);
    // Two 32x32->64 partial products; __bits1 is offset 32 bits left of __bits0.
    const uint64_t __bits0 = static_cast<uint64_t>(__m) * __factorLo;
    const uint64_t __bits1 = static_cast<uint64_t>(__m) * __factorHi;

#ifndef _WIN64
    // On 32-bit platforms we can avoid a 64-bit shift-right since we only
    // need the upper 32 bits of the result and the shift value is > 32.
    const uint32_t __bits0Hi = static_cast<uint32_t>(__bits0 >> 32);
    uint32_t __bits1Lo = static_cast<uint32_t>(__bits1);
    uint32_t __bits1Hi = static_cast<uint32_t>(__bits1 >> 32);
    __bits1Lo += __bits0Hi; // add the overlapping halves of the partial products
    __bits1Hi += (__bits1Lo < __bits0Hi); // propagate the carry from the addition above
    const int32_t __s = __shift - 32;
    // Assemble the shifted result from the two 32-bit halves.
    return (__bits1Hi << (32 - __s)) | (__bits1Lo >> __s);
#else // ^^^ 32-bit ^^^ / vvv 64-bit vvv
    const uint64_t __sum = (__bits0 >> 32) + __bits1;
    const uint64_t __shiftedSum = __sum >> (__shift - 32);
    _STL_INTERNAL_CHECK(__shiftedSum <= UINT32_MAX);
    return static_cast<uint32_t>(__shiftedSum);
#endif // ^^^ 64-bit ^^^
}
|
|
|
|
// Computes __m divided by 5^(__q+1)-ish via the precomputed inverse table,
// then shifted right by __j; see __FLOAT_POW5_INV_SPLIT above.
_NODISCARD inline uint32_t __mulPow5InvDivPow2(const uint32_t __m, const uint32_t __q, const int32_t __j) {
    const uint64_t __inv_factor = __FLOAT_POW5_INV_SPLIT[__q];
    return __mulShift(__m, __inv_factor, __j);
}
|
|
|
|
// Computes __m multiplied by the scaled 5^__i from the precomputed table,
// then shifted right by __j; see __FLOAT_POW5_SPLIT above.
_NODISCARD inline uint32_t __mulPow5divPow2(const uint32_t __m, const uint32_t __i, const int32_t __j) {
    const uint64_t __pow5_factor = __FLOAT_POW5_SPLIT[__i];
    return __mulShift(__m, __pow5_factor, __j);
}
|
|
|
|
// A floating decimal representing m * 10^e.
|
|
// A floating decimal representing __mantissa * 10^__exponent.
struct __floating_decimal_32 {
    uint32_t __mantissa; // decimal significand; consumers measure it with __decimalLength9
    int32_t __exponent; // decimal (base-10) exponent applied to __mantissa
};
|
|
|
|
// Core of the Ryu algorithm for float: converts the raw IEEE fields of a finite,
// nonzero float into the shortest decimal representation (__mantissa * 10^__exponent)
// that round-trips back to the same float.
_NODISCARD inline __floating_decimal_32 __f2d(const uint32_t __ieeeMantissa, const uint32_t __ieeeExponent) {
    // Step 1: unify normal and subnormal decoding into __m2 * 2^__e2.
    int32_t __e2;
    uint32_t __m2;
    if (__ieeeExponent == 0) {
        // Subnormal: no implicit bit.
        // We subtract 2 so that the bounds computation has 2 additional bits.
        __e2 = 1 - __FLOAT_BIAS - __FLOAT_MANTISSA_BITS - 2;
        __m2 = __ieeeMantissa;
    } else {
        __e2 = static_cast<int32_t>(__ieeeExponent) - __FLOAT_BIAS - __FLOAT_MANTISSA_BITS - 2;
        __m2 = (1u << __FLOAT_MANTISSA_BITS) | __ieeeMantissa; // restore implicit bit
    }
    // Even significands may round to the interval bounds (round-to-even ties).
    const bool __even = (__m2 & 1) == 0;
    const bool __acceptBounds = __even;

    // Step 2: Determine the interval of valid decimal representations.
    // __mv is the value, __mp/__mm the upper/lower neighbors (all scaled by 4 for the 2 extra bits).
    const uint32_t __mv = 4 * __m2;
    const uint32_t __mp = 4 * __m2 + 2;
    // Implicit bool -> int conversion. True is 1, false is 0.
    // The lower gap is halved when the value sits on a binade boundary.
    const uint32_t __mmShift = __ieeeMantissa != 0 || __ieeeExponent <= 1;
    const uint32_t __mm = 4 * __m2 - 1 - __mmShift;

    // Step 3: Convert to a decimal power base using 64-bit arithmetic.
    // __vr/__vp/__vm are the value and the upper/lower interval bounds in decimal.
    uint32_t __vr, __vp, __vm;
    int32_t __e10;
    bool __vmIsTrailingZeros = false;
    bool __vrIsTrailingZeros = false;
    uint8_t __lastRemovedDigit = 0;
    if (__e2 >= 0) {
        const uint32_t __q = __log10Pow2(__e2);
        __e10 = static_cast<int32_t>(__q);
        const int32_t __k = __FLOAT_POW5_INV_BITCOUNT + __pow5bits(static_cast<int32_t>(__q)) - 1;
        const int32_t __i = -__e2 + static_cast<int32_t>(__q) + __k;
        __vr = __mulPow5InvDivPow2(__mv, __q, __i);
        __vp = __mulPow5InvDivPow2(__mp, __q, __i);
        __vm = __mulPow5InvDivPow2(__mm, __q, __i);
        if (__q != 0 && (__vp - 1) / 10 <= __vm / 10) {
            // We need to know one removed digit even if we are not going to loop below. We could use
            // __q = X - 1 above, except that would require 33 bits for the result, and we've found that
            // 32-bit arithmetic is faster even on 64-bit machines.
            const int32_t __l = __FLOAT_POW5_INV_BITCOUNT + __pow5bits(static_cast<int32_t>(__q - 1)) - 1;
            __lastRemovedDigit = static_cast<uint8_t>(__mulPow5InvDivPow2(__mv, __q - 1,
                -__e2 + static_cast<int32_t>(__q) - 1 + __l) % 10);
        }
        if (__q <= 9) {
            // The largest power of 5 that fits in 24 bits is 5^10, but __q <= 9 seems to be safe as well.
            // Only one of __mp, __mv, and __mm can be a multiple of 5, if any.
            if (__mv % 5 == 0) {
                __vrIsTrailingZeros = __multipleOfPowerOf5(__mv, __q);
            } else if (__acceptBounds) {
                __vmIsTrailingZeros = __multipleOfPowerOf5(__mm, __q);
            } else {
                __vp -= __multipleOfPowerOf5(__mp, __q); // bool -> 0/1: tighten the upper bound if exact
            }
        }
    } else {
        const uint32_t __q = __log10Pow5(-__e2);
        __e10 = static_cast<int32_t>(__q) + __e2;
        const int32_t __i = -__e2 - static_cast<int32_t>(__q);
        const int32_t __k = __pow5bits(__i) - __FLOAT_POW5_BITCOUNT;
        int32_t __j = static_cast<int32_t>(__q) - __k;
        __vr = __mulPow5divPow2(__mv, static_cast<uint32_t>(__i), __j);
        __vp = __mulPow5divPow2(__mp, static_cast<uint32_t>(__i), __j);
        __vm = __mulPow5divPow2(__mm, static_cast<uint32_t>(__i), __j);
        if (__q != 0 && (__vp - 1) / 10 <= __vm / 10) {
            __j = static_cast<int32_t>(__q) - 1 - (__pow5bits(__i + 1) - __FLOAT_POW5_BITCOUNT);
            __lastRemovedDigit = static_cast<uint8_t>(__mulPow5divPow2(__mv, static_cast<uint32_t>(__i + 1), __j) % 10);
        }
        if (__q <= 1) {
            // {__vr,__vp,__vm} is trailing zeros if {__mv,__mp,__mm} has at least __q trailing 0 bits.
            // __mv = 4 * __m2, so it always has at least two trailing 0 bits.
            __vrIsTrailingZeros = true;
            if (__acceptBounds) {
                // __mm = __mv - 1 - __mmShift, so it has 1 trailing 0 bit iff __mmShift == 1.
                __vmIsTrailingZeros = __mmShift == 1;
            } else {
                // __mp = __mv + 2, so it always has at least one trailing 0 bit.
                --__vp;
            }
        } else if (__q < 31) { // TRANSITION(ulfjack): Use a tighter bound here.
            __vrIsTrailingZeros = __multipleOfPowerOf2(__mv, __q - 1);
        }
    }

    // Step 4: Find the shortest decimal representation in the interval of valid representations.
    int32_t __removed = 0;
    uint32_t __output;
    if (__vmIsTrailingZeros || __vrIsTrailingZeros) {
        // General case, which happens rarely (~4.0%).
        while (__vp / 10 > __vm / 10) {
#ifdef __clang__ // TRANSITION, LLVM-23106
            __vmIsTrailingZeros &= __vm - (__vm / 10) * 10 == 0;
#else
            __vmIsTrailingZeros &= __vm % 10 == 0;
#endif
            __vrIsTrailingZeros &= __lastRemovedDigit == 0;
            __lastRemovedDigit = static_cast<uint8_t>(__vr % 10);
            __vr /= 10;
            __vp /= 10;
            __vm /= 10;
            ++__removed;
        }
        if (__vmIsTrailingZeros) {
            // Keep stripping digits while the lower bound stays exact.
            while (__vm % 10 == 0) {
                __vrIsTrailingZeros &= __lastRemovedDigit == 0;
                __lastRemovedDigit = static_cast<uint8_t>(__vr % 10);
                __vr /= 10;
                __vp /= 10;
                __vm /= 10;
                ++__removed;
            }
        }
        if (__vrIsTrailingZeros && __lastRemovedDigit == 5 && __vr % 2 == 0) {
            // Round even if the exact number is .....50..0.
            __lastRemovedDigit = 4;
        }
        // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
        __output = __vr + ((__vr == __vm && (!__acceptBounds || !__vmIsTrailingZeros)) || __lastRemovedDigit >= 5);
    } else {
        // Specialized for the common case (~96.0%). Percentages below are relative to this.
        // Loop iterations below (approximately):
        // 0: 13.6%, 1: 70.7%, 2: 14.1%, 3: 1.39%, 4: 0.14%, 5+: 0.01%
        while (__vp / 10 > __vm / 10) {
            __lastRemovedDigit = static_cast<uint8_t>(__vr % 10);
            __vr /= 10;
            __vp /= 10;
            __vm /= 10;
            ++__removed;
        }
        // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
        __output = __vr + (__vr == __vm || __lastRemovedDigit >= 5);
    }
    const int32_t __exp = __e10 + __removed;

    __floating_decimal_32 __fd;
    __fd.__exponent = __exp;
    __fd.__mantissa = __output;
    return __fd;
}
|
|
|
|
// Prints the exact integer _Mantissa2 * 2^_Exponent2 (a large exactly-representable float)
// into [_First, _Last) using 128-bit long division by 10^9. Returns the end of the output,
// or { _Last, errc::value_too_large } if the buffer is too small.
_NODISCARD inline to_chars_result _Large_integer_to_chars(char* const _First, char* const _Last,
    const uint32_t _Mantissa2, const int32_t _Exponent2) {

    // Print the integer _Mantissa2 * 2^_Exponent2 exactly.

    // For nonzero integers, _Exponent2 >= -23. (The minimum value occurs when _Mantissa2 * 2^_Exponent2 is 1.
    // In that case, _Mantissa2 is the implicit 1 bit followed by 23 zeros, so _Exponent2 is -23 to shift away
    // the zeros.) The dense range of exactly representable integers has negative or zero exponents
    // (as positive exponents make the range non-dense). For that dense range, Ryu will always be used:
    // every digit is necessary to uniquely identify the value, so Ryu must print them all.

    // Positive exponents are the non-dense range of exactly representable integers.
    // This contains all of the values for which Ryu can't be used (and a few Ryu-friendly values).

    // Performance note: Long division appears to be faster than losslessly widening float to double and calling
    // __d2fixed_buffered_n(). If __f2fixed_buffered_n() is implemented, it might be faster than long division.

    _STL_INTERNAL_CHECK(_Exponent2 > 0);
    _STL_INTERNAL_CHECK(_Exponent2 <= 104); // because __ieeeExponent <= 254

    // Manually represent _Mantissa2 * 2^_Exponent2 as a large integer. _Mantissa2 is always 24 bits
    // (due to the implicit bit), while _Exponent2 indicates a shift of at most 104 bits.
    // 24 + 104 equals 128 equals 4 * 32, so we need exactly 4 32-bit elements.
    // We use a little-endian representation, visualized like this:

    // << left shift <<
    // most significant
    // _Data[3] _Data[2] _Data[1] _Data[0]
    // least significant
    // >> right shift >>

    constexpr uint32_t _Data_size = 4;
    uint32_t _Data[_Data_size]{};

    // _Maxidx is the index of the most significant nonzero element.
    uint32_t _Maxidx = ((24 + static_cast<uint32_t>(_Exponent2) + 31) / 32) - 1;
    _STL_INTERNAL_CHECK(_Maxidx < _Data_size);

    const uint32_t _Bit_shift = static_cast<uint32_t>(_Exponent2) % 32;
    if (_Bit_shift <= 8) { // _Mantissa2's 24 bits don't cross an element boundary
        _Data[_Maxidx] = _Mantissa2 << _Bit_shift;
    } else { // _Mantissa2's 24 bits cross an element boundary
        _Data[_Maxidx - 1] = _Mantissa2 << _Bit_shift;
        _Data[_Maxidx] = _Mantissa2 >> (32 - _Bit_shift);
    }

    // If Ryu hasn't determined the total output length, we need to buffer the digits generated from right to left
    // by long division. The largest possible float is: 340'282346638'528859811'704183484'516925440
    uint32_t _Blocks[4];
    int32_t _Filled_blocks = 0;
    // From left to right, we're going to print:
    // _Data[0] will be [1, 10] digits.
    // Then if _Filled_blocks > 0:
    // _Blocks[_Filled_blocks - 1], ..., _Blocks[0] will be 0-filled 9-digit blocks.

    if (_Maxidx != 0) { // If the integer is actually large, perform long division.
        // Otherwise, skip to printing _Data[0].
        for (;;) {
            // Loop invariant: _Maxidx != 0 (i.e. the integer is actually large)

            // Divide the whole multi-word integer by 10^9, starting at the most significant element.
            const uint32_t _Most_significant_elem = _Data[_Maxidx];
            const uint32_t _Initial_remainder = _Most_significant_elem % 1000000000;
            const uint32_t _Initial_quotient = _Most_significant_elem / 1000000000;
            _Data[_Maxidx] = _Initial_quotient;
            uint64_t _Remainder = _Initial_remainder;

            // Process less significant elements.
            uint32_t _Idx = _Maxidx;
            do {
                --_Idx; // Initially, _Remainder is at most 10^9 - 1.

                // Now, _Remainder is at most (10^9 - 1) * 2^32 + 2^32 - 1, simplified to 10^9 * 2^32 - 1.
                _Remainder = (_Remainder << 32) | _Data[_Idx];

                // floor((10^9 * 2^32 - 1) / 10^9) == 2^32 - 1, so uint32_t _Quotient is lossless.
                const uint32_t _Quotient = static_cast<uint32_t>(__div1e9(_Remainder));

                // _Remainder is at most 10^9 - 1 again.
                // For uint32_t truncation, see the __mod1e9() comment in d2s_intrinsics.h.
                _Remainder = static_cast<uint32_t>(_Remainder) - 1000000000u * _Quotient;

                _Data[_Idx] = _Quotient;
            } while (_Idx != 0);

            // Store a 0-filled 9-digit block.
            _Blocks[_Filled_blocks++] = static_cast<uint32_t>(_Remainder);

            if (_Initial_quotient == 0) { // Is the large integer shrinking?
                --_Maxidx; // log2(10^9) is 29.9, so we can't shrink by more than one element.
                if (_Maxidx == 0) {
                    break; // We've finished long division. Now we need to print _Data[0].
                }
            }
        }
    }

    _STL_INTERNAL_CHECK(_Data[0] != 0);
    for (uint32_t _Idx = 1; _Idx < _Data_size; ++_Idx) {
        _STL_INTERNAL_CHECK(_Data[_Idx] == 0);
    }

    const uint32_t _Data_olength = _Data[0] >= 1000000000 ? 10 : __decimalLength9(_Data[0]);
    const uint32_t _Total_fixed_length = _Data_olength + 9 * _Filled_blocks;

    if (_Last - _First < static_cast<ptrdiff_t>(_Total_fixed_length)) {
        return { _Last, errc::value_too_large };
    }

    char* _Result = _First;

    // Print _Data[0]. While it's up to 10 digits,
    // which is more than Ryu generates, the code below can handle this.
    __append_n_digits(_Data_olength, _Data[0], _Result);
    _Result += _Data_olength;

    // Print 0-filled 9-digit blocks.
    for (int32_t _Idx = _Filled_blocks - 1; _Idx >= 0; --_Idx) {
        __append_nine_digits(_Blocks[_Idx], _Result);
        _Result += 9;
    }

    return { _Result, errc{} };
}
|
|
|
|
// Step 5 of Ryu for float: prints the decimal representation __v (produced by __f2d)
// into [_First, _Last) in the requested format. chars_format{} selects the shorter of
// fixed/scientific (plain-shortest); general applies the printf %g exponent rule.
// The raw IEEE fields are passed through for the exact-integer fallback.
_NODISCARD inline to_chars_result __to_chars(char* const _First, char* const _Last, const __floating_decimal_32 __v,
    chars_format _Fmt, const uint32_t __ieeeMantissa, const uint32_t __ieeeExponent) {
    // Step 5: Print the decimal representation.
    uint32_t __output = __v.__mantissa;
    int32_t _Ryu_exponent = __v.__exponent;
    const uint32_t __olength = __decimalLength9(__output);
    int32_t _Scientific_exponent = _Ryu_exponent + static_cast<int32_t>(__olength) - 1;

    if (_Fmt == chars_format{}) {
        // Plain-shortest mode: pick whichever of fixed/scientific is shorter
        // (ties go to fixed), based on the exponent window below.
        int32_t _Lower;
        int32_t _Upper;

        if (__olength == 1) {
            // Value | Fixed   | Scientific
            // 1e-3  | "0.001" | "1e-03"
            // 1e4   | "10000" | "1e+04"
            _Lower = -3;
            _Upper = 4;
        } else {
            // Value   | Fixed       | Scientific
            // 1234e-7 | "0.0001234" | "1.234e-04"
            // 1234e5  | "123400000" | "1.234e+08"
            _Lower = -static_cast<int32_t>(__olength + 3);
            _Upper = 5;
        }

        if (_Lower <= _Ryu_exponent && _Ryu_exponent <= _Upper) {
            _Fmt = chars_format::fixed;
        } else {
            _Fmt = chars_format::scientific;
        }
    } else if (_Fmt == chars_format::general) {
        // C11 7.21.6.1 "The fprintf function"/8:
        // "Let P equal [...] 6 if the precision is omitted [...].
        // Then, if a conversion with style E would have an exponent of X:
        // - if P > X >= -4, the conversion is with style f [...].
        // - otherwise, the conversion is with style e [...]."
        if (-4 <= _Scientific_exponent && _Scientific_exponent < 6) {
            _Fmt = chars_format::fixed;
        } else {
            _Fmt = chars_format::scientific;
        }
    }

    if (_Fmt == chars_format::fixed) {
        // Example: __output == 1729, __olength == 4

        // _Ryu_exponent | Printed  | _Whole_digits | _Total_fixed_length  | Notes
        // --------------|----------|---------------|----------------------|---------------------------------------
        // 2             | 172900   | 6             | _Whole_digits        | Ryu can't be used for printing
        // 1             | 17290    | 5             | (sometimes adjusted) | when the trimmed digits are nonzero.
        // --------------|----------|---------------|----------------------|---------------------------------------
        // 0             | 1729     | 4             | _Whole_digits        | Unified length cases.
        // --------------|----------|---------------|----------------------|---------------------------------------
        // -1            | 172.9    | 3             | __olength + 1        | This case can't happen for
        // -2            | 17.29    | 2             |                      | __olength == 1, but no additional
        // -3            | 1.729    | 1             |                      | code is needed to avoid it.
        // --------------|----------|---------------|----------------------|---------------------------------------
        // -4            | 0.1729   | 0             | 2 - _Ryu_exponent    | C11 7.21.6.1 "The fprintf function"/8:
        // -5            | 0.01729  | -1            |                      | "If a decimal-point character appears,
        // -6            | 0.001729 | -2            |                      | at least one digit appears before it."

        const int32_t _Whole_digits = static_cast<int32_t>(__olength) + _Ryu_exponent;

        uint32_t _Total_fixed_length;
        if (_Ryu_exponent >= 0) { // cases "172900" and "1729"
            _Total_fixed_length = static_cast<uint32_t>(_Whole_digits);
            if (__output == 1) {
                // Rounding can affect the number of digits.
                // For example, 1e11f is exactly "99999997952" which is 11 digits instead of 12.
                // We can use a lookup table to detect this and adjust the total length.
                static constexpr uint8_t _Adjustment[39] = {
                    0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,1,1,0,1,0,1,1,0,0,1,0,1,1,0,1,1,1 };
                _Total_fixed_length -= _Adjustment[_Ryu_exponent];
                // _Whole_digits doesn't need to be adjusted because these cases won't refer to it later.
            }
        } else if (_Whole_digits > 0) { // case "17.29"
            _Total_fixed_length = __olength + 1;
        } else { // case "0.001729"
            _Total_fixed_length = static_cast<uint32_t>(2 - _Ryu_exponent);
        }

        if (_Last - _First < static_cast<ptrdiff_t>(_Total_fixed_length)) {
            return { _Last, errc::value_too_large };
        }

        char* _Mid;
        if (_Ryu_exponent > 0) { // case "172900"
            bool _Can_use_ryu;

            if (_Ryu_exponent > 10) { // 10^10 is the largest power of 10 that's exactly representable as a float.
                _Can_use_ryu = false;
            } else {
                // Ryu generated X: __v.__mantissa * 10^_Ryu_exponent
                // __v.__mantissa == 2^_Trailing_zero_bits * (__v.__mantissa >> _Trailing_zero_bits)
                // 10^_Ryu_exponent == 2^_Ryu_exponent * 5^_Ryu_exponent

                // _Trailing_zero_bits is [0, 29] (aside: because 2^29 is the largest power of 2
                // with 9 decimal digits, which is float's round-trip limit.)
                // _Ryu_exponent is [1, 10].
                // Normalization adds [2, 23] (aside: at least 2 because the pre-normalized mantissa is at least 5).
                // This adds up to [3, 62], which is well below float's maximum binary exponent 127.

                // Therefore, we just need to consider (__v.__mantissa >> _Trailing_zero_bits) * 5^_Ryu_exponent.

                // If that product would exceed 24 bits, then X can't be exactly represented as a float.
                // (That's not a problem for round-tripping, because X is close enough to the original float,
                // but X isn't mathematically equal to the original float.) This requires a high-precision fallback.

                // If the product is 24 bits or smaller, then X can be exactly represented as a float (and we don't
                // need to re-synthesize it; the original float must have been X, because Ryu wouldn't produce the
                // same output for two different floats X and Y). This allows Ryu's output to be used (zero-filled).

                // (2^24 - 1) / 5^0 (for indexing), (2^24 - 1) / 5^1, ..., (2^24 - 1) / 5^10
                static constexpr uint32_t _Max_shifted_mantissa[11] = {
                    16777215, 3355443, 671088, 134217, 26843, 5368, 1073, 214, 42, 8, 1 };

                unsigned long _Trailing_zero_bits;
                (void) _BitScanForward(&_Trailing_zero_bits, __v.__mantissa); // __v.__mantissa is guaranteed nonzero
                const uint32_t _Shifted_mantissa = __v.__mantissa >> _Trailing_zero_bits;
                _Can_use_ryu = _Shifted_mantissa <= _Max_shifted_mantissa[_Ryu_exponent];
            }

            if (!_Can_use_ryu) {
                const uint32_t _Mantissa2 = __ieeeMantissa | (1u << __FLOAT_MANTISSA_BITS); // restore implicit bit
                const int32_t _Exponent2 = static_cast<int32_t>(__ieeeExponent)
                    - __FLOAT_BIAS - __FLOAT_MANTISSA_BITS; // bias and normalization

                // Performance note: We've already called Ryu, so this will redundantly perform buffering and bounds checking.
                return _Large_integer_to_chars(_First, _Last, _Mantissa2, _Exponent2);
            }

            // _Can_use_ryu
            // Print the decimal digits, left-aligned within [_First, _First + _Total_fixed_length).
            _Mid = _First + __olength;
        } else { // cases "1729", "17.29", and "0.001729"
            // Print the decimal digits, right-aligned within [_First, _First + _Total_fixed_length).
            _Mid = _First + _Total_fixed_length;
        }

        // Emit digits right-to-left from _Mid, two at a time via __DIGIT_TABLE.
        while (__output >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
            const uint32_t __c = __output - 10000 * (__output / 10000);
#else
            const uint32_t __c = __output % 10000;
#endif
            __output /= 10000;
            const uint32_t __c0 = (__c % 100) << 1;
            const uint32_t __c1 = (__c / 100) << 1;
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
        }
        if (__output >= 100) {
            const uint32_t __c = (__output % 100) << 1;
            __output /= 100;
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
        }
        if (__output >= 10) {
            const uint32_t __c = __output << 1;
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
        } else {
            *--_Mid = static_cast<char>('0' + __output);
        }

        if (_Ryu_exponent > 0) { // case "172900" with _Can_use_ryu
            // Performance note: it might be more efficient to do this immediately after setting _Mid.
            _CSTD memset(_First + __olength, '0', static_cast<size_t>(_Ryu_exponent));
        } else if (_Ryu_exponent == 0) { // case "1729"
            // Done!
        } else if (_Whole_digits > 0) { // case "17.29"
            // Performance note: moving digits might not be optimal.
            _CSTD memmove(_First, _First + 1, static_cast<size_t>(_Whole_digits));
            _First[_Whole_digits] = '.';
        } else { // case "0.001729"
            // Performance note: a larger memset() followed by overwriting '.' might be more efficient.
            _First[0] = '0';
            _First[1] = '.';
            _CSTD memset(_First + 2, '0', static_cast<size_t>(-_Whole_digits));
        }

        return { _First + _Total_fixed_length, errc{} };
    }

    // Scientific format from here on.
    const uint32_t _Total_scientific_length =
        __olength + (__olength > 1) + 4; // digits + possible decimal point + scientific exponent
    if (_Last - _First < static_cast<ptrdiff_t>(_Total_scientific_length)) {
        return { _Last, errc::value_too_large };
    }
    char* const __result = _First;

    // Print the decimal digits.
    uint32_t __i = 0;
    while (__output >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
        const uint32_t __c = __output - 10000 * (__output / 10000);
#else
        const uint32_t __c = __output % 10000;
#endif
        __output /= 10000;
        const uint32_t __c0 = (__c % 100) << 1;
        const uint32_t __c1 = (__c / 100) << 1;
        _CSTD memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
        _CSTD memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
        __i += 4;
    }
    if (__output >= 100) {
        const uint32_t __c = (__output % 100) << 1;
        __output /= 100;
        _CSTD memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c, 2);
        __i += 2;
    }
    if (__output >= 10) {
        const uint32_t __c = __output << 1;
        // We can't use memcpy here: the decimal dot goes between these two digits.
        __result[2] = __DIGIT_TABLE[__c + 1];
        __result[0] = __DIGIT_TABLE[__c];
    } else {
        __result[0] = static_cast<char>('0' + __output);
    }

    // Print decimal point if needed.
    uint32_t __index;
    if (__olength > 1) {
        __result[1] = '.';
        __index = __olength + 1;
    } else {
        __index = 1;
    }

    // Print the exponent. float's scientific exponent has at most 2 digits.
    __result[__index++] = 'e';
    if (_Scientific_exponent < 0) {
        __result[__index++] = '-';
        _Scientific_exponent = -_Scientific_exponent;
    } else {
        __result[__index++] = '+';
    }

    _CSTD memcpy(__result + __index, __DIGIT_TABLE + 2 * _Scientific_exponent, 2);
    __index += 2;

    return { _First + _Total_scientific_length, errc{} };
}
|
|
|
|
// Entry point for shortest-round-trip float formatting: handles zero specially,
// decodes the IEEE bit pattern, optionally takes the exact-integer fast path for
// chars_format::fixed, and otherwise runs Ryu (__f2d) followed by printing (__to_chars).
// Precondition (from the visible call sites): __f is finite; infinities/NaN are
// presumably handled by the caller — the zero check below is the only special case here.
_NODISCARD inline to_chars_result __f2s_buffered_n(char* const _First, char* const _Last, const float __f,
    const chars_format _Fmt) {

    // Step 1: Decode the floating-point number, and unify normalized and subnormal cases.
    const uint32_t __bits = __float_to_bits(__f);

    // Case distinction; exit early for the easy cases.
    if (__bits == 0) {
        if (_Fmt == chars_format::scientific) {
            if (_Last - _First < 5) {
                return { _Last, errc::value_too_large };
            }

            _CSTD memcpy(_First, "0e+00", 5);

            return { _First + 5, errc{} };
        }

        // Print "0" for chars_format::fixed, chars_format::general, and chars_format{}.
        if (_First == _Last) {
            return { _Last, errc::value_too_large };
        }

        *_First = '0';

        return { _First + 1, errc{} };
    }

    // Decode __bits into mantissa and exponent.
    const uint32_t __ieeeMantissa = __bits & ((1u << __FLOAT_MANTISSA_BITS) - 1);
    const uint32_t __ieeeExponent = __bits >> __FLOAT_MANTISSA_BITS;

    // When _Fmt == chars_format::fixed and the floating-point number is a large integer,
    // it's faster to skip Ryu and immediately print the integer exactly.
    if (_Fmt == chars_format::fixed) {
        const uint32_t _Mantissa2 = __ieeeMantissa | (1u << __FLOAT_MANTISSA_BITS); // restore implicit bit
        const int32_t _Exponent2 = static_cast<int32_t>(__ieeeExponent)
            - __FLOAT_BIAS - __FLOAT_MANTISSA_BITS; // bias and normalization

        // Normal values are equal to _Mantissa2 * 2^_Exponent2.
        // (Subnormals are different, but they'll be rejected by the _Exponent2 test here, so they can be ignored.)

        if (_Exponent2 > 0) {
            return _Large_integer_to_chars(_First, _Last, _Mantissa2, _Exponent2);
        }
    }

    const __floating_decimal_32 __v = __f2d(__ieeeMantissa, __ieeeExponent);
    return __to_chars(_First, _Last, __v, _Fmt, __ieeeMantissa, __ieeeExponent);
}
|
|
|
|
// ^^^^^^^^^^ DERIVED FROM f2s.c ^^^^^^^^^^
|
|
|
|
// vvvvvvvvvv DERIVED FROM d2s.c vvvvvvvvvv
|
|
|
|
// We need a 64x128-bit multiplication and a subsequent 128-bit shift.
|
|
// Multiplication:
|
|
// The 64-bit factor is variable and passed in, the 128-bit factor comes
|
|
// from a lookup table. We know that the 64-bit factor only has 55
|
|
// significant bits (i.e., the 9 topmost bits are zeros). The 128-bit
|
|
// factor only has 124 significant bits (i.e., the 4 topmost bits are
|
|
// zeros).
|
|
// Shift:
|
|
// In principle, the multiplication result requires 55 + 124 = 179 bits to
|
|
// represent. However, we then shift this value to the right by __j, which is
|
|
// at least __j >= 115, so the result is guaranteed to fit into 179 - 115 = 64
|
|
// bits. This means that we only need the topmost 64 significant bits of
|
|
// the 64x128-bit multiplication.
|
|
//
|
|
// There are several ways to do this:
|
|
// 1. Best case: the compiler exposes a 128-bit type.
|
|
// We perform two 64x64-bit multiplications, add the higher 64 bits of the
|
|
// lower result to the higher result, and shift by __j - 64 bits.
|
|
//
|
|
// We explicitly cast from 64-bit to 128-bit, so the compiler can tell
|
|
// that these are only 64-bit inputs, and can map these to the best
|
|
// possible sequence of assembly instructions.
|
|
// x64 machines happen to have matching assembly instructions for
|
|
// 64x64-bit multiplications and 128-bit shifts.
|
|
//
|
|
// 2. Second best case: the compiler exposes intrinsics for the x64 assembly
|
|
// instructions mentioned in 1.
|
|
//
|
|
// 3. We only have 64x64 bit instructions that return the lower 64 bits of
|
|
// the result, i.e., we have to use plain C.
|
|
// Our inputs are less than the full width, so we have three options:
|
|
// a. Ignore this fact and just implement the intrinsics manually.
|
|
// b. Split both into 31-bit pieces, which guarantees no internal overflow,
|
|
// but requires extra work upfront (unless we change the lookup table).
|
|
// c. Split only the first factor into 31-bit pieces, which also guarantees
|
|
// no internal overflow, but requires extra work since the intermediate
|
|
// results are not perfectly aligned.
|
|
#ifdef _M_X64
|
|
|
|
_NODISCARD inline uint64_t __mulShift(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
    // Multiplies the (at most 55-bit) factor __m by the 128-bit factor stored in __mul[0..1]
    // (low half first), then shifts the product right by __j and returns the low 64 bits.
    // __m is maximum 55 bits
    uint64_t __prod_hi; // high 64 bits of __m * __mul[1]
    const uint64_t __prod_lo = __ryu_umul128(__m, __mul[1], &__prod_hi);
    uint64_t __low_hi; // high 64 bits of __m * __mul[0]; the low half lies entirely below the kept bits
    (void) __ryu_umul128(__m, __mul[0], &__low_hi);
    const uint64_t __middle = __low_hi + __prod_lo;
    __prod_hi += (__middle < __low_hi); // branchless carry into the top limb
    return __ryu_shiftright128(__middle, __prod_hi, static_cast<uint32_t>(__j - 64));
}
|
|
|
|
_NODISCARD inline uint64_t __mulShiftAll(const uint64_t __m, const uint64_t* const __mul, const int32_t __j,
    uint64_t* const __vp, uint64_t* const __vm, const uint32_t __mmShift) {
    // Computes the three shifted products Ryu needs in one call:
    // the upper bound (__mp = 4m + 2) into *__vp, the lower bound (__mm = 4m - 1 - __mmShift)
    // into *__vm, and the midpoint (__mv = 4m), which is returned.
    const uint64_t __mv = 4 * __m;
    *__vp = __mulShift(__mv + 2, __mul, __j);
    *__vm = __mulShift(__mv - 1 - __mmShift, __mul, __j);
    return __mulShift(__mv, __mul, __j);
}
|
|
|
|
#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv
|
|
|
|
// Portable (no 128-bit intrinsics) variant: computes the three shifted products in one pass
// by forming 2m * __mul as a 192-bit value (__lo, __mid, __hi), then deriving the bounds
// (2m +/- something) * __mul by adding/subtracting __mul from the limbs with manual carry/borrow.
// Note: because __m is pre-doubled here, the shift amounts use __j - 64 - 1 instead of __j - 64.
_NODISCARD __forceinline uint64_t __mulShiftAll(uint64_t __m, const uint64_t* const __mul, const int32_t __j,
    uint64_t* const __vp, uint64_t* const __vm, const uint32_t __mmShift) { // TRANSITION, VSO-634761
    __m <<= 1;
    // __m is maximum 55 bits
    uint64_t __tmp;
    const uint64_t __lo = __ryu_umul128(__m, __mul[0], &__tmp);
    uint64_t __hi;
    const uint64_t __mid = __tmp + __ryu_umul128(__m, __mul[1], &__hi);
    __hi += __mid < __tmp; // overflow into __hi

    // Upper bound: (2m + 1) * __mul == (__lo, __mid, __hi) + __mul, with carry propagation.
    const uint64_t __lo2 = __lo + __mul[0];
    const uint64_t __mid2 = __mid + __mul[1] + (__lo2 < __lo);
    const uint64_t __hi2 = __hi + (__mid2 < __mid);
    *__vp = __ryu_shiftright128(__mid2, __hi2, static_cast<uint32_t>(__j - 64 - 1));

    if (__mmShift == 1) {
        // Lower bound: (2m - 1) * __mul == (__lo, __mid, __hi) - __mul, with borrow propagation.
        const uint64_t __lo3 = __lo - __mul[0];
        const uint64_t __mid3 = __mid - __mul[1] - (__lo3 > __lo);
        const uint64_t __hi3 = __hi - (__mid3 > __mid);
        *__vm = __ryu_shiftright128(__mid3, __hi3, static_cast<uint32_t>(__j - 64 - 1));
    } else {
        // Lower bound: (4m - 1) * __mul == 2 * (__lo, __mid, __hi) - __mul;
        // double the limbs first, then subtract __mul. The final shift uses __j - 64
        // (one less doubling than the other results) to compensate.
        const uint64_t __lo3 = __lo + __lo;
        const uint64_t __mid3 = __mid + __mid + (__lo3 < __lo);
        const uint64_t __hi3 = __hi + __hi + (__mid3 < __mid);
        const uint64_t __lo4 = __lo3 - __mul[0];
        const uint64_t __mid4 = __mid3 - __mul[1] - (__lo4 > __lo3);
        const uint64_t __hi4 = __hi3 - (__mid4 > __mid3);
        *__vm = __ryu_shiftright128(__mid4, __hi4, static_cast<uint32_t>(__j - 64));
    }

    // Midpoint: 2m * __mul, shifted.
    return __ryu_shiftright128(__mid, __hi, static_cast<uint32_t>(__j - 64 - 1));
}
|
|
|
|
#endif // ^^^ intrinsics unavailable ^^^
|
|
|
|
// Returns the number of decimal digits in __v, for values up to 17 digits.
_NODISCARD inline uint32_t __decimalLength17(const uint64_t __v) {
    // This is slightly faster than a loop.
    // The average output length is 16.38 digits, so we check high-to-low.
    // Function precondition: __v is not an 18, 19, or 20-digit number.
    // (17 digits are sufficient for round-tripping.)
    _STL_INTERNAL_CHECK(__v < 100000000000000000u);
    if (__v >= 10000000000000000u) { return 17; }
    if (__v >= 1000000000000000u) { return 16; }
    if (__v >= 100000000000000u) { return 15; }
    if (__v >= 10000000000000u) { return 14; }
    if (__v >= 1000000000000u) { return 13; }
    if (__v >= 100000000000u) { return 12; }
    if (__v >= 10000000000u) { return 11; }
    if (__v >= 1000000000u) { return 10; }
    if (__v >= 100000000u) { return 9; }
    if (__v >= 10000000u) { return 8; }
    if (__v >= 1000000u) { return 7; }
    if (__v >= 100000u) { return 6; }
    if (__v >= 10000u) { return 5; }
    if (__v >= 1000u) { return 4; }
    if (__v >= 100u) { return 3; }
    if (__v >= 10u) { return 2; }
    return 1;
}
|
|
|
|
// A floating decimal representing m * 10^e.
struct __floating_decimal_64 {
    uint64_t __mantissa; // m: decimal significand, at most 17 digits
    int32_t __exponent; // e: power of 10
};
|
|
|
|
// Core Ryu algorithm for double: converts the raw IEEE-754 mantissa/exponent into the
// shortest decimal representation (__mantissa * 10^__exponent) that round-trips back
// to the same double. Steps are numbered to match the Ryu paper.
_NODISCARD inline __floating_decimal_64 __d2d(const uint64_t __ieeeMantissa, const uint32_t __ieeeExponent) {
    // Step 1: Decode the binary value, unifying normal and subnormal cases.
    int32_t __e2;
    uint64_t __m2;
    if (__ieeeExponent == 0) {
        // We subtract 2 so that the bounds computation has 2 additional bits.
        __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
        __m2 = __ieeeMantissa;
    } else {
        __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
        __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
    }
    const bool __even = (__m2 & 1) == 0;
    const bool __acceptBounds = __even; // round-to-even: interval endpoints are valid only for even mantissas

    // Step 2: Determine the interval of valid decimal representations.
    const uint64_t __mv = 4 * __m2;
    // Implicit bool -> int conversion. True is 1, false is 0.
    const uint32_t __mmShift = __ieeeMantissa != 0 || __ieeeExponent <= 1;
    // We would compute __mp and __mm like this:
    // uint64_t __mp = 4 * __m2 + 2;
    // uint64_t __mm = __mv - 1 - __mmShift;

    // Step 3: Convert to a decimal power base using 128-bit arithmetic.
    uint64_t __vr, __vp, __vm;
    int32_t __e10;
    bool __vmIsTrailingZeros = false;
    bool __vrIsTrailingZeros = false;
    if (__e2 >= 0) {
        // I tried special-casing __q == 0, but there was no effect on performance.
        // This expression is slightly faster than max(0, __log10Pow2(__e2) - 1).
        const uint32_t __q = __log10Pow2(__e2) - (__e2 > 3);
        __e10 = static_cast<int32_t>(__q);
        const int32_t __k = __DOUBLE_POW5_INV_BITCOUNT + __pow5bits(static_cast<int32_t>(__q)) - 1;
        const int32_t __i = -__e2 + static_cast<int32_t>(__q) + __k;
        __vr = __mulShiftAll(__m2, __DOUBLE_POW5_INV_SPLIT[__q], __i, &__vp, &__vm, __mmShift);
        if (__q <= 21) {
            // This should use __q <= 22, but I think 21 is also safe. Smaller values
            // may still be safe, but it's more difficult to reason about them.
            // Only one of __mp, __mv, and __mm can be a multiple of 5, if any.
            const uint32_t __mvMod5 = static_cast<uint32_t>(__mv) - 5 * static_cast<uint32_t>(__div5(__mv));
            if (__mvMod5 == 0) {
                __vrIsTrailingZeros = __multipleOfPowerOf5(__mv, __q);
            } else if (__acceptBounds) {
                // Same as min(__e2 + (~__mm & 1), __pow5Factor(__mm)) >= __q
                // <=> __e2 + (~__mm & 1) >= __q && __pow5Factor(__mm) >= __q
                // <=> true && __pow5Factor(__mm) >= __q, since __e2 >= __q.
                __vmIsTrailingZeros = __multipleOfPowerOf5(__mv - 1 - __mmShift, __q);
            } else {
                // Same as min(__e2 + 1, __pow5Factor(__mp)) >= __q.
                __vp -= __multipleOfPowerOf5(__mv + 2, __q);
            }
        }
    } else {
        // This expression is slightly faster than max(0, __log10Pow5(-__e2) - 1).
        const uint32_t __q = __log10Pow5(-__e2) - (-__e2 > 1);
        __e10 = static_cast<int32_t>(__q) + __e2;
        const int32_t __i = -__e2 - static_cast<int32_t>(__q);
        const int32_t __k = __pow5bits(__i) - __DOUBLE_POW5_BITCOUNT;
        const int32_t __j = static_cast<int32_t>(__q) - __k;
        __vr = __mulShiftAll(__m2, __DOUBLE_POW5_SPLIT[__i], __j, &__vp, &__vm, __mmShift);
        if (__q <= 1) {
            // {__vr,__vp,__vm} is trailing zeros if {__mv,__mp,__mm} has at least __q trailing 0 bits.
            // __mv = 4 * __m2, so it always has at least two trailing 0 bits.
            __vrIsTrailingZeros = true;
            if (__acceptBounds) {
                // __mm = __mv - 1 - __mmShift, so it has 1 trailing 0 bit iff __mmShift == 1.
                __vmIsTrailingZeros = __mmShift == 1;
            } else {
                // __mp = __mv + 2, so it always has at least one trailing 0 bit.
                --__vp;
            }
        } else if (__q < 63) { // TRANSITION(ulfjack): Use a tighter bound here.
            // We need to compute min(ntz(__mv), __pow5Factor(__mv) - __e2) >= __q - 1
            // <=> ntz(__mv) >= __q - 1 && __pow5Factor(__mv) - __e2 >= __q - 1
            // <=> ntz(__mv) >= __q - 1 (__e2 is negative and -__e2 >= __q)
            // <=> (__mv & ((1 << (__q - 1)) - 1)) == 0
            // We also need to make sure that the left shift does not overflow.
            __vrIsTrailingZeros = __multipleOfPowerOf2(__mv, __q - 1);
        }
    }

    // Step 4: Find the shortest decimal representation in the interval of valid representations.
    int32_t __removed = 0;
    uint8_t __lastRemovedDigit = 0;
    uint64_t __output;
    // On average, we remove ~2 digits.
    if (__vmIsTrailingZeros || __vrIsTrailingZeros) {
        // General case, which happens rarely (~0.7%).
        for (;;) {
            const uint64_t __vpDiv10 = __div10(__vp);
            const uint64_t __vmDiv10 = __div10(__vm);
            if (__vpDiv10 <= __vmDiv10) {
                break;
            }
            const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
            const uint64_t __vrDiv10 = __div10(__vr);
            const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
            __vmIsTrailingZeros &= __vmMod10 == 0;
            __vrIsTrailingZeros &= __lastRemovedDigit == 0;
            __lastRemovedDigit = static_cast<uint8_t>(__vrMod10);
            __vr = __vrDiv10;
            __vp = __vpDiv10;
            __vm = __vmDiv10;
            ++__removed;
        }
        if (__vmIsTrailingZeros) {
            for (;;) {
                const uint64_t __vmDiv10 = __div10(__vm);
                const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
                if (__vmMod10 != 0) {
                    break;
                }
                const uint64_t __vpDiv10 = __div10(__vp);
                const uint64_t __vrDiv10 = __div10(__vr);
                const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
                __vrIsTrailingZeros &= __lastRemovedDigit == 0;
                __lastRemovedDigit = static_cast<uint8_t>(__vrMod10);
                __vr = __vrDiv10;
                __vp = __vpDiv10;
                __vm = __vmDiv10;
                ++__removed;
            }
        }
        if (__vrIsTrailingZeros && __lastRemovedDigit == 5 && __vr % 2 == 0) {
            // Round even if the exact number is .....50..0.
            __lastRemovedDigit = 4;
        }
        // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
        __output = __vr + ((__vr == __vm && (!__acceptBounds || !__vmIsTrailingZeros)) || __lastRemovedDigit >= 5);
    } else {
        // Specialized for the common case (~99.3%). Percentages below are relative to this.
        bool __roundUp = false;
        const uint64_t __vpDiv100 = __div100(__vp);
        const uint64_t __vmDiv100 = __div100(__vm);
        if (__vpDiv100 > __vmDiv100) { // Optimization: remove two digits at a time (~86.2%).
            const uint64_t __vrDiv100 = __div100(__vr);
            const uint32_t __vrMod100 = static_cast<uint32_t>(__vr) - 100 * static_cast<uint32_t>(__vrDiv100);
            __roundUp = __vrMod100 >= 50;
            __vr = __vrDiv100;
            __vp = __vpDiv100;
            __vm = __vmDiv100;
            __removed += 2;
        }
        // Loop iterations below (approximately), without optimization above:
        // 0: 0.03%, 1: 13.8%, 2: 70.6%, 3: 14.0%, 4: 1.40%, 5: 0.14%, 6+: 0.02%
        // Loop iterations below (approximately), with optimization above:
        // 0: 70.6%, 1: 27.8%, 2: 1.40%, 3: 0.14%, 4+: 0.02%
        for (;;) {
            const uint64_t __vpDiv10 = __div10(__vp);
            const uint64_t __vmDiv10 = __div10(__vm);
            if (__vpDiv10 <= __vmDiv10) {
                break;
            }
            const uint64_t __vrDiv10 = __div10(__vr);
            const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
            __roundUp = __vrMod10 >= 5;
            __vr = __vrDiv10;
            __vp = __vpDiv10;
            __vm = __vmDiv10;
            ++__removed;
        }
        // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
        __output = __vr + (__vr == __vm || __roundUp);
    }
    const int32_t __exp = __e10 + __removed;

    __floating_decimal_64 __fd;
    __fd.__exponent = __exp;
    __fd.__mantissa = __output;
    return __fd;
}
|
|
|
|
// Prints the decimal representation __v into [_First, _Last), choosing fixed or scientific
// notation according to _Fmt (resolving chars_format{} and chars_format::general here).
// __f (the original double) is needed only for the exact-integer fallback when Ryu's
// shortest output can't represent a large fixed-notation integer exactly.
_NODISCARD inline to_chars_result __to_chars(char* const _First, char* const _Last, const __floating_decimal_64 __v,
    chars_format _Fmt, const double __f) {
    // Step 5: Print the decimal representation.
    uint64_t __output = __v.__mantissa;
    int32_t _Ryu_exponent = __v.__exponent;
    const uint32_t __olength = __decimalLength17(__output);
    int32_t _Scientific_exponent = _Ryu_exponent + static_cast<int32_t>(__olength) - 1;

    if (_Fmt == chars_format{}) {
        // Plain overload: pick whichever notation is shorter, with fixed winning ties.
        int32_t _Lower;
        int32_t _Upper;

        if (__olength == 1) {
            // Value | Fixed | Scientific
            // 1e-3 | "0.001" | "1e-03"
            // 1e4 | "10000" | "1e+04"
            _Lower = -3;
            _Upper = 4;
        } else {
            // Value | Fixed | Scientific
            // 1234e-7 | "0.0001234" | "1.234e-04"
            // 1234e5 | "123400000" | "1.234e+08"
            _Lower = -static_cast<int32_t>(__olength + 3);
            _Upper = 5;
        }

        if (_Lower <= _Ryu_exponent && _Ryu_exponent <= _Upper) {
            _Fmt = chars_format::fixed;
        } else {
            _Fmt = chars_format::scientific;
        }
    } else if (_Fmt == chars_format::general) {
        // C11 7.21.6.1 "The fprintf function"/8:
        // "Let P equal [...] 6 if the precision is omitted [...].
        // Then, if a conversion with style E would have an exponent of X:
        // - if P > X >= -4, the conversion is with style f [...].
        // - otherwise, the conversion is with style e [...]."
        if (-4 <= _Scientific_exponent && _Scientific_exponent < 6) {
            _Fmt = chars_format::fixed;
        } else {
            _Fmt = chars_format::scientific;
        }
    }

    if (_Fmt == chars_format::fixed) {
        // Example: __output == 1729, __olength == 4

        // _Ryu_exponent | Printed | _Whole_digits | _Total_fixed_length | Notes
        // --------------|----------|---------------|----------------------|---------------------------------------
        // 2 | 172900 | 6 | _Whole_digits | Ryu can't be used for printing
        // 1 | 17290 | 5 | (sometimes adjusted) | when the trimmed digits are nonzero.
        // --------------|----------|---------------|----------------------|---------------------------------------
        // 0 | 1729 | 4 | _Whole_digits | Unified length cases.
        // --------------|----------|---------------|----------------------|---------------------------------------
        // -1 | 172.9 | 3 | __olength + 1 | This case can't happen for
        // -2 | 17.29 | 2 | | __olength == 1, but no additional
        // -3 | 1.729 | 1 | | code is needed to avoid it.
        // --------------|----------|---------------|----------------------|---------------------------------------
        // -4 | 0.1729 | 0 | 2 - _Ryu_exponent | C11 7.21.6.1 "The fprintf function"/8:
        // -5 | 0.01729 | -1 | | "If a decimal-point character appears,
        // -6 | 0.001729 | -2 | | at least one digit appears before it."

        const int32_t _Whole_digits = static_cast<int32_t>(__olength) + _Ryu_exponent;

        uint32_t _Total_fixed_length;
        if (_Ryu_exponent >= 0) { // cases "172900" and "1729"
            _Total_fixed_length = static_cast<uint32_t>(_Whole_digits);
            if (__output == 1) {
                // Rounding can affect the number of digits.
                // For example, 1e23 is exactly "99999999999999991611392" which is 23 digits instead of 24.
                // We can use a lookup table to detect this and adjust the total length.
                static constexpr uint8_t _Adjustment[309] = {
                    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,0,1,0,1,1,1,0,1,1,1,0,0,0,0,0,
                    1,1,0,0,1,0,1,1,1,0,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,1,0,1,1,0,0,0,0,1,1,1,
                    1,0,0,0,0,0,0,0,1,1,0,1,1,0,0,1,0,1,0,1,0,1,1,0,0,0,0,0,1,1,1,0,0,1,1,1,1,1,0,1,0,1,1,0,1,
                    1,0,0,0,0,0,0,0,0,0,1,1,1,0,0,1,0,0,1,0,0,1,1,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0,0,1,0,0,0,1,
                    0,1,0,1,0,1,1,1,0,0,0,0,0,0,1,1,1,1,0,0,1,0,1,1,1,0,0,0,1,0,1,1,1,1,1,1,0,1,0,1,1,0,0,0,1,
                    1,1,0,1,1,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,1,1,0,0,0,1,0,1,0,0,0,0,0,1,1,0,
                    0,1,0,1,1,1,0,0,1,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,1,1,0,1,0,0,0,0,0,1,1,0,1,0 };
                _Total_fixed_length -= _Adjustment[_Ryu_exponent];
                // _Whole_digits doesn't need to be adjusted because these cases won't refer to it later.
            }
        } else if (_Whole_digits > 0) { // case "17.29"
            _Total_fixed_length = __olength + 1;
        } else { // case "0.001729"
            _Total_fixed_length = static_cast<uint32_t>(2 - _Ryu_exponent);
        }

        if (_Last - _First < static_cast<ptrdiff_t>(_Total_fixed_length)) {
            return { _Last, errc::value_too_large };
        }

        char* _Mid;
        if (_Ryu_exponent > 0) { // case "172900"
            bool _Can_use_ryu;

            if (_Ryu_exponent > 22) { // 10^22 is the largest power of 10 that's exactly representable as a double.
                _Can_use_ryu = false;
            } else {
                // Ryu generated X: __v.__mantissa * 10^_Ryu_exponent
                // __v.__mantissa == 2^_Trailing_zero_bits * (__v.__mantissa >> _Trailing_zero_bits)
                // 10^_Ryu_exponent == 2^_Ryu_exponent * 5^_Ryu_exponent

                // _Trailing_zero_bits is [0, 56] (aside: because 2^56 is the largest power of 2
                // with 17 decimal digits, which is double's round-trip limit.)
                // _Ryu_exponent is [1, 22].
                // Normalization adds [2, 52] (aside: at least 2 because the pre-normalized mantissa is at least 5).
                // This adds up to [3, 130], which is well below double's maximum binary exponent 1023.

                // Therefore, we just need to consider (__v.__mantissa >> _Trailing_zero_bits) * 5^_Ryu_exponent.

                // If that product would exceed 53 bits, then X can't be exactly represented as a double.
                // (That's not a problem for round-tripping, because X is close enough to the original double,
                // but X isn't mathematically equal to the original double.) This requires a high-precision fallback.

                // If the product is 53 bits or smaller, then X can be exactly represented as a double (and we don't
                // need to re-synthesize it; the original double must have been X, because Ryu wouldn't produce the
                // same output for two different doubles X and Y). This allows Ryu's output to be used (zero-filled).

                // (2^53 - 1) / 5^0 (for indexing), (2^53 - 1) / 5^1, ..., (2^53 - 1) / 5^22
                static constexpr uint64_t _Max_shifted_mantissa[23] = {
                    9007199254740991u, 1801439850948198u, 360287970189639u, 72057594037927u, 14411518807585u,
                    2882303761517u, 576460752303u, 115292150460u, 23058430092u, 4611686018u, 922337203u, 184467440u,
                    36893488u, 7378697u, 1475739u, 295147u, 59029u, 11805u, 2361u, 472u, 94u, 18u, 3u };

                unsigned long _Trailing_zero_bits;
#ifdef _WIN64
                (void) _BitScanForward64(&_Trailing_zero_bits, __v.__mantissa); // __v.__mantissa is guaranteed nonzero
#else // ^^^ 64-bit ^^^ / vvv 32-bit vvv
                const uint32_t _Low_mantissa = static_cast<uint32_t>(__v.__mantissa);
                if (_Low_mantissa != 0) {
                    (void) _BitScanForward(&_Trailing_zero_bits, _Low_mantissa);
                } else {
                    const uint32_t _High_mantissa = static_cast<uint32_t>(__v.__mantissa >> 32); // nonzero here
                    (void) _BitScanForward(&_Trailing_zero_bits, _High_mantissa);
                    _Trailing_zero_bits += 32;
                }
#endif // ^^^ 32-bit ^^^
                const uint64_t _Shifted_mantissa = __v.__mantissa >> _Trailing_zero_bits;
                _Can_use_ryu = _Shifted_mantissa <= _Max_shifted_mantissa[_Ryu_exponent];
            }

            if (!_Can_use_ryu) {
                // Print the integer exactly.
                // Performance note: This will redundantly perform bounds checking.
                // Performance note: This will redundantly decompose the IEEE representation.
                return __d2fixed_buffered_n(_First, _Last, __f, 0);
            }

            // _Can_use_ryu
            // Print the decimal digits, left-aligned within [_First, _First + _Total_fixed_length).
            _Mid = _First + __olength;
        } else { // cases "1729", "17.29", and "0.001729"
            // Print the decimal digits, right-aligned within [_First, _First + _Total_fixed_length).
            _Mid = _First + _Total_fixed_length;
        }

        // We prefer 32-bit operations, even on 64-bit platforms.
        // We have at most 17 digits, and uint32_t can store 9 digits.
        // If __output doesn't fit into uint32_t, we cut off 8 digits,
        // so the rest will fit into uint32_t.
        if ((__output >> 32) != 0) {
            // Expensive 64-bit division.
            const uint64_t __q = __div1e8(__output);
            uint32_t __output2 = static_cast<uint32_t>(__output - 100000000 * __q);
            __output = __q;

            const uint32_t __c = __output2 % 10000;
            __output2 /= 10000;
            const uint32_t __d = __output2 % 10000;
            const uint32_t __c0 = (__c % 100) << 1;
            const uint32_t __c1 = (__c / 100) << 1;
            const uint32_t __d0 = (__d % 100) << 1;
            const uint32_t __d1 = (__d / 100) << 1;

            // Emit the 8 cut-off digits, two at a time, walking _Mid backwards.
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __d0, 2);
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __d1, 2);
        }
        uint32_t __output2 = static_cast<uint32_t>(__output);
        while (__output2 >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
            const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
#else
            const uint32_t __c = __output2 % 10000;
#endif
            __output2 /= 10000;
            const uint32_t __c0 = (__c % 100) << 1;
            const uint32_t __c1 = (__c / 100) << 1;
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
        }
        if (__output2 >= 100) {
            const uint32_t __c = (__output2 % 100) << 1;
            __output2 /= 100;
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
        }
        if (__output2 >= 10) {
            const uint32_t __c = __output2 << 1;
            _CSTD memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
        } else {
            *--_Mid = static_cast<char>('0' + __output2);
        }

        if (_Ryu_exponent > 0) { // case "172900" with _Can_use_ryu
            // Performance note: it might be more efficient to do this immediately after setting _Mid.
            _CSTD memset(_First + __olength, '0', static_cast<size_t>(_Ryu_exponent));
        } else if (_Ryu_exponent == 0) { // case "1729"
            // Done!
        } else if (_Whole_digits > 0) { // case "17.29"
            // Performance note: moving digits might not be optimal.
            _CSTD memmove(_First, _First + 1, static_cast<size_t>(_Whole_digits));
            _First[_Whole_digits] = '.';
        } else { // case "0.001729"
            // Performance note: a larger memset() followed by overwriting '.' might be more efficient.
            _First[0] = '0';
            _First[1] = '.';
            _CSTD memset(_First + 2, '0', static_cast<size_t>(-_Whole_digits));
        }

        return { _First + _Total_fixed_length, errc{} };
    }

    const uint32_t _Total_scientific_length = __olength + (__olength > 1) // digits + possible decimal point
        + (-100 < _Scientific_exponent && _Scientific_exponent < 100 ? 4 : 5); // + scientific exponent
    if (_Last - _First < static_cast<ptrdiff_t>(_Total_scientific_length)) {
        return { _Last, errc::value_too_large };
    }
    char* const __result = _First;

    // Print the decimal digits.
    uint32_t __i = 0;
    // We prefer 32-bit operations, even on 64-bit platforms.
    // We have at most 17 digits, and uint32_t can store 9 digits.
    // If __output doesn't fit into uint32_t, we cut off 8 digits,
    // so the rest will fit into uint32_t.
    if ((__output >> 32) != 0) {
        // Expensive 64-bit division.
        const uint64_t __q = __div1e8(__output);
        uint32_t __output2 = static_cast<uint32_t>(__output) - 100000000 * static_cast<uint32_t>(__q);
        __output = __q;

        const uint32_t __c = __output2 % 10000;
        __output2 /= 10000;
        const uint32_t __d = __output2 % 10000;
        const uint32_t __c0 = (__c % 100) << 1;
        const uint32_t __c1 = (__c / 100) << 1;
        const uint32_t __d0 = (__d % 100) << 1;
        const uint32_t __d1 = (__d / 100) << 1;
        _CSTD memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
        _CSTD memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
        _CSTD memcpy(__result + __olength - __i - 5, __DIGIT_TABLE + __d0, 2);
        _CSTD memcpy(__result + __olength - __i - 7, __DIGIT_TABLE + __d1, 2);
        __i += 8;
    }
    uint32_t __output2 = static_cast<uint32_t>(__output);
    while (__output2 >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
        const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
#else
        const uint32_t __c = __output2 % 10000;
#endif
        __output2 /= 10000;
        const uint32_t __c0 = (__c % 100) << 1;
        const uint32_t __c1 = (__c / 100) << 1;
        _CSTD memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
        _CSTD memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
        __i += 4;
    }
    if (__output2 >= 100) {
        const uint32_t __c = (__output2 % 100) << 1;
        __output2 /= 100;
        _CSTD memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c, 2);
        __i += 2;
    }
    if (__output2 >= 10) {
        const uint32_t __c = __output2 << 1;
        // We can't use memcpy here: the decimal dot goes between these two digits.
        __result[2] = __DIGIT_TABLE[__c + 1];
        __result[0] = __DIGIT_TABLE[__c];
    } else {
        __result[0] = static_cast<char>('0' + __output2);
    }

    // Print decimal point if needed.
    uint32_t __index;
    if (__olength > 1) {
        __result[1] = '.';
        __index = __olength + 1;
    } else {
        __index = 1;
    }

    // Print the exponent.
    __result[__index++] = 'e';
    if (_Scientific_exponent < 0) {
        __result[__index++] = '-';
        _Scientific_exponent = -_Scientific_exponent;
    } else {
        __result[__index++] = '+';
    }

    if (_Scientific_exponent >= 100) {
        const int32_t __c = _Scientific_exponent % 10;
        _CSTD memcpy(__result + __index, __DIGIT_TABLE + 2 * (_Scientific_exponent / 10), 2);
        __result[__index + 2] = static_cast<char>('0' + __c);
        __index += 3;
    } else {
        _CSTD memcpy(__result + __index, __DIGIT_TABLE + 2 * _Scientific_exponent, 2);
        __index += 2;
    }

    return { _First + _Total_scientific_length, errc{} };
}
|
|
|
|
_NODISCARD inline bool __d2d_small_int(const uint64_t __ieeeMantissa, const uint32_t __ieeeExponent,
    __floating_decimal_64* const __v) {
    // Fast path: if the double is an integer in [1, 2^53), store its exact decimal
    // representation (integer mantissa, exponent 0) in *__v and return true.
    // Otherwise return false and leave *__v untouched.
    const uint64_t __full_mantissa = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa; // restore implicit bit
    const int32_t __binary_exp = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;

    if (__binary_exp > 0 || __binary_exp < -52) {
        // __binary_exp > 0: f = __full_mantissa * 2^__binary_exp >= 2^53; ignore this case for now.
        // __binary_exp < -52: f < 1.
        return false;
    }

    // Since 2^52 <= __full_mantissa < 2^53 and 0 <= -__binary_exp <= 52:
    // 1 <= f = __full_mantissa / 2^-__binary_exp < 2^53.
    // f is an integer exactly when the low -__binary_exp bits of the significand are all zero.
    const int32_t __shift = -__binary_exp;
    if ((__full_mantissa & ((1ull << __shift) - 1)) != 0) {
        return false; // nonzero fractional part
    }

    // f is an integer in the range [1, 2^53).
    // Note: __mantissa might contain trailing (decimal) 0's.
    // Note: since 2^53 < 10^16, there is no need to adjust __decimalLength17().
    __v->__mantissa = __full_mantissa >> __shift;
    __v->__exponent = 0;
    return true;
}
|
|
|
|
// Top-level shortest-round-trip printer for double: decodes __f, handles zero and the
// large-integer fixed-notation fast path, runs Ryu (__d2d) or the small-integer fast path
// (__d2d_small_int), and delegates final digit emission to __to_chars.
// Precondition (implied by the zero check below): __f is finite; infinities/NaNs are
// presumably handled by the caller — TODO confirm against the call site.
_NODISCARD inline to_chars_result __d2s_buffered_n(char* const _First, char* const _Last, const double __f,
    const chars_format _Fmt) {

    // Step 1: Decode the floating-point number, and unify normalized and subnormal cases.
    const uint64_t __bits = __double_to_bits(__f);

    // Case distinction; exit early for the easy cases.
    if (__bits == 0) {
        if (_Fmt == chars_format::scientific) {
            if (_Last - _First < 5) {
                return { _Last, errc::value_too_large };
            }

            _CSTD memcpy(_First, "0e+00", 5);

            return { _First + 5, errc{} };
        }

        // Print "0" for chars_format::fixed, chars_format::general, and chars_format{}.
        if (_First == _Last) {
            return { _Last, errc::value_too_large };
        }

        *_First = '0';

        return { _First + 1, errc{} };
    }

    // Decode __bits into mantissa and exponent.
    const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
    const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

    if (_Fmt == chars_format::fixed) {
        // const uint64_t _Mantissa2 = __ieeeMantissa | (1ull << __DOUBLE_MANTISSA_BITS); // restore implicit bit
        const int32_t _Exponent2 = static_cast<int32_t>(__ieeeExponent)
            - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS; // bias and normalization

        // Normal values are equal to _Mantissa2 * 2^_Exponent2.
        // (Subnormals are different, but they'll be rejected by the _Exponent2 test here, so they can be ignored.)

        // For nonzero integers, _Exponent2 >= -52. (The minimum value occurs when _Mantissa2 * 2^_Exponent2 is 1.
        // In that case, _Mantissa2 is the implicit 1 bit followed by 52 zeros, so _Exponent2 is -52 to shift away
        // the zeros.) The dense range of exactly representable integers has negative or zero exponents
        // (as positive exponents make the range non-dense). For that dense range, Ryu will always be used:
        // every digit is necessary to uniquely identify the value, so Ryu must print them all.

        // Positive exponents are the non-dense range of exactly representable integers. This contains all of the values
        // for which Ryu can't be used (and a few Ryu-friendly values). We can save time by detecting positive
        // exponents here and skipping Ryu. Calling __d2fixed_buffered_n() with precision 0 is valid for all integers
        // (so it's okay if we call it with a Ryu-friendly value).
        if (_Exponent2 > 0) {
            return __d2fixed_buffered_n(_First, _Last, __f, 0);
        }
    }

    __floating_decimal_64 __v;
    const bool __isSmallInt = __d2d_small_int(__ieeeMantissa, __ieeeExponent, &__v);
    if (__isSmallInt) {
        // For small integers in the range [1, 2^53), __v.__mantissa might contain trailing (decimal) zeros.
        // For scientific notation we need to move these zeros into the exponent.
        // (This is not needed for fixed-point notation, so it might be beneficial to trim
        // trailing zeros in __to_chars only if needed - once fixed-point notation output is implemented.)
        for (;;) {
            const uint64_t __q = __div10(__v.__mantissa);
            const uint32_t __r = static_cast<uint32_t>(__v.__mantissa) - 10 * static_cast<uint32_t>(__q);
            if (__r != 0) {
                break;
            }
            __v.__mantissa = __q;
            ++__v.__exponent;
        }
    } else {
        __v = __d2d(__ieeeMantissa, __ieeeExponent);
    }

    return __to_chars(_First, _Last, __v, _Fmt, __f);
}
|
|
|
|
// ^^^^^^^^^^ DERIVED FROM d2s.c ^^^^^^^^^^
|
|
|
|
// clang-format on
|
|
|
|
// Dispatches shortest-round-trip formatting to the single-precision (f2s) or
// double-precision (d2s) Ryu implementation based on the static type of _Value.
template <class _Floating>
_NODISCARD to_chars_result _Floating_to_chars_ryu(
    char* const _First, char* const _Last, const _Floating _Value, const chars_format _Fmt) noexcept {
    if constexpr (is_same_v<_Floating, float>) {
        return __f2s_buffered_n(_First, _Last, _Value, _Fmt);
    } else {
        return __d2s_buffered_n(_First, _Last, _Value, _Fmt);
    }
}
|
|
|
|
template <class _Floating>
|
|
_NODISCARD to_chars_result _Floating_to_chars_scientific_precision(
|
|
char* const _First, char* const _Last, const _Floating _Value, int _Precision) noexcept {
|
|
|
|
// C11 7.21.6.1 "The fprintf function"/5:
|
|
// "A negative precision argument is taken as if the precision were omitted."
|
|
// /8: "e,E [...] if the precision is missing, it is taken as 6"
|
|
|
|
if (_Precision < 0) {
|
|
_Precision = 6;
|
|
} else if (_Precision < 1'000'000'000) {
|
|
// _Precision is ok.
|
|
} else {
|
|
// Avoid integer overflow.
|
|
// (This defensive check is slightly nonconformant; it can be carefully improved in the future.)
|
|
return {_Last, errc::value_too_large};
|
|
}
|
|
|
|
return __d2exp_buffered_n(_First, _Last, _Value, static_cast<uint32_t>(_Precision));
|
|
}
|
|
|
|
template <class _Floating>
|
|
_NODISCARD to_chars_result _Floating_to_chars_fixed_precision(
|
|
char* const _First, char* const _Last, const _Floating _Value, int _Precision) noexcept {
|
|
|
|
// C11 7.21.6.1 "The fprintf function"/5:
|
|
// "A negative precision argument is taken as if the precision were omitted."
|
|
// /8: "f,F [...] If the precision is missing, it is taken as 6"
|
|
|
|
if (_Precision < 0) {
|
|
_Precision = 6;
|
|
} else if (_Precision < 1'000'000'000) {
|
|
// _Precision is ok.
|
|
} else {
|
|
// Avoid integer overflow.
|
|
// (This defensive check is slightly nonconformant; it can be carefully improved in the future.)
|
|
return {_Last, errc::value_too_large};
|
|
}
|
|
|
|
return __d2fixed_buffered_n(_First, _Last, _Value, static_cast<uint32_t>(_Precision));
|
|
}
|
|
|
|
_STD_END
|
|
|
|
#pragma pop_macro("new")
|
|
_STL_RESTORE_CLANG_WARNINGS
|
|
#pragma warning(pop)
|
|
#pragma pack(pop)
|
|
|
|
#endif // _STL_COMPILER_PREPROCESSOR
|
|
#endif // _XCHARCONV_RYU_H
|