// xmemory internal header

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#pragma once
#ifndef _XMEMORY_
#define _XMEMORY_
#include <yvals_core.h>
#if _STL_COMPILER_PREPROCESSOR
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <new>
#include <xutility>
#if _HAS_CXX20
#include <tuple>
#endif // _HAS_CXX20

#pragma pack(push, _CRT_PACKING)
#pragma warning(push, _STL_WARNING_LEVEL)
#pragma warning(disable : _STL_DISABLED_WARNINGS)
_STL_DISABLE_CLANG_WARNINGS
#pragma push_macro("new")
#undef new

_STD_BEGIN
template <class _Ty>
struct _NODISCARD _Tidy_guard { // class with destructor that calls _Tidy
    _Ty* _Target;

    _CONSTEXPR20 ~_Tidy_guard() {
        if (_Target) {
            _Target->_Tidy();
        }
    }
};

template <class _Ty>
struct _NODISCARD _Tidy_deallocate_guard { // class with destructor that calls _Tidy_deallocate
    _Ty* _Target;

    _CONSTEXPR20 ~_Tidy_deallocate_guard() {
        if (_Target) {
            _Target->_Tidy_deallocate();
        }
    }
};

template <class _Keycmp, class _Lhs, class _Rhs>
_INLINE_VAR constexpr bool _Nothrow_compare = noexcept(
    static_cast<bool>(_STD declval<const _Keycmp&>()(_STD declval<const _Lhs&>(), _STD declval<const _Rhs&>())));

template <size_t _Ty_size>
_NODISCARD constexpr size_t _Get_size_of_n(const size_t _Count) {
    constexpr bool _Overflow_is_possible = _Ty_size > 1;

    if constexpr (_Overflow_is_possible) {
        constexpr size_t _Max_possible = static_cast<size_t>(-1) / _Ty_size;
        if (_Count > _Max_possible) {
            _Throw_bad_array_new_length(); // multiply overflow
        }
    }

    return _Count * _Ty_size;
}

template <class _Ty>
_INLINE_VAR constexpr size_t _New_alignof = (_STD max)(alignof(_Ty), __STDCPP_DEFAULT_NEW_ALIGNMENT__);

struct _Default_allocate_traits {
    __declspec(allocator) static
#ifdef __clang__ // Clang and MSVC implement P0784R7 differently; see GH-1532
        _CONSTEXPR20
#endif // __clang__
        void* _Allocate(const size_t _Bytes) {
        return ::operator new(_Bytes);
    }

#ifdef __cpp_aligned_new
    __declspec(allocator) static
#ifdef __clang__ // Clang and MSVC implement P0784R7 differently; see GH-1532
        _CONSTEXPR20
#endif // __clang__
        void* _Allocate_aligned(const size_t _Bytes, const size_t _Align) {
#ifdef __clang__ // Clang and MSVC implement P0784R7 differently; see GH-1532
#if _HAS_CXX20
        if (_STD is_constant_evaluated()) {
            return ::operator new(_Bytes);
        } else
#endif // _HAS_CXX20
#endif // __clang__
        {
            return ::operator new(_Bytes, align_val_t{_Align});
        }
    }
#endif // __cpp_aligned_new
};

constexpr bool _Is_pow_2(const size_t _Value) noexcept {
    return _Value != 0 && (_Value & (_Value - 1)) == 0;
}

#if defined(_M_IX86) || defined(_M_X64)
_INLINE_VAR constexpr size_t _Big_allocation_threshold = 4096;
_INLINE_VAR constexpr size_t _Big_allocation_alignment = 32;

// Big allocation alignment should at least match vector register alignment
_STL_INTERNAL_STATIC_ASSERT(2 * sizeof(void*) <= _Big_allocation_alignment);
// Big allocation alignment must be a power of two
_STL_INTERNAL_STATIC_ASSERT(_Is_pow_2(_Big_allocation_alignment));

#ifdef _DEBUG
_INLINE_VAR constexpr size_t _Non_user_size = 2 * sizeof(void*) + _Big_allocation_alignment - 1;
#else // _DEBUG
_INLINE_VAR constexpr size_t _Non_user_size = sizeof(void*) + _Big_allocation_alignment - 1;
#endif // _DEBUG

#ifdef _WIN64
_INLINE_VAR constexpr size_t _Big_allocation_sentinel = 0xFAFAFAFAFAFAFAFAULL;
#else // ^^^ _WIN64 / !_WIN64 vvv
_INLINE_VAR constexpr size_t _Big_allocation_sentinel = 0xFAFAFAFAUL;
#endif // _WIN64
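// The function below over-allocates and stashes the raw pointer returned by the
// underlying allocator immediately before the aligned block handed to the caller.
// Roughly (an illustrative sketch of the release-mode layout, not an exact byte map):
//
// ```
// |<-------------------- _Block_size -------------------->|
// [ alignment slack ][ stashed container ptr ][ user block, 32-byte aligned ]
//                                             ^-- _Ptr returned to the caller
// ```
//
// _Ptr[-1] holds the original allocation address so deallocation can recover it;
// _DEBUG builds additionally store _Big_allocation_sentinel at _Ptr[-2].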
template <class _Traits>
__declspec(allocator) void* _Allocate_manually_vector_aligned(const size_t _Bytes) {
    // allocate _Bytes manually aligned to at least _Big_allocation_alignment
    const size_t _Block_size = _Non_user_size + _Bytes;
    if (_Block_size <= _Bytes) {
        _Throw_bad_array_new_length(); // add overflow
    }

    const uintptr_t _Ptr_container = reinterpret_cast<uintptr_t>(_Traits::_Allocate(_Block_size));
    _STL_VERIFY(_Ptr_container != 0, "invalid argument"); // validate even in release since we're doing p[-1]
    void* const _Ptr = reinterpret_cast<void*>((_Ptr_container + _Non_user_size) & ~(_Big_allocation_alignment - 1));
    static_cast<uintptr_t*>(_Ptr)[-1] = _Ptr_container;

#ifdef _DEBUG
    static_cast<uintptr_t*>(_Ptr)[-2] = _Big_allocation_sentinel;
#endif // _DEBUG
    return _Ptr;
}

inline void _Adjust_manually_vector_aligned(void*& _Ptr, size_t& _Bytes) {
    // adjust parameters from _Allocate_manually_vector_aligned to pass to operator delete
    _Bytes += _Non_user_size;

    const uintptr_t* const _Ptr_user = static_cast<uintptr_t*>(_Ptr);
    const uintptr_t _Ptr_container   = _Ptr_user[-1];

    // If the following asserts, it likely means that we are performing
    // an aligned delete on memory coming from an unaligned allocation.
    _STL_ASSERT(_Ptr_user[-2] == _Big_allocation_sentinel, "invalid argument");

    // Extra paranoia on aligned allocation/deallocation; ensure _Ptr_container is
    // in range [_Min_back_shift, _Non_user_size]
#ifdef _DEBUG
    constexpr uintptr_t _Min_back_shift = 2 * sizeof(void*);
#else // ^^^ _DEBUG / !_DEBUG vvv
    constexpr uintptr_t _Min_back_shift = sizeof(void*);
#endif // _DEBUG
    const uintptr_t _Back_shift = reinterpret_cast<uintptr_t>(_Ptr) - _Ptr_container;
    _STL_VERIFY(_Back_shift >= _Min_back_shift && _Back_shift <= _Non_user_size, "invalid argument");
    _Ptr = reinterpret_cast<void*>(_Ptr_container);
}
#endif // defined(_M_IX86) || defined(_M_X64)

#ifdef __cpp_aligned_new
template <size_t _Align, class _Traits = _Default_allocate_traits,
    enable_if_t<(_Align > __STDCPP_DEFAULT_NEW_ALIGNMENT__), int> = 0>
__declspec(allocator) _CONSTEXPR20 void* _Allocate(const size_t _Bytes) {
    // allocate _Bytes when __cpp_aligned_new && _Align > __STDCPP_DEFAULT_NEW_ALIGNMENT__
    if (_Bytes == 0) {
        return nullptr;
    }

#if _HAS_CXX20 // TRANSITION, GH-1532
    if (_STD is_constant_evaluated()) {
        return _Traits::_Allocate(_Bytes);
    } else
#endif // _HAS_CXX20
    {
        size_t _Passed_align = _Align;
#if defined(_M_IX86) || defined(_M_X64)
        if (_Bytes >= _Big_allocation_threshold) {
            // boost the alignment of big allocations to help autovectorization
            _Passed_align = (_STD max)(_Align, _Big_allocation_alignment);
        }
#endif // defined(_M_IX86) || defined(_M_X64)
        return _Traits::_Allocate_aligned(_Bytes, _Passed_align);
    }
}

template <size_t _Align, enable_if_t<(_Align > __STDCPP_DEFAULT_NEW_ALIGNMENT__), int> = 0>
_CONSTEXPR20 void _Deallocate(void* _Ptr, const size_t _Bytes) noexcept {
    // deallocate storage allocated by _Allocate when __cpp_aligned_new && _Align > __STDCPP_DEFAULT_NEW_ALIGNMENT__
#if _HAS_CXX20 // TRANSITION, GH-1532
    if (_STD is_constant_evaluated()) {
        ::operator delete(_Ptr);
    } else
#endif // _HAS_CXX20
    {
        size_t _Passed_align = _Align;
#if defined(_M_IX86) || defined(_M_X64)
        if (_Bytes >= _Big_allocation_threshold) {
            // boost the alignment of big allocations to help autovectorization
            _Passed_align = (_STD max)(_Align, _Big_allocation_alignment);
        }
#endif // defined(_M_IX86) || defined(_M_X64)
        ::operator delete(_Ptr, _Bytes, align_val_t{_Passed_align});
    }
}

#define _HAS_ALIGNED_NEW 1
#else // ^^^ __cpp_aligned_new / !__cpp_aligned_new vvv
#define _HAS_ALIGNED_NEW 0
#endif // __cpp_aligned_new
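// For instance (an illustrative sketch; _Simd_block is a hypothetical type): with a
// 64-byte-aligned element on x64, _New_alignof exceeds __STDCPP_DEFAULT_NEW_ALIGNMENT__,
// so the over-aligned overload above is selected; ordinary types fall through to the
// default-alignment overload below.
//
// ```cpp
// struct alignas(64) _Simd_block { float _Vals[16]; };
// void* _Storage = _Allocate<_New_alignof<_Simd_block>>(sizeof(_Simd_block)); // aligned new path
// _Deallocate<_New_alignof<_Simd_block>>(_Storage, sizeof(_Simd_block));
// ```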
template <size_t _Align, class _Traits = _Default_allocate_traits,
    enable_if_t<(_Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__), int> = 0>
__declspec(allocator) _CONSTEXPR20 void* _Allocate(const size_t _Bytes) {
    // allocate _Bytes when !_HAS_ALIGNED_NEW || _Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__
#if defined(_M_IX86) || defined(_M_X64)
#if _HAS_CXX20 // TRANSITION, GH-1532
    if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
    {
        if (_Bytes >= _Big_allocation_threshold) {
            // boost the alignment of big allocations to help autovectorization
            return _Allocate_manually_vector_aligned<_Traits>(_Bytes);
        }
    }
#endif // defined(_M_IX86) || defined(_M_X64)

    if (_Bytes != 0) {
        return _Traits::_Allocate(_Bytes);
    }

    return nullptr;
}

template <size_t _Align, enable_if_t<(_Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__), int> = 0>
_CONSTEXPR20 void _Deallocate(void* _Ptr, size_t _Bytes) noexcept {
    // deallocate storage allocated by _Allocate when !_HAS_ALIGNED_NEW || _Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__
#if _HAS_CXX20 // TRANSITION, GH-1532
    if (_STD is_constant_evaluated()) {
        ::operator delete(_Ptr);
    } else
#endif // _HAS_CXX20
    {
#if defined(_M_IX86) || defined(_M_X64)
        if (_Bytes >= _Big_allocation_threshold) {
            // boost the alignment of big allocations to help autovectorization
            _Adjust_manually_vector_aligned(_Ptr, _Bytes);
        }
#endif // defined(_M_IX86) || defined(_M_X64)
        ::operator delete(_Ptr, _Bytes);
    }
}

#undef _HAS_ALIGNED_NEW

template <class _Ty, class... _Types>
_Ty* _Global_new(_Types&&... _Args) { // acts as "new" while disallowing user overload selection
    struct _NODISCARD _Guard_type {
        void* _Result;

        ~_Guard_type() {
            if (_Result) {
                _Deallocate<_New_alignof<_Ty>>(_Result, sizeof(_Ty));
            }
        }
    };

    _Guard_type _Guard{_Allocate<_New_alignof<_Ty>>(sizeof(_Ty))};
    ::new (_Guard._Result) _Ty(_STD forward<_Types>(_Args)...);
    return static_cast<_Ty*>(_STD exchange(_Guard._Result, nullptr));
}

template <class _Ptr, class _Ty>
using _Rebind_pointer_t = typename pointer_traits<_Ptr>::template rebind<_Ty>;

template <class _Pointer, enable_if_t<!is_pointer_v<_Pointer>, int> = 0>
_CONSTEXPR20 _Pointer _Refancy(typename pointer_traits<_Pointer>::element_type* _Ptr) noexcept {
    return pointer_traits<_Pointer>::pointer_to(*_Ptr);
}

template <class _Pointer, enable_if_t<is_pointer_v<_Pointer>, int> = 0>
_CONSTEXPR20 _Pointer _Refancy(_Pointer _Ptr) noexcept {
    return _Ptr;
}

template <class _NoThrowFwdIt, class _NoThrowSentinel>
_CONSTEXPR20 void _Destroy_range(_NoThrowFwdIt _First, _NoThrowSentinel _Last) noexcept;

template <class _Ty>
_CONSTEXPR20 void _Destroy_in_place(_Ty& _Obj) noexcept {
    if constexpr (is_array_v<_Ty>) {
        _Destroy_range(_Obj, _Obj + extent_v<_Ty>);
    } else {
        _Obj.~_Ty();
    }
}

#if _HAS_CXX17
_EXPORT_STD template <class _Ty>
_CONSTEXPR20 void destroy_at(_Ty* const _Location) noexcept /* strengthened */ {
#if _HAS_CXX20
    if constexpr (is_array_v<_Ty>) {
        _Destroy_range(_STD begin(*_Location), _STD end(*_Location));
    } else
#endif // _HAS_CXX20
    {
        _Location->~_Ty();
    }
}
#endif // _HAS_CXX17

template <class _Ptrty>
auto _Const_cast(_Ptrty _Ptr) noexcept { // remove constness from a fancy pointer
    using _Elem       = typename pointer_traits<_Ptrty>::element_type;
    using _Modifiable = remove_const_t<_Elem>;
    using _Dest       = typename pointer_traits<_Ptrty>::template rebind<_Modifiable>;

    return pointer_traits<_Dest>::pointer_to(const_cast<_Modifiable&>(*_Ptr));
}

template <class _Ty>
auto _Const_cast(_Ty* _Ptr) noexcept {
    return const_cast<remove_const_t<_Ty>*>(_Ptr);
}
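// The _Get_*_type machinery below applies the void_t detection idiom: the primary
// template computes the allocator_traits default, and a partial specialization wins
// whenever the allocator declares the nested type itself. The pattern in miniature
// (hypothetical names, not part of this header):
//
// ```cpp
// template <class _Ty, class = void>
// struct _Get_widget { using type = int; }; // fallback
//
// template <class _Ty>
// struct _Get_widget<_Ty, void_t<typename _Ty::widget>> {
//     using type = typename _Ty::widget; // user-provided nested type wins
// };
// ```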
template <class _Ty, class = void>
struct _Get_pointer_type {
    using type = typename _Ty::value_type*;
};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Ty>
struct _Get_pointer_type<_Ty, void_t<typename _Ty::pointer>> {
    using type = typename _Ty::pointer;
};
_STL_RESTORE_DEPRECATED_WARNING

template <class _Ty, class = void>
struct _Get_const_pointer_type {
    using _Ptrty = typename _Get_pointer_type<_Ty>::type;
    using _Valty = typename _Ty::value_type;
    using type   = typename pointer_traits<_Ptrty>::template rebind<const _Valty>;
};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Ty>
struct _Get_const_pointer_type<_Ty, void_t<typename _Ty::const_pointer>> {
    using type = typename _Ty::const_pointer;
};
_STL_RESTORE_DEPRECATED_WARNING

template <class _Ty, class = void>
struct _Get_void_pointer_type {
    using _Ptrty = typename _Get_pointer_type<_Ty>::type;
    using type   = typename pointer_traits<_Ptrty>::template rebind<void>;
};

template <class _Ty>
struct _Get_void_pointer_type<_Ty, void_t<typename _Ty::void_pointer>> {
    using type = typename _Ty::void_pointer;
};

template <class _Ty, class = void>
struct _Get_const_void_pointer_type {
    using _Ptrty = typename _Get_pointer_type<_Ty>::type;
    using type   = typename pointer_traits<_Ptrty>::template rebind<const void>;
};

template <class _Ty>
struct _Get_const_void_pointer_type<_Ty, void_t<typename _Ty::const_void_pointer>> {
    using type = typename _Ty::const_void_pointer;
};

template <class _Ty, class = void>
struct _Get_difference_type {
    using _Ptrty = typename _Get_pointer_type<_Ty>::type;
    using type   = typename pointer_traits<_Ptrty>::difference_type;
};

template <class _Ty>
struct _Get_difference_type<_Ty, void_t<typename _Ty::difference_type>> {
    using type = typename _Ty::difference_type;
};

template <class _Ty, class = void>
struct _Get_size_type {
    using type = make_unsigned_t<typename _Get_difference_type<_Ty>::type>;
};

template <class _Ty>
struct _Get_size_type<_Ty, void_t<typename _Ty::size_type>> {
    using type = typename _Ty::size_type;
};

template <class _Ty, class = void>
struct _Get_propagate_on_container_copy {
    using type = false_type;
};

template <class _Ty>
struct _Get_propagate_on_container_copy<_Ty, void_t<typename _Ty::propagate_on_container_copy_assignment>> {
    using type = typename _Ty::propagate_on_container_copy_assignment;
};

template <class _Ty, class = void>
struct _Get_propagate_on_container_move {
    using type = false_type;
};

template <class _Ty>
struct _Get_propagate_on_container_move<_Ty, void_t<typename _Ty::propagate_on_container_move_assignment>> {
    using type = typename _Ty::propagate_on_container_move_assignment;
};

template <class _Ty, class = void>
struct _Get_propagate_on_container_swap {
    using type = false_type;
};

template <class _Ty>
struct _Get_propagate_on_container_swap<_Ty, void_t<typename _Ty::propagate_on_container_swap>> {
    using type = typename _Ty::propagate_on_container_swap;
};

template <class _Ty, class = void>
struct _Get_is_always_equal {
    using type = bool_constant<is_empty_v<_Ty>>;
};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Ty>
struct _Get_is_always_equal<_Ty, void_t<typename _Ty::is_always_equal>> {
    using type = typename _Ty::is_always_equal;
};
_STL_RESTORE_DEPRECATED_WARNING

template <class _Ty, class _Other, class = void>
struct _Get_rebind_type {
    using type = typename _Replace_first_parameter<_Other, _Ty>::type;
};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Ty, class _Other>
struct _Get_rebind_type<_Ty, _Other, void_t<typename _Ty::template rebind<_Other>::other>> {
    using type = typename _Ty::template rebind<_Other>::other;
};
_STL_RESTORE_DEPRECATED_WARNING

_EXPORT_STD template <class _Ty>
class allocator;

template <class _Alloc, class = void>
struct _Is_default_allocator : false_type {};

template <class _Ty>
struct _Is_default_allocator<allocator<_Ty>, void_t<typename allocator<_Ty>::_From_primary>>
    : is_same<typename allocator<_Ty>::_From_primary, allocator<_Ty>>::type {};

#if _HAS_CXX23
template <class _Alloc, class _SizeTy, class = void>
inline constexpr bool _Has_member_allocate_at_least = false;

template <class _Alloc, class _SizeTy>
inline constexpr bool _Has_member_allocate_at_least<_Alloc, _SizeTy,
    void_t<decltype(_STD declval<_Alloc&>().allocate_at_least(_STD declval<const _SizeTy&>()))>> = true;
#endif // _HAS_CXX23

template <class _Void, class... _Types>
struct _Has_no_allocator_construct : true_type {};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Alloc, class _Ptr, class... _Args>
struct _Has_no_allocator_construct<
    void_t<decltype(_STD declval<_Alloc&>().construct(_STD declval<_Ptr>(), _STD declval<_Args>()...))>, _Alloc, _Ptr,
    _Args...> : false_type {};
_STL_RESTORE_DEPRECATED_WARNING

template <class _Alloc, class _Ptr, class... _Args>
using _Uses_default_construct =
    disjunction<_Is_default_allocator<_Alloc>, _Has_no_allocator_construct<void, _Alloc, _Ptr, _Args...>>;

template <class _Alloc, class _Ptr, class = void>
struct _Has_no_alloc_destroy : true_type {};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Alloc, class _Ptr>
struct _Has_no_alloc_destroy<_Alloc, _Ptr, void_t<decltype(_STD declval<_Alloc&>().destroy(_STD declval<_Ptr>()))>>
    : false_type {};
_STL_RESTORE_DEPRECATED_WARNING

template <class _Alloc, class _Ptr>
using _Uses_default_destroy = disjunction<_Is_default_allocator<_Alloc>, _Has_no_alloc_destroy<_Alloc, _Ptr>>;
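// Consequently, an allocator that declares its own construct()/destroy() opts out of
// the default placement-new/destructor path. An illustrative, hypothetical allocator:
//
// ```cpp
// template <class _Ty>
// struct _Tracing_alloc {
//     using value_type = _Ty;
//     _Ty* allocate(size_t _Count) { return allocator<_Ty>{}.allocate(_Count); }
//     void deallocate(_Ty* _Ptr, size_t _Count) noexcept { allocator<_Ty>{}.deallocate(_Ptr, _Count); }
//     template <class... _Args>
//     void construct(_Ty* _Ptr, _Args&&... _Vals) {
//         ::new (static_cast<void*>(_Ptr)) _Ty(_STD forward<_Args>(_Vals)...); // + tracing
//     }
// };
// // _Uses_default_construct<_Tracing_alloc<int>, int*, int>::value is false, so
// // allocator_traits routes construction through _Al.construct below.
// ```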
template <class _Alloc, class _Size_type, class _Const_void_pointer, class = void>
struct _Has_allocate_hint : false_type {};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Alloc, class _Size_type, class _Const_void_pointer>
struct _Has_allocate_hint<_Alloc, _Size_type, _Const_void_pointer,
    void_t<decltype(_STD declval<_Alloc&>().allocate(
        _STD declval<const _Size_type&>(), _STD declval<const _Const_void_pointer&>()))>> : true_type {};
_STL_RESTORE_DEPRECATED_WARNING

template <class _Alloc, class = void>
struct _Has_max_size : false_type {};

_STL_DISABLE_DEPRECATED_WARNING
template <class _Alloc>
struct _Has_max_size<_Alloc, void_t<decltype(_STD declval<const _Alloc&>().max_size())>> : true_type {};
_STL_RESTORE_DEPRECATED_WARNING

template <class _Alloc, class = void>
struct _Has_select_on_container_copy_construction : false_type {};

template <class _Alloc>
struct _Has_select_on_container_copy_construction<_Alloc,
    void_t<decltype(_STD declval<const _Alloc&>().select_on_container_copy_construction())>> : true_type {};

#if _HAS_CXX23
_EXPORT_STD template <class _Ptr, class _SizeTy = size_t>
struct allocation_result {
    _Ptr ptr;
    _SizeTy count;
};
#endif // _HAS_CXX23

_EXPORT_STD template <class _Alloc>
struct allocator_traits;

_STL_DISABLE_DEPRECATED_WARNING
template <class _Alloc>
struct _Normal_allocator_traits { // defines traits for allocators
    using allocator_type = _Alloc;
    using value_type     = typename _Alloc::value_type;

    using pointer            = typename _Get_pointer_type<_Alloc>::type;
    using const_pointer      = typename _Get_const_pointer_type<_Alloc>::type;
    using void_pointer       = typename _Get_void_pointer_type<_Alloc>::type;
    using const_void_pointer = typename _Get_const_void_pointer_type<_Alloc>::type;

    using size_type       = typename _Get_size_type<_Alloc>::type;
    using difference_type = typename _Get_difference_type<_Alloc>::type;

    using propagate_on_container_copy_assignment = typename _Get_propagate_on_container_copy<_Alloc>::type;
    using propagate_on_container_move_assignment = typename _Get_propagate_on_container_move<_Alloc>::type;
    using propagate_on_container_swap            = typename _Get_propagate_on_container_swap<_Alloc>::type;
    using is_always_equal                        = typename _Get_is_always_equal<_Alloc>::type;

    template <class _Other>
    using rebind_alloc = typename _Get_rebind_type<_Alloc, _Other>::type;

    template <class _Other>
    using rebind_traits = allocator_traits<rebind_alloc<_Other>>;

    _NODISCARD_RAW_PTR_ALLOC static _CONSTEXPR20 __declspec(allocator) pointer allocate(
        _Alloc& _Al, _CRT_GUARDOVERFLOW const size_type _Count) {
        return _Al.allocate(_Count);
    }

    _NODISCARD_RAW_PTR_ALLOC static _CONSTEXPR20 __declspec(allocator) pointer allocate(
        _Alloc& _Al, _CRT_GUARDOVERFLOW const size_type _Count, const const_void_pointer _Hint) {
        if constexpr (_Has_allocate_hint<_Alloc, size_type, const_void_pointer>::value) {
            return _Al.allocate(_Count, _Hint);
        } else {
            return _Al.allocate(_Count);
        }
    }

#if _HAS_CXX23
    _NODISCARD_RAW_PTR_ALLOC static constexpr allocation_result<pointer, size_type> allocate_at_least(
        _Alloc& _Al, _CRT_GUARDOVERFLOW const size_type _Count) {
        if constexpr (_Has_member_allocate_at_least<_Alloc, size_type>) {
            return _Al.allocate_at_least(_Count);
        } else {
            return {_Al.allocate(_Count), _Count};
        }
    }
#endif // _HAS_CXX23

    static _CONSTEXPR20 void deallocate(_Alloc& _Al, pointer _Ptr, size_type _Count) {
        _Al.deallocate(_Ptr, _Count);
    }
    template <class _Ty, class... _Types>
    static _CONSTEXPR20 void construct(_Alloc& _Al, _Ty* _Ptr, _Types&&... _Args) {
        if constexpr (_Uses_default_construct<_Alloc, _Ty*, _Types...>::value) {
#if _HAS_CXX20
            _STD construct_at(_Ptr, _STD forward<_Types>(_Args)...);
#else // _HAS_CXX20
            ::new (static_cast<void*>(_Ptr)) _Ty(_STD forward<_Types>(_Args)...);
#endif // _HAS_CXX20
        } else {
            _Al.construct(_Ptr, _STD forward<_Types>(_Args)...);
        }
    }

    template <class _Ty>
    static _CONSTEXPR20 void destroy(_Alloc& _Al, _Ty* _Ptr) {
        if constexpr (_Uses_default_destroy<_Alloc, _Ty*>::value) {
#if _HAS_CXX20
            _STD destroy_at(_Ptr);
#else // _HAS_CXX20
            _Ptr->~_Ty();
#endif // _HAS_CXX20
        } else {
            _Al.destroy(_Ptr);
        }
    }

    _NODISCARD static _CONSTEXPR20 size_type max_size(const _Alloc& _Al) noexcept {
        if constexpr (_Has_max_size<_Alloc>::value) {
            return _Al.max_size();
        } else {
            return (numeric_limits<size_type>::max)() / sizeof(value_type);
        }
    }

    _NODISCARD static _CONSTEXPR20 _Alloc select_on_container_copy_construction(const _Alloc& _Al) {
        if constexpr (_Has_select_on_container_copy_construction<_Alloc>::value) {
            return _Al.select_on_container_copy_construction();
        } else {
            return _Al;
        }
    }
};
_STL_RESTORE_DEPRECATED_WARNING
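// A minimal allocator need only supply value_type, allocate, and deallocate; every
// other member above is synthesized with the standard default. Sketch (hypothetical
// type, not part of this header):
//
// ```cpp
// template <class _Ty>
// struct _Tiny_alloc {
//     using value_type = _Ty;
//     _Ty* allocate(size_t _Count) { return static_cast<_Ty*>(::operator new(_Count * sizeof(_Ty))); }
//     void deallocate(_Ty* _Ptr, size_t) noexcept { ::operator delete(_Ptr); }
//     bool operator==(const _Tiny_alloc&) const noexcept { return true; }
// };
// // allocator_traits<_Tiny_alloc<int>>::pointer is int*, size_type is size_t,
// // construct() placement-news, and select_on_container_copy_construction copies _Al.
// ```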
template <class _Alloc>
struct _Default_allocator_traits { // traits for std::allocator
    using allocator_type = _Alloc;
    using value_type     = typename _Alloc::value_type;

    using pointer            = value_type*;
    using const_pointer      = const value_type*;
    using void_pointer       = void*;
    using const_void_pointer = const void*;

    using size_type       = size_t;
    using difference_type = ptrdiff_t;

    using propagate_on_container_copy_assignment = false_type;
    using propagate_on_container_move_assignment = true_type;
    using propagate_on_container_swap            = false_type;
    using is_always_equal                        = true_type;

    template <class _Other>
    using rebind_alloc = allocator<_Other>;

    template <class _Other>
    using rebind_traits = allocator_traits<allocator<_Other>>;

    _NODISCARD_RAW_PTR_ALLOC static _CONSTEXPR20 __declspec(allocator) pointer allocate(
        _Alloc& _Al, _CRT_GUARDOVERFLOW const size_type _Count) {
#if _HAS_CXX20 // TRANSITION, GH-1532
        if (_STD is_constant_evaluated()) {
            return _Al.allocate(_Count);
        } else
#endif // _HAS_CXX20
        {
            (void) _Al;
            return static_cast<pointer>(
                _Allocate<_New_alignof<value_type>>(_Get_size_of_n<sizeof(value_type)>(_Count)));
        }
    }

    _NODISCARD_RAW_PTR_ALLOC static _CONSTEXPR20 __declspec(allocator) pointer allocate(
        _Alloc& _Al, _CRT_GUARDOVERFLOW const size_type _Count, const_void_pointer) {
#if _HAS_CXX20 // TRANSITION, GH-1532
        if (_STD is_constant_evaluated()) {
            return _Al.allocate(_Count);
        } else
#endif // _HAS_CXX20
        {
            (void) _Al;
            return static_cast<pointer>(
                _Allocate<_New_alignof<value_type>>(_Get_size_of_n<sizeof(value_type)>(_Count)));
        }
    }

#if _HAS_CXX23
    _NODISCARD_RAW_PTR_ALLOC static constexpr allocation_result<pointer, size_type> allocate_at_least(
        _Alloc& _Al, _CRT_GUARDOVERFLOW const size_type _Count) {
        return {_Al.allocate(_Count), _Count};
    }
#endif // _HAS_CXX23

    static _CONSTEXPR20 void deallocate(_Alloc& _Al, const pointer _Ptr, const size_type _Count) {
        // no overflow check on the following multiply; we assume _Allocate did that check
#if _HAS_CXX20 // TRANSITION, GH-1532
        if (_STD is_constant_evaluated()) {
            _Al.deallocate(_Ptr, _Count);
        } else
#endif // _HAS_CXX20
        {
            (void) _Al;
            _Deallocate<_New_alignof<value_type>>(_Ptr, sizeof(value_type) * _Count);
        }
    }

    template <class _Objty, class... _Types>
    static _CONSTEXPR20 void construct(_Alloc&, _Objty* const _Ptr, _Types&&... _Args) {
#if _HAS_CXX20
        _STD construct_at(_Ptr, _STD forward<_Types>(_Args)...);
#else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
        ::new (const_cast<void*>(static_cast<const volatile void*>(_Ptr))) _Objty(_STD forward<_Types>(_Args)...);
#endif // ^^^ !_HAS_CXX20 ^^^
    }

    template <class _Uty>
    static _CONSTEXPR20 void destroy(_Alloc&, _Uty* const _Ptr) {
#if _HAS_CXX20
        _STD destroy_at(_Ptr);
#else // _HAS_CXX20
        _Ptr->~_Uty();
#endif // _HAS_CXX20
    }

    _NODISCARD static _CONSTEXPR20 size_type max_size(const _Alloc&) noexcept {
        return static_cast<size_t>(-1) / sizeof(value_type);
    }

    _NODISCARD static _CONSTEXPR20 _Alloc select_on_container_copy_construction(const _Alloc& _Al) {
        return _Al;
    }
};

_EXPORT_STD template <class _Alloc>
struct allocator_traits : conditional_t<_Is_default_allocator<_Alloc>::value, _Default_allocator_traits<_Alloc>,
                              _Normal_allocator_traits<_Alloc>> {};

// _Choose_pocca_v returns whether an attempt to propagate allocators is necessary in copy assignment operations.
// Note that even when false_type, callers should call _Pocca as we want to assign allocators even when equal.
template <class _Alloc>
_INLINE_VAR constexpr bool _Choose_pocca_v =
    allocator_traits<_Alloc>::propagate_on_container_copy_assignment::value
    && !allocator_traits<_Alloc>::is_always_equal::value;

enum class _Pocma_values {
    _Equal_allocators, // usually allows contents to be stolen (e.g. with swap)
    _Propagate_allocators, // usually allows the allocator to be propagated, and then contents stolen
    _No_propagate_allocators, // usually turns moves into copies
};

template <class _Alloc>
_INLINE_VAR constexpr _Pocma_values _Choose_pocma_v =
    allocator_traits<_Alloc>::is_always_equal::value
        ? _Pocma_values::_Equal_allocators
        : (allocator_traits<_Alloc>::propagate_on_container_move_assignment::value
                  ? _Pocma_values::_Propagate_allocators
                  : _Pocma_values::_No_propagate_allocators);
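// How the three values steer container move assignment, in outline (a sketch; the
// real logic lives in each container's move assignment operator):
//
// ```cpp
// // _Left = _STD move(_Right);
// switch (_Choose_pocma_v<_Alloc>) {
// case _Pocma_values::_Equal_allocators: // allocators interchangeable: steal the buffer
// case _Pocma_values::_Propagate_allocators: // adopt _Right's allocator, then steal
//     break;
// case _Pocma_values::_No_propagate_allocators:
//     break; // allocators may differ: element-wise move, i.e. the "move" degrades to copies
// }
// ```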
template <class _Alloc, class _Value_type>
using _Rebind_alloc_t = typename allocator_traits<_Alloc>::template rebind_alloc<_Value_type>;

// If _Alloc is already rebound appropriately, binds an lvalue reference to it, avoiding a copy. Otherwise, creates a
// rebound copy.
template <class _Alloc, class _Value_type>
using _Maybe_rebind_alloc_t =
    typename _Select<is_same_v<typename _Alloc::value_type, _Value_type>>::template _Apply<_Alloc&,
        _Rebind_alloc_t<_Alloc, _Value_type>>;

template <class _Alloc> // tests if allocator has simple addressing
_INLINE_VAR constexpr bool _Is_simple_alloc_v =
    is_same_v<typename allocator_traits<_Alloc>::size_type, size_t>
    && is_same_v<typename allocator_traits<_Alloc>::difference_type, ptrdiff_t>
    && is_same_v<typename allocator_traits<_Alloc>::pointer, typename _Alloc::value_type*>
    && is_same_v<typename allocator_traits<_Alloc>::const_pointer, const typename _Alloc::value_type*>;

template <class _Value_type>
struct _Simple_types { // wraps types from allocators with simple addressing for use in iterators
                       // and other SCARY machinery
    using value_type      = _Value_type;
    using size_type       = size_t;
    using difference_type = ptrdiff_t;
    using pointer         = value_type*;
    using const_pointer   = const value_type*;
};

// The number of user bytes a single byte of ASAN shadow memory can track.
_INLINE_VAR constexpr size_t _Asan_granularity      = 8;
_INLINE_VAR constexpr size_t _Asan_granularity_mask = _Asan_granularity - 1;

struct _Asan_aligned_pointers {
    const void* _First;
    const void* _End;

    _NODISCARD constexpr const void* _Clamp_to_end(const void* _Mid) const noexcept {
        _STL_INTERNAL_CHECK(_Mid >= _First);
        if (_Mid > _End) {
            return _End;
        } else {
            return _Mid;
        }
    }
};

// The way that ASan shadow memory works, each eight byte block of memory ("shadow memory section")
// has a single byte to mark it as either poison or valid.
// Each section has 0 to 8 "valid" bytes followed by poison bytes, so:
// ```
// [ v v v p p p p p ]
// ```
// or
// ```
// [ v v v v v v v v ]
// ```
// are okay, but
// ```
// [ p p p p v v v v ]
// ```
// is not.
//
// This function exists to fix up `first` and `end` pointers so that one can call
// `__sanitizer_annotate_contiguous_container`:
//
// - `__sanitizer_annotate_contiguous_container` checks that `first` is aligned to an 8-byte boundary
// - if `end` is not aligned to an 8-byte boundary, `__sanitizer_annotate_contiguous_container` still poisons the
//   remaining bytes in the shadow memory section.
//
// Because of the second property, we can only mark poison up to the final aligned address before the true `last`.
// Otherwise, we'd poison the memory _after_ `last` as well.
// For the first property, we can assume that everything before `first` in the shadow memory section is valid
// (since otherwise we couldn't mark `first` valid), and so we just return back the first address in
// `first`'s shadow memory section.
//
// ### Example
//
// ```cpp
// struct alignas(8) cat {
//     int meow; // bytes [0, 4)
//     char buffer[16]; // bytes [4, 20)
//     int purr; // bytes [20, 24)
// };
// ```
//
// First, `meow` and `purr` are just regular data members, not container buffers, so they _must_ be valid.
// Then, assume we want to poison all of `buffer`.
// This would mean that, in a perfect world, we want something like:
//
// ```
// |  meow  |      buffer       |  purr  |
// [ v v v v p p p p ][ p p p p p p p p ][ p p p p v v v v ]
//        sm1                sm2                sm3
// ```
//
// However, note that by the rules above, `sm3` is not a valid shadow memory section; we always need
// the valid bytes to come before the poison bytes. Thus, the closest we can actually get to it is:
//
// ```
// |  meow  |      buffer       |  purr  |
// [ v v v v p p p p ][ p p p p p p p p ][ v v v v v v v v ]
//        sm1                sm2                sm3
// ```
//
// We call `aligned = _Get_asan_aligned_first_end(cat.buffer, cat.buffer + 16);`, and we get back
//
// ```cpp
// aligned = {
//     ._First = &cat.meow,
//     ._End   = cat.buffer + 12,
// };
// ```
//
// Then, we poison as much of buffer as we can via
//
// ```cpp
// __sanitizer_annotate_contiguous_container(
//     aligned._First,
//     aligned._End,
//     cat.buffer,
//     aligned._Clamp_to_end(cat.buffer + 16));
// ```
//
// We are allowed to assume that `&cat.meow` is valid, since otherwise `cat.buffer + [0, 4)` could not be valid.
// We cannot poison up to `cat.buffer + 16`, since then `&purr` could not be valid.
// Thus, this results in the shadow memory state from the second example.
_NODISCARD inline _Asan_aligned_pointers _Get_asan_aligned_first_end(
    const void* const _First, const void* const _End) noexcept {
    return {
        reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(_First) & ~_Asan_granularity_mask),
        reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(_End) & ~_Asan_granularity_mask),
    };
}
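// The rounding itself is plain mask arithmetic; for example, with
// _Asan_granularity_mask == 7:
//
// ```cpp
// static_assert((0x1003 & ~size_t{7}) == 0x1000); // _First snaps down to its section start
// static_assert((0x1015 & ~size_t{7}) == 0x1010); // _End snaps down as well
// ```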
// When we can assume that the allocator we are using will always align allocations to the 8-byte boundary,
// we can simply push the `_End` pointer to the end of the shadow memory section.
// This is _not_ safe in general (see _Get_asan_aligned_first_end's comment for why).
_NODISCARD inline const void* _Get_asan_aligned_after(const void* const _End) noexcept {
    return reinterpret_cast<const void*>(
        (reinterpret_cast<uintptr_t>(_End) + _Asan_granularity_mask) & ~_Asan_granularity_mask);
}

template <class _Container, class = void>
_INLINE_VAR constexpr size_t _Container_allocation_minimum_asan_alignment = alignof(typename _Container::value_type);

template <class _Container>
_INLINE_VAR constexpr size_t _Container_allocation_minimum_asan_alignment<_Container,
    void_t<decltype(_Container::allocator_type::_Minimum_asan_allocation_alignment)>> = (_STD max)(
    alignof(typename _Container::value_type), _Container::allocator_type::_Minimum_asan_allocation_alignment);

_EXPORT_STD template <class _Ty>
class allocator {
public:
    static_assert(!is_const_v<_Ty>, "The C++ Standard forbids containers of const elements "
                                    "because allocator<const T> is ill-formed.");
    static_assert(!is_function_v<_Ty>, "The C++ Standard forbids allocators for function elements "
                                       "because of [allocator.requirements].");
    static_assert(!is_reference_v<_Ty>, "The C++ Standard forbids allocators for reference elements "
                                        "because of [allocator.requirements].");

    using _From_primary = allocator;

    using value_type = _Ty;

#if _HAS_DEPRECATED_ALLOCATOR_MEMBERS
    using pointer _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS       = _Ty*;
    using const_pointer _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS = const _Ty*;

    using reference _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS       = _Ty&;
    using const_reference _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS = const _Ty&;
#endif // _HAS_DEPRECATED_ALLOCATOR_MEMBERS

    using size_type       = size_t;
    using difference_type = ptrdiff_t;

    using propagate_on_container_move_assignment            = true_type;
    using is_always_equal _CXX20_DEPRECATE_IS_ALWAYS_EQUAL = true_type;

#if _HAS_DEPRECATED_ALLOCATOR_MEMBERS
    template <class _Other>
    struct _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS rebind {
        using other = allocator<_Other>;
    };

    _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS _NODISCARD _Ty* address(_Ty& _Val) const noexcept {
        return _STD addressof(_Val);
    }

    _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS _NODISCARD const _Ty* address(const _Ty& _Val) const noexcept {
        return _STD addressof(_Val);
    }
#endif // _HAS_DEPRECATED_ALLOCATOR_MEMBERS

    constexpr allocator() noexcept {}

    constexpr allocator(const allocator&) noexcept = default;
    template <class _Other>
    constexpr allocator(const allocator<_Other>&) noexcept {}
    _CONSTEXPR20 ~allocator()                            = default;
    _CONSTEXPR20 allocator& operator=(const allocator&) = default;

    _CONSTEXPR20 void deallocate(_Ty* const _Ptr, const size_t _Count) {
        _STL_ASSERT(_Ptr != nullptr || _Count == 0, "null pointer cannot point to a block of non-zero size");
        // no overflow check on the following multiply; we assume _Allocate did that check
        _Deallocate<_New_alignof<_Ty>>(_Ptr, sizeof(_Ty) * _Count);
    }

    _NODISCARD_RAW_PTR_ALLOC _CONSTEXPR20 __declspec(allocator) _Ty* allocate(_CRT_GUARDOVERFLOW const size_t _Count) {
        static_assert(sizeof(value_type) > 0, "value_type must be complete before calling allocate.");
        return static_cast<_Ty*>(_Allocate<_New_alignof<_Ty>>(_Get_size_of_n<sizeof(_Ty)>(_Count)));
    }

#if _HAS_CXX23
    _NODISCARD_RAW_PTR_ALLOC constexpr allocation_result<_Ty*> allocate_at_least(
        _CRT_GUARDOVERFLOW const size_t _Count) {
        return {allocate(_Count), _Count};
    }
#endif // _HAS_CXX23

#if _HAS_DEPRECATED_ALLOCATOR_MEMBERS
    _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS _NODISCARD_RAW_PTR_ALLOC __declspec(allocator) _Ty* allocate(
        _CRT_GUARDOVERFLOW const size_t _Count, const void*) {
        return allocate(_Count);
    }
    template <class _Objty, class... _Types>
    _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS void construct(_Objty* const _Ptr, _Types&&... _Args) {
        ::new (const_cast<void*>(static_cast<const volatile void*>(_Ptr))) _Objty(_STD forward<_Types>(_Args)...);
    }

    template <class _Uty>
    _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS void destroy(_Uty* const _Ptr) {
        _Ptr->~_Uty();
    }

    _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS _NODISCARD size_t max_size() const noexcept {
        return static_cast<size_t>(-1) / sizeof(_Ty);
    }
#endif // _HAS_DEPRECATED_ALLOCATOR_MEMBERS

    static constexpr size_t _Minimum_asan_allocation_alignment = _Asan_granularity;
};

#if _HAS_DEPRECATED_ALLOCATOR_VOID || _HAS_DEPRECATED_ALLOCATOR_MEMBERS
template <>
class allocator<void> {
public:
    using value_type = void;
#if _HAS_DEPRECATED_ALLOCATOR_MEMBERS
    using pointer _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS       = void*;
    using const_pointer _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS = const void*;

    template <class _Other>
    struct _CXX17_DEPRECATE_OLD_ALLOCATOR_MEMBERS rebind {
        using other = allocator<_Other>;
    };
#endif // _HAS_DEPRECATED_ALLOCATOR_MEMBERS

#if _HAS_CXX20
    using size_type       = size_t;
    using difference_type = ptrdiff_t;

    using propagate_on_container_move_assignment            = true_type;
    using is_always_equal _CXX20_DEPRECATE_IS_ALWAYS_EQUAL = true_type;
#endif // _HAS_CXX20
};
#endif // _HAS_DEPRECATED_ALLOCATOR_VOID || _HAS_DEPRECATED_ALLOCATOR_MEMBERS

_EXPORT_STD template <class _Ty, class _Other>
_NODISCARD _CONSTEXPR20 bool operator==(const allocator<_Ty>&, const allocator<_Other>&) noexcept {
    return true;
}

#if !_HAS_CXX20
template <class _Ty, class _Other>
_NODISCARD bool operator!=(const allocator<_Ty>&, const allocator<_Other>&) noexcept {
    return false;
}
#endif // !_HAS_CXX20

#if _HAS_CXX17
// See N4892 [unord.map.overview]/4
template <class _Alloc>
using _Guide_size_type_t =
    typename allocator_traits<conditional_t<_Is_allocator<_Alloc>::value, _Alloc, allocator<int>>>::size_type;
#endif // _HAS_CXX17

template <class _Alloc>
using _Alloc_ptr_t = typename allocator_traits<_Alloc>::pointer;

template <class _Alloc>
using _Alloc_size_t = typename allocator_traits<_Alloc>::size_type;

template <class _Alloc>
_CONSTEXPR20 void _Pocca(_Alloc& _Left, const _Alloc& _Right) noexcept {
    if constexpr (allocator_traits<_Alloc>::propagate_on_container_copy_assignment::value) {
        _Left = _Right;
    }
}

template <class _Alloc>
_CONSTEXPR20 void _Pocma(_Alloc& _Left, _Alloc& _Right) noexcept { // (maybe) propagate on container move assignment
    if constexpr (allocator_traits<_Alloc>::propagate_on_container_move_assignment::value) {
        _Left = _STD move(_Right);
    }
}

template <class _Alloc>
_CONSTEXPR20 void _Pocs(_Alloc& _Left, _Alloc& _Right) noexcept {
    if constexpr (allocator_traits<_Alloc>::propagate_on_container_swap::value) {
        _Swap_adl(_Left, _Right);
    } else {
        _STL_ASSERT(_Left == _Right, "containers incompatible for swap");
    }
}
template <class _Alloc>
_CONSTEXPR20 void _Destroy_range(_Alloc_ptr_t<_Alloc> _First, const _Alloc_ptr_t<_Alloc> _Last, _Alloc& _Al) noexcept {
    // note that this is an optimization for debug mode codegen; in release mode the BE removes all of this
    using _Ty = typename _Alloc::value_type;
    if constexpr (!conjunction_v<is_trivially_destructible<_Ty>, _Uses_default_destroy<_Alloc, _Ty*>>) {
        for (; _First != _Last; ++_First) {
            allocator_traits<_Alloc>::destroy(_Al, _Unfancy(_First));
        }
    }
}

template <class _NoThrowFwdIt, class _NoThrowSentinel>
_CONSTEXPR20 void _Destroy_range(_NoThrowFwdIt _First, const _NoThrowSentinel _Last) noexcept {
    // note that this is an optimization for debug mode codegen; in release mode the BE removes all of this
    if constexpr (!is_trivially_destructible_v<_Iter_value_t<_NoThrowFwdIt>>) {
        for (; _First != _Last; ++_First) {
            _Destroy_in_place(*_First);
        }
    }
}

template <class _Size_type, class _Unsigned_type>
_NODISCARD constexpr _Size_type _Convert_size(const _Unsigned_type _Len) noexcept(
    sizeof(_Unsigned_type) <= sizeof(_Size_type)) {
    // convert _Unsigned_type to _Size_type, avoiding truncation
    _STL_INTERNAL_STATIC_ASSERT(_Unsigned_type(-1) > 0);
    _STL_INTERNAL_STATIC_ASSERT(_Size_type(-1) > 0);

    if constexpr (sizeof(_Unsigned_type) > sizeof(_Size_type)) {
        if (_Len > (numeric_limits<_Size_type>::max)()) {
            _Xlength_error("size is too long for _Size_type");
        }
    }

    return static_cast<_Size_type>(_Len);
}

template <class _Alloc>
_CONSTEXPR20 void _Deallocate_plain(_Alloc& _Al, typename _Alloc::value_type* const _Ptr) noexcept {
    // deallocate a plain pointer using an allocator
    using _Alloc_traits = allocator_traits<_Alloc>;
    if constexpr (is_same_v<_Alloc_ptr_t<_Alloc>, typename _Alloc::value_type*>) {
        _Alloc_traits::deallocate(_Al, _Ptr, 1);
    } else {
        using _Ptr_traits = pointer_traits<_Alloc_ptr_t<_Alloc>>;
        _Alloc_traits::deallocate(_Al, _Ptr_traits::pointer_to(*_Ptr), 1);
    }
}

template <class _Alloc>
_CONSTEXPR20 void _Delete_plain_internal(_Alloc& _Al, typename _Alloc::value_type* const _Ptr) noexcept {
    // destroy *_Ptr in place, then deallocate _Ptr using _Al; used for internal container types the user didn't name
    using _Ty = typename _Alloc::value_type;
    _Ptr->~_Ty();
    _Deallocate_plain(_Al, _Ptr);
}

template <class _Alloc>
struct _Alloc_construct_ptr { // pointer used to help construct 1 _Alloc::value_type without EH
    using pointer = _Alloc_ptr_t<_Alloc>;

    _Alloc& _Al;
    pointer _Ptr;

    _CONSTEXPR20 explicit _Alloc_construct_ptr(_Alloc& _Al_) : _Al(_Al_), _Ptr(nullptr) {}

    _NODISCARD _CONSTEXPR20 pointer _Release() noexcept { // disengage *this and return contained pointer
        return _STD exchange(_Ptr, nullptr);
    }

    _CONSTEXPR20 void _Allocate() { // disengage *this, then allocate a new memory block
        _Ptr = nullptr; // if allocate throws, prevents double-free
        _Ptr = _Al.allocate(1);
    }

    _CONSTEXPR20 ~_Alloc_construct_ptr() { // if this instance is engaged, deallocate storage
        if (_Ptr) {
            _Al.deallocate(_Ptr, 1);
        }
    }

    _Alloc_construct_ptr(const _Alloc_construct_ptr&)            = delete;
    _Alloc_construct_ptr& operator=(const _Alloc_construct_ptr&) = delete;
};
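// Typical usage pattern (a sketch of how node-based containers use this guard;
// _Alnode, _Alnode_traits, _Myval, and _Val are hypothetical here):
//
// ```cpp
// _Alloc_construct_ptr<_Alnode> _Newnode(_Al);
// _Newnode._Allocate(); // a throw here leaves nothing to clean up
// _Alnode_traits::construct(_Al, _STD addressof(_Newnode._Ptr->_Myval), _Val);
// // a throw above lets ~_Alloc_construct_ptr deallocate the node
// auto _Ptr = _Newnode._Release(); // success: disengage the guard, keep the node
// ```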
struct _Fake_allocator {};

struct _Container_base0 {
    _CONSTEXPR20 void _Orphan_all() noexcept {}
    _CONSTEXPR20 void _Swap_proxy_and_iterators(_Container_base0&) noexcept {}
    _CONSTEXPR20 void _Alloc_proxy(const _Fake_allocator&) noexcept {}
    _CONSTEXPR20 void _Reload_proxy(const _Fake_allocator&, const _Fake_allocator&) noexcept {}
};

struct _Iterator_base0 {
    _CONSTEXPR20 void _Adopt(const void*) noexcept {}
    _CONSTEXPR20 const _Container_base0* _Getcont() const noexcept {
        return nullptr;
    }

    static constexpr bool _Unwrap_when_unverified = true;
};

struct _Container_base12;
struct _Iterator_base12;

struct _Container_proxy { // store head of iterator chain and back pointer
    _CONSTEXPR20 _Container_proxy() noexcept = default;
    _CONSTEXPR20 _Container_proxy(_Container_base12* _Mycont_) noexcept : _Mycont(_Mycont_) {}

    const _Container_base12* _Mycont       = nullptr;
    mutable _Iterator_base12* _Myfirstiter = nullptr;
};

struct _Container_base12 {
public:
    _CONSTEXPR20 _Container_base12() noexcept = default;

    _Container_base12(const _Container_base12&)            = delete;
    _Container_base12& operator=(const _Container_base12&) = delete;

    _CONSTEXPR20 void _Orphan_all() noexcept;
    _CONSTEXPR20 void _Swap_proxy_and_iterators(_Container_base12&) noexcept;

    template <class _Alloc>
    _CONSTEXPR20 void _Alloc_proxy(_Alloc&& _Al) {
        _Container_proxy* const _New_proxy = _Unfancy(_Al.allocate(1));
        _Construct_in_place(*_New_proxy, this);
        _Myproxy            = _New_proxy;
        _New_proxy->_Mycont = this;
    }

    template <class _Alloc>
    _CONSTEXPR20 void _Reload_proxy(_Alloc&& _Old_alloc, _Alloc&& _New_alloc) {
        // pre: no iterators refer to the existing proxy
        _Container_proxy* const _New_proxy = _Unfancy(_New_alloc.allocate(1));
        _Construct_in_place(*_New_proxy, this);
        _New_proxy->_Mycont = this;
        _Delete_plain_internal(_Old_alloc, _STD exchange(_Myproxy, _New_proxy));
    }

    _Container_proxy* _Myproxy = nullptr;

private:
    _CONSTEXPR20 void _Orphan_all_unlocked_v3() noexcept;
    _CONSTEXPR20 void _Swap_proxy_and_iterators_unlocked(_Container_base12&) noexcept;

    void _Orphan_all_locked_v3() noexcept {
        _Lockit _Lock(_LOCK_DEBUG);
        _Orphan_all_unlocked_v3();
    }

    void _Swap_proxy_and_iterators_locked(_Container_base12& _Right) noexcept {
        _Lockit _Lock(_LOCK_DEBUG);
        _Swap_proxy_and_iterators_unlocked(_Right);
    }
};
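// Debug-mode ownership graph maintained by the machinery above and below (a sketch):
//
// ```
// _Container_base12 --_Myproxy--> _Container_proxy --_Myfirstiter--> iter1 --_Mynextiter--> iter2 --> ...
//         ^                              |
//         +-----------_Mycont-----------+   (each iterator also points back via its own _Myproxy)
// ```
//
// _Orphan_all walks the singly linked iterator chain and nulls each iterator's _Myproxy;
// that is how invalidated iterators are detected when _ITERATOR_DEBUG_LEVEL == 2.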
struct _Iterator_base12 { // store links to container proxy, next iterator
public:
    _CONSTEXPR20 _Iterator_base12() noexcept = default; // construct orphaned iterator

    _CONSTEXPR20 _Iterator_base12(const _Iterator_base12& _Right) noexcept {
        *this = _Right;
    }

    _CONSTEXPR20 _Iterator_base12& operator=(const _Iterator_base12& _Right) noexcept {
#if _ITERATOR_DEBUG_LEVEL == 2
#if _HAS_CXX20
        if (_STD is_constant_evaluated()) {
            _Assign_unlocked(_Right);
        } else
#endif // _HAS_CXX20
        {
            _Assign_locked(_Right);
        }
#else // ^^^ _ITERATOR_DEBUG_LEVEL == 2 / _ITERATOR_DEBUG_LEVEL != 2 vvv
        _Myproxy = _Right._Myproxy;
#endif // _ITERATOR_DEBUG_LEVEL != 2
        return *this;
    }

#if _ITERATOR_DEBUG_LEVEL == 2
    _CONSTEXPR20 ~_Iterator_base12() noexcept {
#if _HAS_CXX20
        if (_STD is_constant_evaluated()) {
            _Orphan_me_unlocked_v3();
        } else
#endif // _HAS_CXX20
        {
            _Orphan_me_locked_v3();
        }
    }

    _CONSTEXPR20 void _Adopt(const _Container_base12* _Parent) noexcept {
#if _HAS_CXX20
        if (_STD is_constant_evaluated()) {
            _Adopt_unlocked(_Parent);
        } else
#endif // _HAS_CXX20
        {
            _Adopt_locked(_Parent);
        }
    }
#else // ^^^ _ITERATOR_DEBUG_LEVEL == 2 / _ITERATOR_DEBUG_LEVEL != 2 vvv
    _CONSTEXPR20 void _Adopt(const _Container_base12* _Parent) noexcept {
        if (_Parent) { // have a parent, do adoption
            _Myproxy = _Parent->_Myproxy;
        } else { // no future parent, just disown current parent
            _Myproxy = nullptr;
        }
    }
#endif // _ITERATOR_DEBUG_LEVEL != 2

    _CONSTEXPR20 const _Container_base12* _Getcont() const noexcept {
        return _Myproxy ? _Myproxy->_Mycont : nullptr;
    }

    static constexpr bool _Unwrap_when_unverified = _ITERATOR_DEBUG_LEVEL == 0;

    mutable _Container_proxy* _Myproxy    = nullptr;
    mutable _Iterator_base12* _Mynextiter = nullptr;

#if _ITERATOR_DEBUG_LEVEL == 2
private:
    _CONSTEXPR20 void _Assign_unlocked(const _Iterator_base12& _Right) noexcept {
        if (_Myproxy == _Right._Myproxy) {
            return;
        }

        if (_Right._Myproxy) {
            _Adopt_unlocked(_Right._Myproxy->_Mycont);
        } else { // becoming invalid, disown current parent
            _Orphan_me_unlocked_v3();
        }
    }

    void _Assign_locked(const _Iterator_base12& _Right) noexcept {
        _Lockit _Lock(_LOCK_DEBUG);
        _Assign_unlocked(_Right);
    }

    _CONSTEXPR20 void _Adopt_unlocked(const _Container_base12* _Parent) noexcept {
        if (!_Parent) {
            _Orphan_me_unlocked_v3();
            return;
        }

        _Container_proxy* _Parent_proxy = _Parent->_Myproxy;
        if (_Myproxy != _Parent_proxy) { // change parentage
            if (_Myproxy) { // adopted, remove self from list
                _Orphan_me_unlocked_v3();
            }

            _Mynextiter                 = _Parent_proxy->_Myfirstiter;
            _Parent_proxy->_Myfirstiter = this;
            _Myproxy                    = _Parent_proxy;
        }
    }

    void _Adopt_locked(const _Container_base12* _Parent) noexcept {
        _Lockit _Lock(_LOCK_DEBUG);
        _Adopt_unlocked(_Parent);
    }

    _CONSTEXPR20 void _Orphan_me_unlocked_v3() noexcept {
        if (!_Myproxy) { // already orphaned
            return;
        }

        // adopted, remove self from list
        _Iterator_base12** _Pnext = &_Myproxy->_Myfirstiter;
        while (*_Pnext && *_Pnext != this) {
            _Pnext = &(*_Pnext)->_Mynextiter;
        }

        _STL_VERIFY(*_Pnext, "ITERATOR LIST CORRUPTED!");
        *_Pnext  = _Mynextiter;
        _Myproxy = nullptr;
    }

    void _Orphan_me_locked_v3() noexcept {
        _Lockit _Lock(_LOCK_DEBUG);
        _Orphan_me_unlocked_v3();
    }
#endif // _ITERATOR_DEBUG_LEVEL == 2
};

_CONSTEXPR20 void _Container_base12::_Orphan_all_unlocked_v3() noexcept {
    if (!_Myproxy) { // no proxy, already done
        return;
    }

    // proxy allocated, drain it
    for (auto _Pnext = _STD exchange(_Myproxy->_Myfirstiter, nullptr); _Pnext; _Pnext = _Pnext->_Mynextiter) {
        _Pnext->_Myproxy = nullptr;
    }
}

_CONSTEXPR20 void _Container_base12::_Orphan_all() noexcept {
#if _ITERATOR_DEBUG_LEVEL == 2
#if _HAS_CXX20
    if (_STD is_constant_evaluated()) {
        _Orphan_all_unlocked_v3();
    } else
#endif // _HAS_CXX20
    {
        _Orphan_all_locked_v3();
    }
#endif // _ITERATOR_DEBUG_LEVEL == 2
}

_CONSTEXPR20 void _Container_base12::_Swap_proxy_and_iterators_unlocked(_Container_base12& _Right) noexcept {
    _Container_proxy* _Temp = _Myproxy;
    _Myproxy                = _Right._Myproxy;
    _Right._Myproxy         = _Temp;

    if (_Myproxy) {
        _Myproxy->_Mycont = this;
    }

    if (_Right._Myproxy) {
        _Right._Myproxy->_Mycont = &_Right;
    }
}

_CONSTEXPR20 void _Container_base12::_Swap_proxy_and_iterators(_Container_base12& _Right) noexcept {
#if _ITERATOR_DEBUG_LEVEL == 2
#if _HAS_CXX20
    if (_STD is_constant_evaluated()) {
        _Swap_proxy_and_iterators_unlocked(_Right);
    } else
#endif // _HAS_CXX20
    {
        _Swap_proxy_and_iterators_locked(_Right);
    }
#else // ^^^ _ITERATOR_DEBUG_LEVEL == 2 / _ITERATOR_DEBUG_LEVEL != 2 vvv
    _Swap_proxy_and_iterators_unlocked(_Right);
#endif // _ITERATOR_DEBUG_LEVEL != 2
}

#if _ITERATOR_DEBUG_LEVEL == 0
using _Container_base = _Container_base0;
using _Iterator_base  = _Iterator_base0;
#else // _ITERATOR_DEBUG_LEVEL == 0
using _Container_base = _Container_base12;
using _Iterator_base  = _Iterator_base12;
#endif // _ITERATOR_DEBUG_LEVEL == 0

struct _Leave_proxy_unbound {
    explicit _Leave_proxy_unbound() = default;
}; // tag to indicate that a proxy is being allocated before it is safe to bind to a _Container_base12
struct _Fake_proxy_ptr_impl { // fake replacement for a container proxy smart pointer when no container proxy is in use
    _Fake_proxy_ptr_impl(const _Fake_proxy_ptr_impl&)            = delete;
    _Fake_proxy_ptr_impl& operator=(const _Fake_proxy_ptr_impl&) = delete;

    _CONSTEXPR20 _Fake_proxy_ptr_impl(const _Fake_allocator&, _Leave_proxy_unbound) noexcept {}
    _CONSTEXPR20 _Fake_proxy_ptr_impl(const _Fake_allocator&, const _Container_base0&) noexcept {}

    _CONSTEXPR20 void _Bind(const _Fake_allocator&, _Container_base0*) noexcept {}
    _CONSTEXPR20 void _Release() noexcept {}
};

struct _Basic_container_proxy_ptr12 {
    // smart pointer components for a _Container_proxy * that don't depend on the allocator
    _Container_proxy* _Ptr = nullptr;

    constexpr void _Release() noexcept { // disengage this _Basic_container_proxy_ptr12
        _Ptr = nullptr;
    }

protected:
    _CONSTEXPR20 _Basic_container_proxy_ptr12()                       = default;
    _Basic_container_proxy_ptr12(const _Basic_container_proxy_ptr12&) = delete;
    _Basic_container_proxy_ptr12(_Basic_container_proxy_ptr12&&)      = delete;
};

template <class _Alloc>
struct _Container_proxy_ptr12 : _Basic_container_proxy_ptr12 {
    // smart pointer components for a _Container_proxy * for an allocator family
    _Alloc& _Al;

    _CONSTEXPR20 _Container_proxy_ptr12(_Alloc& _Al_, _Leave_proxy_unbound) : _Al(_Al_) {
        // create a new unbound _Container_proxy
        _Ptr = _Unfancy(_Al_.allocate(1));
        _Construct_in_place(*_Ptr);
    }

    _CONSTEXPR20 _Container_proxy_ptr12(_Alloc& _Al_, _Container_base12& _Mycont) : _Al(_Al_) {
        // create a new _Container_proxy pointing at _Mycont
        _Ptr = _Unfancy(_Al_.allocate(1));
        _Construct_in_place(*_Ptr, _STD addressof(_Mycont));
        _Mycont._Myproxy = _Ptr;
    }

    _CONSTEXPR20 void _Bind(_Alloc& _Old_alloc, _Container_base12* _Mycont) noexcept {
        // Attach the proxy stored in *this to _Mycont, and destroy _Mycont's existing proxy
        // with _Old_alloc. Requires that no iterators are alive referring to _Mycont.
        _Ptr->_Mycont = _Mycont;
        _Delete_plain_internal(_Old_alloc, _STD exchange(_Mycont->_Myproxy, _STD exchange(_Ptr, nullptr)));
    }

    _CONSTEXPR20 ~_Container_proxy_ptr12() {
        if (_Ptr) {
            _Delete_plain_internal(_Al, _Ptr);
        }
    }
};

#if _ITERATOR_DEBUG_LEVEL == 0
_INLINE_VAR constexpr _Fake_allocator _Fake_alloc{};
#define _GET_PROXY_ALLOCATOR(_Alty, _Al) _Fake_alloc // TRANSITION, VSO-1284799, should be _Fake_allocator{}
template <class _Alloc>
using _Container_proxy_ptr = _Fake_proxy_ptr_impl;
#else // _ITERATOR_DEBUG_LEVEL == 0
#define _GET_PROXY_ALLOCATOR(_Alty, _Al) static_cast<_Rebind_alloc_t<_Alty, _Container_proxy>>(_Al)
template <class _Alloc>
using _Container_proxy_ptr = _Container_proxy_ptr12<_Rebind_alloc_t<_Alloc, _Container_proxy>>;
#endif // _ITERATOR_DEBUG_LEVEL == 0

struct _Zero_then_variadic_args_t {
    explicit _Zero_then_variadic_args_t() = default;
}; // tag type for value-initializing first, constructing second from remaining args

struct _One_then_variadic_args_t {
    explicit _One_then_variadic_args_t() = default;
}; // tag type for constructing first from one arg, constructing second from remaining args
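// _Compressed_pair below applies the empty base class optimization: when _Ty1 is empty
// and non-final (typical for stateless allocators and comparators), deriving from it
// lets the pair occupy only sizeof(_Ty2). Illustrative check (not asserted by this
// header itself):
//
// ```cpp
// static_assert(sizeof(_Compressed_pair<allocator<int>, int*>) == sizeof(int*),
//     "a stateless allocator contributes no storage");
// ```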
template <class _Ty1, class _Ty2, bool = is_empty_v<_Ty1> && !is_final_v<_Ty1>>
class _Compressed_pair final : private _Ty1 { // store a pair of values, deriving from empty first
public:
    _Ty2 _Myval2;

    using _Mybase = _Ty1; // for visualization

    template <class... _Other2>
    constexpr explicit _Compressed_pair(_Zero_then_variadic_args_t, _Other2&&... _Val2) noexcept(
        conjunction_v<is_nothrow_default_constructible<_Ty1>, is_nothrow_constructible<_Ty2, _Other2...>>)
        : _Ty1(), _Myval2(_STD forward<_Other2>(_Val2)...) {}

    template <class _Other1, class... _Other2>
    constexpr _Compressed_pair(_One_then_variadic_args_t, _Other1&& _Val1, _Other2&&... _Val2) noexcept(
        conjunction_v<is_nothrow_constructible<_Ty1, _Other1>, is_nothrow_constructible<_Ty2, _Other2...>>)
        : _Ty1(_STD forward<_Other1>(_Val1)), _Myval2(_STD forward<_Other2>(_Val2)...) {}

    constexpr _Ty1& _Get_first() noexcept {
        return *this;
    }

    constexpr const _Ty1& _Get_first() const noexcept {
        return *this;
    }
};

template <class _Ty1, class _Ty2>
class _Compressed_pair<_Ty1, _Ty2, false> final { // store a pair of values, not deriving from first
public:
    _Ty1 _Myval1;
    _Ty2 _Myval2;

    template <class... _Other2>
    constexpr explicit _Compressed_pair(_Zero_then_variadic_args_t, _Other2&&... _Val2) noexcept(
        conjunction_v<is_nothrow_default_constructible<_Ty1>, is_nothrow_constructible<_Ty2, _Other2...>>)
        : _Myval1(), _Myval2(_STD forward<_Other2>(_Val2)...) {}

    template <class _Other1, class... _Other2>
    constexpr _Compressed_pair(_One_then_variadic_args_t, _Other1&& _Val1, _Other2&&... _Val2) noexcept(
        conjunction_v<is_nothrow_constructible<_Ty1, _Other1>, is_nothrow_constructible<_Ty2, _Other2...>>)
        : _Myval1(_STD forward<_Other1>(_Val1)), _Myval2(_STD forward<_Other2>(_Val2)...) {}

    constexpr _Ty1& _Get_first() noexcept {
        return _Myval1;
    }

    constexpr const _Ty1& _Get_first() const noexcept {
        return _Myval1;
    }
};

struct _Move_allocator_tag {
    explicit _Move_allocator_tag() = default;
};

template <class _Ty>
pair<_Ty*, ptrdiff_t> _Get_temporary_buffer(ptrdiff_t _Count) noexcept {
    if (static_cast<size_t>(_Count) <= static_cast<size_t>(-1) / sizeof(_Ty)) {
        for (; 0 < _Count; _Count /= 2) {
            const auto _Size = static_cast<size_t>(_Count) * sizeof(_Ty);
            void* _Pbuf;

#ifdef __cpp_aligned_new
            if constexpr (alignof(_Ty) > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
                _Pbuf = ::operator new(_Size, align_val_t{alignof(_Ty)}, nothrow);
            } else
#endif // __cpp_aligned_new
            {
                _Pbuf = ::operator new(_Size, nothrow);
            }

            if (_Pbuf) {
                return {static_cast<_Ty*>(_Pbuf), _Count};
            }
        }
    }

    return {nullptr, 0};
}

template <class _Ty>
void _Return_temporary_buffer(_Ty* const _Pbuf) noexcept {
#ifdef __cpp_aligned_new
    if constexpr (alignof(_Ty) > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
        ::operator delete(_Pbuf, align_val_t{alignof(_Ty)});
    } else
#endif // __cpp_aligned_new
    {
        ::operator delete(_Pbuf);
    }
}

template <class _NoThrowFwdIt>
struct _NODISCARD _Uninitialized_backout {
    // struct to undo partially constructed ranges in _Uninitialized_xxx algorithms
    _NoThrowFwdIt _First;
    _NoThrowFwdIt _Last;

    constexpr explicit _Uninitialized_backout(_NoThrowFwdIt _Dest) : _First(_Dest), _Last(_Dest) {}

    constexpr _Uninitialized_backout(_NoThrowFwdIt _First_, _NoThrowFwdIt _Last_) : _First(_First_), _Last(_Last_) {}

    _Uninitialized_backout(const _Uninitialized_backout&)            = delete;
    _Uninitialized_backout& operator=(const _Uninitialized_backout&) = delete;

    _CONSTEXPR20 ~_Uninitialized_backout() {
        _Destroy_range(_First, _Last);
    }

    template <class... _Types>
    _CONSTEXPR20 void _Emplace_back(_Types&&... _Vals) {
        // construct a new element at *_Last and increment
        _Construct_in_place(*_Last, _STD forward<_Types>(_Vals)...);
        ++_Last;
    }

    constexpr _NoThrowFwdIt _Release() { // suppress any exception handling backout and return _Last
        _First = _Last;
        return _Last;
    }
};
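// The commit-or-rollback idiom used by the _Uninitialized_* algorithms, in miniature
// (a sketch; _Raw, _Src, and _Count are hypothetical):
//
// ```cpp
// _Uninitialized_backout<_Ty*> _Backout{_Raw}; // _Raw points at uninitialized storage
// for (size_t _Idx = 0; _Idx < _Count; ++_Idx) {
//     _Backout._Emplace_back(_Src[_Idx]); // a throw here destroys [_Raw, _Raw + _Idx)
// }
// _Ty* _New_last = _Backout._Release(); // success: nothing gets destroyed
// ```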
template <class _InIt, class _NoThrowFwdIt>
_CONSTEXPR20 _NoThrowFwdIt _Uninitialized_move_unchecked(_InIt _First, const _InIt _Last, _NoThrowFwdIt _Dest) {
    // move [_First, _Last) to raw [_Dest, ...)
    if constexpr (_Iter_move_cat<_InIt, _NoThrowFwdIt>::_Bitcopy_constructible) {
#if _HAS_CXX20
        if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
        {
            return _Copy_memmove(_First, _Last, _Dest);
        }
    }

    _Uninitialized_backout<_NoThrowFwdIt> _Backout{_Dest};
    for (; _First != _Last; ++_First) {
        _Backout._Emplace_back(_STD move(*_First));
    }

    return _Backout._Release();
}

#ifdef __cpp_lib_concepts
namespace ranges {
    template <class _It>
    concept _No_throw_input_iterator = input_iterator<_It> //
        && is_lvalue_reference_v<iter_reference_t<_It>> //
        && same_as<remove_cvref_t<iter_reference_t<_It>>, remove_reference_t<iter_reference_t<_It>>> // per LWG-3888
        && same_as<remove_cvref_t<iter_reference_t<_It>>, iter_value_t<_It>>;

    template <class _Se, class _It>
    concept _No_throw_sentinel_for = sentinel_for<_Se, _It>;

    template <class _It>
    concept _No_throw_forward_iterator = _No_throw_input_iterator<_It> //
                                      && forward_iterator<_It> //
                                      && _No_throw_sentinel_for<_It, _It>;

    template <class _Rng>
    concept _No_throw_input_range = range<_Rng> //
                                 && _No_throw_input_iterator<iterator_t<_Rng>> //
                                 && _No_throw_sentinel_for<sentinel_t<_Rng>, iterator_t<_Rng>>;

    template <class _Rng>
    concept _No_throw_forward_range = _No_throw_input_range<_Rng> //
                                   && _No_throw_forward_iterator<iterator_t<_Rng>>;

    template <class _InIt, class _OutIt>
    in_out_result<_InIt, _OutIt> _Copy_memcpy_count(_InIt _IFirst, _OutIt _OFirst, const size_t _Count) noexcept {
        const auto _IFirstPtr     = _To_address(_IFirst);
        const auto _OFirstPtr     = _To_address(_OFirst);
        const auto _IFirst_ch     = const_cast<char*>(reinterpret_cast<const volatile char*>(_IFirstPtr));
        const auto _OFirst_ch     = const_cast<char*>(reinterpret_cast<const volatile char*>(_OFirstPtr));
        const size_t _Count_bytes = _Count * sizeof(iter_value_t<_InIt>);
        _CSTD memcpy(_OFirst_ch, _IFirst_ch, _Count_bytes);
        if constexpr (is_pointer_v<_InIt>) {
            _IFirst = reinterpret_cast<_InIt>(_IFirst_ch + _Count_bytes);
        } else {
            _IFirst += static_cast<iter_difference_t<_InIt>>(_Count);
        }

        if constexpr (is_pointer_v<_OutIt>) {
            _OFirst = reinterpret_cast<_OutIt>(_OFirst_ch + _Count_bytes);
        } else {
            _OFirst += static_cast<iter_difference_t<_OutIt>>(_Count);
        }

        return {_STD move(_IFirst), _STD move(_OFirst)};
    }

    template <class _InIt, class _OutIt, class _DistIt>
    in_out_result<_InIt, _OutIt> _Copy_memcpy_distance(
        _InIt _IFirst, _OutIt _OFirst, const _DistIt _DFirst, const _DistIt _DLast) noexcept {
        // equivalent to _Copy_memcpy_count(_IFirst, _OFirst, _DLast - _DFirst) but computes distance more efficiently
        const auto _IFirstPtr   = _To_address(_IFirst);
        const auto _OFirstPtr   = _To_address(_OFirst);
        const auto _DFirstPtr   = _To_address(_DFirst);
        const auto _DLastPtr    = _To_address(_DLast);
        const auto _IFirst_ch   = const_cast<char*>(reinterpret_cast<const volatile char*>(_IFirstPtr));
        const auto _OFirst_ch   = const_cast<char*>(reinterpret_cast<const volatile char*>(_OFirstPtr));
        const auto _DFirst_ch   = const_cast<const char*>(reinterpret_cast<const volatile char*>(_DFirstPtr));
        const auto _DLast_ch    = const_cast<const char*>(reinterpret_cast<const volatile char*>(_DLastPtr));
        const auto _Count_bytes = static_cast<size_t>(_DLast_ch - _DFirst_ch);
        _CSTD memcpy(_OFirst_ch, _IFirst_ch, _Count_bytes);
        if constexpr (is_pointer_v<_InIt>) {
            _IFirst = reinterpret_cast<_InIt>(_IFirst_ch + _Count_bytes);
        } else {
            _IFirst += _Count_bytes / sizeof(iter_value_t<_InIt>);
        }

        if constexpr (is_pointer_v<_OutIt>) {
            _OFirst = reinterpret_cast<_OutIt>(_OFirst_ch + _Count_bytes);
        } else {
            _OFirst += _Count_bytes / sizeof(iter_value_t<_OutIt>);
        }

        return {_STD move(_IFirst), _STD move(_OFirst)};
    }

    template <class _InIt, class _OutIt>
    in_out_result<_InIt, _OutIt> _Copy_memcpy_common(
        _InIt _IFirst, _InIt _ILast, _OutIt _OFirst, _OutIt _OLast) noexcept {
        const auto _IFirstPtr = _To_address(_IFirst);
        const auto _ILastPtr  = _To_address(_ILast);
        const auto _OFirstPtr = _To_address(_OFirst);
        const auto _OLastPtr  = _To_address(_OLast);
        const auto _IFirst_ch = const_cast<char*>(reinterpret_cast<const volatile char*>(_IFirstPtr));
        const auto _ILast_ch  = const_cast<const char*>(reinterpret_cast<const volatile char*>(_ILastPtr));
        const auto _OFirst_ch = const_cast<char*>(reinterpret_cast<const volatile char*>(_OFirstPtr));
        const auto _OLast_ch    = const_cast<const char*>(reinterpret_cast<const volatile char*>(_OLastPtr));
        const auto _Count_bytes = static_cast<size_t>((_STD min)(_ILast_ch - _IFirst_ch, _OLast_ch - _OFirst_ch));
        _CSTD memcpy(_OFirst_ch, _IFirst_ch, _Count_bytes);
        if constexpr (is_pointer_v<_InIt>) {
            _IFirst = reinterpret_cast<_InIt>(_IFirst_ch + _Count_bytes);
        } else {
            _IFirst += static_cast<iter_difference_t<_InIt>>(_Count_bytes / sizeof(iter_value_t<_InIt>));
        }

        if constexpr (is_pointer_v<_OutIt>) {
            _OFirst = reinterpret_cast<_OutIt>(_OFirst_ch + _Count_bytes);
        } else {
            _OFirst += static_cast<iter_difference_t<_OutIt>>(_Count_bytes / sizeof(iter_value_t<_OutIt>));
        }

        return {_STD move(_IFirst), _STD move(_OFirst)};
    }

    _EXPORT_STD template <class _In, class _Out>
    using uninitialized_move_result = in_out_result<_In, _Out>;

    template <_No_throw_input_iterator _It, _No_throw_sentinel_for<_It> _Se, _No_throw_forward_iterator _Out,
        _No_throw_sentinel_for<_Out> _OSe>
        requires (constructible_from<iter_value_t<_Out>, iter_rvalue_reference_t<_It>>)
    uninitialized_move_result<_It, _Out> _Uninitialized_move_unchecked(
        _It _IFirst, _Se _ILast, _Out _OFirst, _OSe _OLast) {
        constexpr bool _Is_sized1 = sized_sentinel_for<_Se, _It>;
        constexpr bool _Is_sized2 = sized_sentinel_for<_OSe, _Out>;
        if constexpr (_Iter_move_cat<_It, _Out>::_Bitcopy_constructible
                      && _Sized_or_unreachable_sentinel_for<_Se, _It> //
                      && _Sized_or_unreachable_sentinel_for<_OSe, _Out>) {
            if constexpr (_Is_sized1 && _Is_sized2) {
                return _Copy_memcpy_common(_IFirst, _RANGES next(_IFirst, _STD move(_ILast)), _OFirst,
                    _RANGES next(_OFirst, _STD move(_OLast)));
            } else if constexpr (_Is_sized1) {
                return _Copy_memcpy_distance(_IFirst, _OFirst, _IFirst, _RANGES next(_IFirst, _STD move(_ILast)));
            } else if constexpr (_Is_sized2) {
                return _Copy_memcpy_distance(_IFirst, _OFirst, _OFirst, _RANGES next(_OFirst, _STD move(_OLast)));
            } else {
                _STL_ASSERT(false, "Tried to uninitialized_move two ranges with unreachable sentinels");
            }
        } else {
            _Uninitialized_backout _Backout{_STD move(_OFirst)};

            for (; _IFirst != _ILast && _Backout._Last != _OLast; ++_IFirst) {
                _Backout._Emplace_back(_RANGES iter_move(_IFirst));
            }

            return {_STD move(_IFirst), _Backout._Release()};
        }
    }
} // namespace ranges
#endif // __cpp_lib_concepts
template <class _Alloc>
class _NODISCARD _Uninitialized_backout_al {
    // struct to undo partially constructed ranges in _Uninitialized_xxx_al algorithms
private:
    using pointer = _Alloc_ptr_t<_Alloc>;

public:
    _CONSTEXPR20 _Uninitialized_backout_al(pointer _Dest, _Alloc& _Al_) : _First(_Dest), _Last(_Dest), _Al(_Al_) {}

    _Uninitialized_backout_al(const _Uninitialized_backout_al&)            = delete;
    _Uninitialized_backout_al& operator=(const _Uninitialized_backout_al&) = delete;

    _CONSTEXPR20 ~_Uninitialized_backout_al() {
        _Destroy_range(_First, _Last, _Al);
    }

    template <class... _Types>
    _CONSTEXPR20 void _Emplace_back(_Types&&... _Vals) { // construct a new element at *_Last and increment
        allocator_traits<_Alloc>::construct(_Al, _Unfancy(_Last), _STD forward<_Types>(_Vals)...);
        ++_Last;
    }

    constexpr pointer _Release() { // suppress any exception handling backout and return _Last
        _First = _Last;
        return _Last;
    }

private:
    pointer _First;
    pointer _Last;
    _Alloc& _Al;
};

template <class _InIt, class _Se, class _Alloc>
_CONSTEXPR20 _Alloc_ptr_t<_Alloc> _Uninitialized_copy(
    _InIt _First, _Se _Last, _Alloc_ptr_t<_Alloc> _Dest, _Alloc& _Al) {
    // copy [_First, _Last) to raw _Dest, using _Al
    // note: only called internally from elsewhere in the STL
    using _Ptrval = typename _Alloc::value_type*;

#ifdef __cpp_lib_concepts
    auto _UFirst = _RANGES _Unwrap_iter<_Se>(_STD move(_First));
    auto _ULast  = _RANGES _Unwrap_sent<_InIt>(_STD move(_Last));
#else // ^^^ __cpp_lib_concepts / !__cpp_lib_concepts vvv
    // In pre-concepts world, _Uninitialized_copy should only ever be called with an iterator
    // and sentinel of the same type, so `_Get_unwrapped` is fine to call.
    auto _UFirst = _Get_unwrapped(_STD move(_First));
    auto _ULast  = _Get_unwrapped(_STD move(_Last));
#endif // ^^^ !__cpp_lib_concepts ^^^

    constexpr bool _Can_memmove = _Sent_copy_cat<decltype(_UFirst), decltype(_ULast), _Ptrval>::_Bitcopy_constructible
                               && _Uses_default_construct<_Alloc, _Ptrval, decltype(*_UFirst)>::value;
    if constexpr (_Can_memmove) {
#if _HAS_CXX20
        if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
        {
            if constexpr (is_same_v<decltype(_UFirst), decltype(_ULast)>) {
                _Copy_memmove(_To_address(_UFirst), _To_address(_ULast), _Unfancy(_Dest));
                _Dest += _ULast - _UFirst;
            } else {
                const auto _Count = static_cast<size_t>(_ULast - _UFirst);
                _Copy_memmove_n(_To_address(_UFirst), _Count, _Unfancy(_Dest));
                _Dest += _Count;
            }

            return _Dest;
        }
    }

    _Uninitialized_backout_al<_Alloc> _Backout{_Dest, _Al};
    for (; _UFirst != _ULast; ++_UFirst) {
        _Backout._Emplace_back(*_UFirst);
    }

    return _Backout._Release();
}

template <class _InIt, class _Alloc>
_CONSTEXPR20 _Alloc_ptr_t<_Alloc> _Uninitialized_copy_n(
    _InIt _First, size_t _Count, _Alloc_ptr_t<_Alloc> _Dest, _Alloc& _Al) {
    // copy _First + [0, _Count) to raw _Dest, using _Al
    // note: only called internally from elsewhere in the STL
    using _Ptrval = typename _Alloc::value_type*;

    auto _UFirst = _Get_unwrapped(_STD move(_First));

    constexpr bool _Can_memmove =
        conjunction_v<bool_constant<_Iter_copy_cat<decltype(_UFirst), _Ptrval>::_Bitcopy_constructible>,
            _Uses_default_construct<_Alloc, _Ptrval, decltype(*_UFirst)>>;
    if constexpr (_Can_memmove) {
#if _HAS_CXX20
        if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
        {
            _Copy_memmove_n(_UFirst, _Count, _Unfancy(_Dest));
            _Dest += _Count;
            return _Dest;
        }
    }

    _Uninitialized_backout_al<_Alloc> _Backout{_Dest, _Al};
    for (; _Count != 0; ++_UFirst, (void) --_Count) {
        _Backout._Emplace_back(*_UFirst);
    }

    return _Backout._Release();
}
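// The public uninitialized_copy below forwards to _Uninitialized_copy_unchecked;
// typical caller-side use constructs into storage holding no objects yet (a sketch):
//
// ```cpp
// int _Src[3]{1, 2, 3};
// alignas(int) unsigned char _Buf[3 * sizeof(int)]; // raw storage
// int* _Raw = reinterpret_cast<int*>(_Buf);
// _STD uninitialized_copy(_Src, _Src + 3, _Raw); // placement-constructs three ints
// ```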
_EXPORT_STD template <class _InIt, class _NoThrowFwdIt>
_NoThrowFwdIt uninitialized_copy(const _InIt _First, const _InIt _Last, _NoThrowFwdIt _Dest) {
    // copy [_First, _Last) to raw [_Dest, ...)
    _Adl_verify_range(_First, _Last);
    auto _UFirst      = _Get_unwrapped(_First);
    const auto _ULast = _Get_unwrapped(_Last);
    auto _UDest       = _Get_unwrapped_n(_Dest, _Idl_distance<_InIt>(_UFirst, _ULast));
    _Seek_wrapped(_Dest, _Uninitialized_copy_unchecked(_UFirst, _ULast, _UDest));
    return _Dest;
}

template <class _InIt, class _Alloc>
_CONSTEXPR20 _Alloc_ptr_t<_Alloc> _Uninitialized_move(
    const _InIt _First, const _InIt _Last, _Alloc_ptr_t<_Alloc> _Dest, _Alloc& _Al) {
    // move [_First, _Last) to raw _Dest, using _Al
    // note: only called internally from elsewhere in the STL
    using _Ptrval     = typename _Alloc::value_type*;
    auto _UFirst      = _Get_unwrapped(_First);
    const auto _ULast = _Get_unwrapped(_Last);
    if constexpr (conjunction_v<bool_constant<_Iter_move_cat<decltype(_UFirst), _Ptrval>::_Bitcopy_constructible>,
                      _Uses_default_construct<_Alloc, _Ptrval, decltype(_STD move(*_UFirst))>>) {
#if _HAS_CXX20
        if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
        {
            _Copy_memmove(_UFirst, _ULast, _Unfancy(_Dest));
            return _Dest + (_ULast - _UFirst);
        }
    }

    _Uninitialized_backout_al<_Alloc> _Backout{_Dest, _Al};
    for (; _UFirst != _ULast; ++_UFirst) {
        _Backout._Emplace_back(_STD move(*_UFirst));
    }

    return _Backout._Release();
}

template <class _Alloc>
_CONSTEXPR20 _Alloc_ptr_t<_Alloc> _Uninitialized_fill_n(
    _Alloc_ptr_t<_Alloc> _First, _Alloc_size_t<_Alloc> _Count, const typename _Alloc::value_type& _Val, _Alloc& _Al) {
    // copy _Count copies of _Val to raw _First, using _Al
    using _Ty = typename _Alloc::value_type;
    if constexpr (_Fill_memset_is_safe<_Ty*, _Ty> && _Uses_default_construct<_Alloc, _Ty*, _Ty>::value) {
#if _HAS_CXX20
        if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
        {
            _Fill_memset(_Unfancy(_First), _Val, static_cast<size_t>(_Count));
            return _First + _Count;
        }
    } else if constexpr (_Fill_zero_memset_is_safe<_Ty*, _Ty> && _Uses_default_construct<_Alloc, _Ty*, _Ty>::value) {
#if _HAS_CXX20
        if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
        {
            if (_Is_all_bits_zero(_Val)) {
                _Fill_zero_memset(_Unfancy(_First), static_cast<size_t>(_Count));
                return _First + _Count;
            }
        }
    }

    _Uninitialized_backout_al<_Alloc> _Backout{_First, _Al};
    for (; 0 < _Count; --_Count) {
        _Backout._Emplace_back(_Val);
    }

    return _Backout._Release();
}

_EXPORT_STD template <class _NoThrowFwdIt, class _Tval>
void uninitialized_fill(const _NoThrowFwdIt _First, const _NoThrowFwdIt _Last, const _Tval& _Val) {
    // copy _Val throughout raw [_First, _Last)
    _Adl_verify_range(_First, _Last);
    auto _UFirst      = _Get_unwrapped(_First);
    const auto _ULast = _Get_unwrapped(_Last);
    if constexpr (_Fill_memset_is_safe<_Unwrapped_t<const _NoThrowFwdIt&>, _Tval>) {
        _Fill_memset(_UFirst, _Val, static_cast<size_t>(_ULast - _UFirst));
    } else {
        if constexpr (_Fill_zero_memset_is_safe<_Unwrapped_t<const _NoThrowFwdIt&>, _Tval>) {
            if (_Is_all_bits_zero(_Val)) {
                _Fill_zero_memset(_UFirst, static_cast<size_t>(_ULast - _UFirst));
                return;
            }
        }

        _Uninitialized_backout<_Unwrapped_t<const _NoThrowFwdIt&>> _Backout{_UFirst};
        while (_Backout._Last != _ULast) {
            _Backout._Emplace_back(_Val);
        }

        _Backout._Release();
    }
}

template <class _NoThrowFwdIt>
_INLINE_VAR constexpr bool _Use_memset_value_construct_v = conjunction_v<is_pointer<_NoThrowFwdIt>,
    is_scalar<_Iter_value_t<_NoThrowFwdIt>>, negation<is_volatile<remove_reference_t<_Iter_ref_t<_NoThrowFwdIt>>>>,
    negation<is_member_pointer<_Iter_value_t<_NoThrowFwdIt>>>>;

template <class _Ptr>
_Ptr _Zero_range(const _Ptr _First, const _Ptr _Last) { // fill [_First, _Last) with zeroes
    char* const _First_ch = reinterpret_cast<char*>(_To_address(_First));
    char* const _Last_ch  = reinterpret_cast<char*>(_To_address(_Last));
    _CSTD memset(_First_ch, 0, static_cast<size_t>(_Last_ch - _First_ch));
    return _Last;
}
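// Usage sketch (illustrative, not part of the upstream header): _Use_memset_value_construct_v
// gates the _Zero_range fast path: value-initialized scalars are all-bits-zero, so the
// construction loop can be replaced by a single memset. Member pointers are excluded above
// because their null representation need not be all zero bits. Through the public API,
// assuming <memory>:
//
//   alignas(double) unsigned char buf[4 * sizeof(double)];
//   double* p = reinterpret_cast<double*>(buf);
//   std::uninitialized_value_construct_n(p, 4); // eligible for the memset path
//   // all four doubles now hold 0.0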
template <class _Alloc>
_CONSTEXPR20 _Alloc_ptr_t<_Alloc> _Uninitialized_value_construct_n(
    _Alloc_ptr_t<_Alloc> _First, _Alloc_size_t<_Alloc> _Count, _Alloc& _Al) {
    // value-initialize _Count objects to raw _First, using _Al
    using _Ptrty = typename _Alloc::value_type*;
    if constexpr (_Use_memset_value_construct_v<_Ptrty> && _Uses_default_construct<_Alloc, _Ptrty>::value) {
#if _HAS_CXX20
        if (!_STD is_constant_evaluated())
#endif // _HAS_CXX20
        {
            auto _PFirst = _Unfancy(_First);
            _Zero_range(_PFirst, _PFirst + _Count);
            return _First + _Count;
        }
    }

    _Uninitialized_backout_al<_Alloc> _Backout{_First, _Al};
    for (; 0 < _Count; --_Count) {
        _Backout._Emplace_back();
    }

    return _Backout._Release();
}

template <class _NoThrowFwdIt, class _Diff>
_NoThrowFwdIt _Uninitialized_value_construct_n_unchecked1(_NoThrowFwdIt _UFirst, _Diff _Count) {
    // value-initialize all elements in [_UFirst, _UFirst + _Count)
    _STL_INTERNAL_CHECK(_Count >= 0);
    if constexpr (_Use_memset_value_construct_v<_NoThrowFwdIt>) {
        return _Zero_range(_UFirst, _UFirst + _Count);
    } else {
        _Uninitialized_backout<_NoThrowFwdIt> _Backout{_UFirst};
        for (; 0 < _Count; --_Count) {
            _Backout._Emplace_back();
        }

        return _Backout._Release();
    }
}

#if _HAS_DEPRECATED_TEMPORARY_BUFFER
_EXPORT_STD template <class _Ty>
_CXX17_DEPRECATE_TEMPORARY_BUFFER _NODISCARD pair<_Ty*, ptrdiff_t> get_temporary_buffer(ptrdiff_t _Count) noexcept {
    return _Get_temporary_buffer<_Ty>(_Count);
}

_EXPORT_STD template <class _Ty>
_CXX17_DEPRECATE_TEMPORARY_BUFFER void return_temporary_buffer(_Ty* _Pbuf) {
    _Return_temporary_buffer(_Pbuf);
}
#endif // _HAS_DEPRECATED_TEMPORARY_BUFFER

// assumes _Args have already been _Remove_cvref_t'd
template <class _Key, class... _Args>
struct _In_place_key_extract_set {
    // by default we can't extract the key in the emplace family and must construct a node we might not use
    static constexpr bool _Extractable = false;
};

template <class _Key>
struct _In_place_key_extract_set<_Key, _Key> {
    // we can extract the key in emplace if the emplaced type is identical to the key type
    static constexpr bool _Extractable = true;
    static const _Key& _Extract(const _Key& _Val) noexcept {
        return _Val;
    }
};

// assumes _Args have already been _Remove_cvref_t'd
template <class _Key, class... _Args>
struct _In_place_key_extract_map {
    // by default we can't extract the key in the emplace family and must construct a node we might not use
    static constexpr bool _Extractable = false;
};

template <class _Key, class _Second>
struct _In_place_key_extract_map<_Key, _Key, _Second> {
    // if we would call the pair(key, value) constructor family, we can use the first parameter as the key
    static constexpr bool _Extractable = true;
    static const _Key& _Extract(const _Key& _Val, const _Second&) noexcept {
        return _Val;
    }
};

template <class _Key, class _First, class _Second>
struct _In_place_key_extract_map<_Key, pair<_First, _Second>> {
    // if we would call the pair(pair) constructor family, we can use the pair.first member as the key
    static constexpr bool _Extractable = is_same_v<_Key, _Remove_cvref_t<_First>>;
    static const _Key& _Extract(const pair<_First, _Second>& _Val) {
        return _Val.first;
    }
};
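// Usage sketch (illustrative, not part of the upstream header): these extractors let the
// associative containers' emplace avoid speculatively allocating a node when the key is
// recoverable from the argument list, e.g. for std::map<int, std::string> m (assuming <map>
// and <string>):
//
//   m.emplace(42, "x"); // _In_place_key_extract_map<int, int, std::string>::_Extractable:
//                       // the key 42 is the first argument, so the container can search first
//                       // and build a node only if the insert will actually happen
//
// When no specialization applies, emplace must construct the node eagerly and may discard it.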
#pragma warning(push)
#pragma warning(disable : 4624) // '%s': destructor was implicitly defined as deleted
template <class _Ty>
struct _Wrap {
    _Ty _Value; // workaround for VSO-586813 "T^ is not allowed in a union"
};
#pragma warning(pop)

template <class _Alloc>
struct _Alloc_temporary2 {
    using value_type = typename _Alloc::value_type;
    using _Traits    = allocator_traits<_Alloc>;

    _Alloc& _Al;
#ifdef __cplusplus_winrt
    union {
        _Wrap<value_type> _Storage;
    };

    _NODISCARD _CONSTEXPR20 value_type& _Get_value() noexcept {
        return _Storage._Value;
    }

    _NODISCARD _CONSTEXPR20 const value_type& _Get_value() const noexcept {
        return _Storage._Value;
    }
#else // ^^^ workaround for VSO-586813 "T^ is not allowed in a union" / no workaround vvv
    union {
        value_type _Value;
    };

    _NODISCARD _CONSTEXPR20 value_type& _Get_value() noexcept {
        return _Value;
    }

    _NODISCARD _CONSTEXPR20 const value_type& _Get_value() const noexcept {
        return _Value;
    }
#endif // ^^^ no workaround ^^^

    template <class... _Args>
    _CONSTEXPR20 explicit _Alloc_temporary2(_Alloc& _Al_, _Args&&... _Vals) noexcept(
        noexcept(_Traits::construct(_Al_, _STD addressof(_Get_value()), _STD forward<_Args>(_Vals)...)))
        : _Al(_Al_) {
        _Traits::construct(_Al, _STD addressof(_Get_value()), _STD forward<_Args>(_Vals)...);
    }

    _Alloc_temporary2(const _Alloc_temporary2&)            = delete;
    _Alloc_temporary2& operator=(const _Alloc_temporary2&) = delete;

    _CONSTEXPR20 ~_Alloc_temporary2() {
        _Traits::destroy(_Al, _STD addressof(_Get_value()));
    }
};

template <class _Alloc>
_NODISCARD constexpr bool _Allocators_equal(const _Alloc& _Lhs, const _Alloc& _Rhs) noexcept {
    if constexpr (allocator_traits<_Alloc>::is_always_equal::value) {
        return true;
    } else {
        return _Lhs == _Rhs;
    }
}

_EXPORT_STD template <class _FwdIt, class _Ty>
_NODISCARD_REMOVE_ALG _CONSTEXPR20 _FwdIt remove(_FwdIt _First, const _FwdIt _Last, const _Ty& _Val) {
    // remove each matching _Val
    _Adl_verify_range(_First, _Last);
    auto _UFirst      = _Get_unwrapped(_First);
    const auto _ULast = _Get_unwrapped(_Last);
    _UFirst           = _STD _Find_unchecked(_UFirst, _ULast, _Val);
    auto _UNext       = _UFirst;
    if (_UFirst != _ULast) {
        while (++_UFirst != _ULast) {
            if (!(*_UFirst == _Val)) {
                *_UNext = _STD move(*_UFirst);
                ++_UNext;
            }
        }
    }

    _Seek_wrapped(_First, _UNext);
    return _First;
}

_EXPORT_STD template <class _FwdIt, class _Pr>
_NODISCARD_REMOVE_ALG _CONSTEXPR20 _FwdIt remove_if(_FwdIt _First, const _FwdIt _Last, _Pr _Pred) {
    // remove each satisfying _Pred
    _Adl_verify_range(_First, _Last);
    auto _UFirst      = _Get_unwrapped(_First);
    const auto _ULast = _Get_unwrapped(_Last);
    _UFirst           = _STD find_if(_UFirst, _ULast, _Pass_fn(_Pred));
    auto _UNext       = _UFirst;
    if (_UFirst != _ULast) {
        while (++_UFirst != _ULast) {
            if (!_Pred(*_UFirst)) {
                *_UNext = _STD move(*_UFirst);
                ++_UNext;
            }
        }
    }

    _Seek_wrapped(_First, _UNext);
    return _First;
}

template <class _Container, class _Uty>
_CONSTEXPR20 typename _Container::size_type _Erase_remove(_Container& _Cont, const _Uty& _Val) {
    // erase each element matching _Val
    auto _First          = _Cont.begin();
    const auto _Last     = _Cont.end();
    const auto _Old_size = _Cont.size();
    _Seek_wrapped(_First, _STD remove(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Val));
    _Cont.erase(_First, _Last);
    return _Old_size - _Cont.size();
}

template <class _Container, class _Pr>
_CONSTEXPR20 typename _Container::size_type _Erase_remove_if(_Container& _Cont, _Pr _Pred) {
    // erase each element satisfying _Pred
    auto _First          = _Cont.begin();
    const auto _Last     = _Cont.end();
    const auto _Old_size = _Cont.size();
    _Seek_wrapped(_First, _STD remove_if(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pred));
    _Cont.erase(_First, _Last);
    return _Old_size - _Cont.size();
}

template <class _Container, class _Pr>
typename _Container::size_type _Erase_nodes_if(_Container& _Cont, _Pr _Pred) {
    // erase each element satisfying _Pred
    auto _First          = _Cont.begin();
    const auto _Last     = _Cont.end();
    const auto _Old_size = _Cont.size();
    while (_First != _Last) {
        if (_Pred(*_First)) {
            _First = _Cont.erase(_First);
        } else {
            ++_First;
        }
    }

    return _Old_size - _Cont.size();
}
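// Usage sketch (illustrative, not part of the upstream header): _Erase_remove and
// _Erase_remove_if implement C++20 uniform container erasure (std::erase/std::erase_if) for
// contiguous containers, while _Erase_nodes_if serves node-based containers by erasing in
// place instead of shifting elements. Assuming <vector> and <list>:
//
//   std::vector<int> v{1, 2, 3, 2};
//   std::erase(v, 2); // v == {1, 3}; remove + range-erase
//
//   std::list<int> l{1, 2, 3, 2};
//   std::erase_if(l, [](int x) { return x == 2; }); // l == {1, 3}; node-wise erase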
template <class _Ty1, class _Ty2>
void _Deduce_as_pair(const pair<_Ty1, _Ty2>&); // not defined

template <class _Ty, class = void>
_INLINE_VAR constexpr bool _Is_deducible_as_pair = false;

template <class _Ty>
_INLINE_VAR constexpr bool _Is_deducible_as_pair<_Ty, decltype(_STD _Deduce_as_pair(_STD declval<_Ty>()))> = true;

template <class _Ty>
_INLINE_VAR constexpr bool _Is_cv_pair = _Is_specialization_v<remove_cv_t<_Ty>, pair>;

template <class _Ty>
const _Ty& _Normally_bind(_Identity_t<const _Ty&>); // not defined
template <class _Ty>
_Ty&& _Normally_bind(_Identity_t<_Ty&&>); // not defined
template <class _Ty, class _Uty>
using _Normally_bound_ref = decltype(_STD _Normally_bind<_Ty>(_STD declval<_Uty>()));

template <class _Ty, class _Uty, class = void>
_INLINE_VAR constexpr bool _Is_normally_bindable = false;
template <class _Ty, class _Uty>
_INLINE_VAR constexpr bool _Is_normally_bindable<_Ty, _Uty, void_t<_Normally_bound_ref<_Ty, _Uty>>> = true;

#if _HAS_CXX20
_EXPORT_STD template <class _Ty, class _Alloc, class... _Types, enable_if_t<!_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, _Types&&... _Args) noexcept {
    if constexpr (!uses_allocator_v<remove_cv_t<_Ty>, _Alloc>) {
        static_assert(is_constructible_v<_Ty, _Types...>,
            "If uses_allocator_v<remove_cv_t<T>, Alloc> does not hold, T must be constructible from Types...");
        (void) _Al;
        return _STD forward_as_tuple(_STD forward<_Types>(_Args)...);
    } else if constexpr (is_constructible_v<_Ty, allocator_arg_t, const _Alloc&, _Types...>) {
        using _ReturnType = tuple<allocator_arg_t, const _Alloc&, _Types&&...>;
        return _ReturnType{allocator_arg, _Al, _STD forward<_Types>(_Args)...};
    } else if constexpr (is_constructible_v<_Ty, _Types..., const _Alloc&>) {
        return _STD forward_as_tuple(_STD forward<_Types>(_Args)..., _Al);
    } else {
        static_assert(_Always_false<_Ty>,
            "T must be constructible from either (allocator_arg_t, const Alloc&, Types...) "
            "or (Types..., const Alloc&) if uses_allocator_v<remove_cv_t<T>, Alloc> is true");
    }
}

_EXPORT_STD template <class _Ty, class _Alloc, enable_if_t<_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al) noexcept;

_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, _Uty1&& _Val1, _Uty2&& _Val2) noexcept;

#if _HAS_CXX23
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, pair<_Uty1, _Uty2>& _Pair) noexcept;
#endif // _HAS_CXX23

_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, const pair<_Uty1, _Uty2>& _Pair) noexcept;

_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, pair<_Uty1, _Uty2>&& _Pair) noexcept;

#if _HAS_CXX23
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(
    const _Alloc& _Al, const pair<_Uty1, _Uty2>&& _Pair) noexcept;
#endif // _HAS_CXX23

#if _HAS_CXX23 && defined(__cpp_lib_concepts) // TRANSITION, GH-395
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty>
    requires _Is_cv_pair<_Ty> && (_Pair_like<_Uty> || !_Is_deducible_as_pair<_Uty&>)
#else // ^^^ C++23 with concepts / C++20 or no concepts vvv
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty,
    enable_if_t<_Is_cv_pair<_Ty> && !_Is_deducible_as_pair<_Uty&>, int> = 0>
#endif // ^^^ C++20 or no concepts ^^^
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, _Uty&& _Ux) noexcept;
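// Usage sketch (illustrative, not part of the upstream header): the primary overload above
// encodes the uses-allocator construction protocol: arguments pass through unchanged when _Ty
// ignores the allocator, gain a leading (allocator_arg, _Al) when _Ty uses the leading
// convention, or gain a trailing _Al otherwise. Assuming <memory_resource> and <tuple>:
//
//   std::pmr::monotonic_buffer_resource res;
//   std::pmr::polymorphic_allocator<> alloc{&res};
//   // std::pmr::string uses the trailing-allocator convention:
//   auto args = std::uses_allocator_construction_args<std::pmr::string>(alloc, "hello");
//   auto str  = std::make_from_tuple<std::pmr::string>(std::move(args));
//   // str allocates from res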
_EXPORT_STD template <class _Ty, class _Alloc, class _Tuple1, class _Tuple2, enable_if_t<_Is_cv_pair<_Ty>, int> = 0>
_NODISCARD constexpr auto uses_allocator_construction_args(
    const _Alloc& _Al, piecewise_construct_t, _Tuple1&& _Tup1, _Tuple2&& _Tup2) noexcept {
    return _STD make_tuple(piecewise_construct,
        _STD apply(
            [&_Al](auto&&... _Tuple_args) {
                return _STD uses_allocator_construction_args<typename _Ty::first_type>(
                    _Al, _STD forward<decltype(_Tuple_args)>(_Tuple_args)...);
            },
            _STD forward<_Tuple1>(_Tup1)),
        _STD apply(
            [&_Al](auto&&... _Tuple_args) {
                return _STD uses_allocator_construction_args<typename _Ty::second_type>(
                    _Al, _STD forward<decltype(_Tuple_args)>(_Tuple_args)...);
            },
            _STD forward<_Tuple2>(_Tup2)));
}

_EXPORT_STD template <class _Ty, class _Alloc, enable_if_t<_Is_cv_pair<_Ty>, int> /* = 0 */>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al) noexcept {
    // equivalent to
    // return _STD uses_allocator_construction_args<_Ty>(_Al, piecewise_construct, tuple<>{}, tuple<>{});
    return _STD make_tuple(piecewise_construct, _STD uses_allocator_construction_args<typename _Ty::first_type>(_Al),
        _STD uses_allocator_construction_args<typename _Ty::second_type>(_Al));
}

_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> /* = 0 */>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, _Uty1&& _Val1, _Uty2&& _Val2) noexcept {
    // equivalent to
    // return _STD uses_allocator_construction_args<_Ty>(_Al, piecewise_construct,
    //     _STD forward_as_tuple(_STD forward<_Uty1>(_Val1)), _STD forward_as_tuple(_STD forward<_Uty2>(_Val2)));
    return _STD make_tuple(piecewise_construct,
        _STD uses_allocator_construction_args<typename _Ty::first_type>(_Al, _STD forward<_Uty1>(_Val1)),
        _STD uses_allocator_construction_args<typename _Ty::second_type>(_Al, _STD forward<_Uty2>(_Val2)));
}

#if _HAS_CXX23
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> /* = 0 */>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, pair<_Uty1, _Uty2>& _Pair) noexcept {
    // equivalent to
    // return _STD uses_allocator_construction_args<_Ty>(_Al, piecewise_construct,
    //     _STD forward_as_tuple(_Pair.first), _STD forward_as_tuple(_Pair.second));
    return _STD make_tuple(piecewise_construct,
        _STD uses_allocator_construction_args<typename _Ty::first_type>(_Al, _Pair.first),
        _STD uses_allocator_construction_args<typename _Ty::second_type>(_Al, _Pair.second));
}
#endif // _HAS_CXX23

_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> /* = 0 */>
_NODISCARD constexpr auto uses_allocator_construction_args(
    const _Alloc& _Al, const pair<_Uty1, _Uty2>& _Pair) noexcept {
    // equivalent to
    // return _STD uses_allocator_construction_args<_Ty>(_Al, piecewise_construct,
    //     _STD forward_as_tuple(_Pair.first), _STD forward_as_tuple(_Pair.second));
    return _STD make_tuple(piecewise_construct,
        _STD uses_allocator_construction_args<typename _Ty::first_type>(_Al, _Pair.first),
        _STD uses_allocator_construction_args<typename _Ty::second_type>(_Al, _Pair.second));
}

_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> /* = 0 */>
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, pair<_Uty1, _Uty2>&& _Pair) noexcept {
    // equivalent to
    // return _STD uses_allocator_construction_args<_Ty>(_Al, piecewise_construct,
    //     _STD forward_as_tuple(_STD get<0>(_STD move(_Pair))), _STD forward_as_tuple(_STD get<1>(_STD move(_Pair))));
    return _STD make_tuple(piecewise_construct,
        _STD uses_allocator_construction_args<typename _Ty::first_type>(_Al, _STD get<0>(_STD move(_Pair))),
        _STD uses_allocator_construction_args<typename _Ty::second_type>(_Al, _STD get<1>(_STD move(_Pair))));
}

#if _HAS_CXX23
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty1, class _Uty2, enable_if_t<_Is_cv_pair<_Ty>, int> /* = 0 */>
_NODISCARD constexpr auto uses_allocator_construction_args(
    const _Alloc& _Al, const pair<_Uty1, _Uty2>&& _Pair) noexcept {
    // equivalent to
    // return _STD uses_allocator_construction_args<_Ty>(_Al, piecewise_construct,
    //     _STD forward_as_tuple(_STD get<0>(_STD move(_Pair))), _STD forward_as_tuple(_STD get<1>(_STD move(_Pair))));
    return _STD make_tuple(piecewise_construct,
        _STD uses_allocator_construction_args<typename _Ty::first_type>(_Al, _STD get<0>(_STD move(_Pair))),
        _STD uses_allocator_construction_args<typename _Ty::second_type>(_Al, _STD get<1>(_STD move(_Pair))));
}
#endif // _HAS_CXX23
#if _HAS_CXX23 && defined(__cpp_lib_concepts) // TRANSITION, GH-395
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty>
    requires _Is_cv_pair<_Ty> && (_Pair_like<_Uty> || !_Is_deducible_as_pair<_Uty&>)
#else // ^^^ C++23 with concepts / C++20 or no concepts vvv
_EXPORT_STD template <class _Ty, class _Alloc, class _Uty,
    enable_if_t<_Is_cv_pair<_Ty> && !_Is_deducible_as_pair<_Uty&>, int> /* = 0 */>
#endif // ^^^ C++20 or no concepts ^^^
_NODISCARD constexpr auto uses_allocator_construction_args(const _Alloc& _Al, _Uty&& _Ux) noexcept {
#if _HAS_CXX23 && defined(__cpp_lib_concepts)
    if constexpr (_Pair_like<_Uty> && !_Is_subrange_v<remove_cvref_t<_Uty>>) {
        // equivalent to
        // return _STD uses_allocator_construction_args<_Ty>(_Al, piecewise_construct,
        //     _STD forward_as_tuple(_STD get<0>(_STD forward<_Uty>(_Ux))),
        //     _STD forward_as_tuple(_STD get<1>(_STD forward<_Uty>(_Ux))));
        return _STD make_tuple(piecewise_construct,
            _STD uses_allocator_construction_args<typename _Ty::first_type>(_Al, _STD get<0>(_STD forward<_Uty>(_Ux))),
            _STD uses_allocator_construction_args<typename _Ty::second_type>(
                _Al, _STD get<1>(_STD forward<_Uty>(_Ux))));
    } else
#endif // _HAS_CXX23 && defined(__cpp_lib_concepts)
    {
        struct _Pair_remaker {
            const _Alloc& _Al;
            _Uty& _Ux;

            constexpr operator remove_cv_t<_Ty>() const {
                using _Pair_t = remove_cv_t<_Ty>;
                static_assert(_Is_normally_bindable<_Pair_t, _Uty>,
                    "The argument must be bindable to a reference to the std::pair type.");

                using _Pair_first_t   = typename _Pair_t::first_type;
                using _Pair_second_t  = typename _Pair_t::second_type;
                using _Pair_ref_t     = _Normally_bound_ref<_Pair_t, _Uty>;
                _Pair_ref_t _Pair_ref = _STD forward<_Uty>(_Ux);
                if constexpr (is_same_v<_Pair_ref_t, const _Pair_t&>) {
                    // equivalent to
                    // return _STD make_obj_using_allocator<_Pair_t>(_Al, _Pair_ref);
                    return _Pair_t{piecewise_construct,
                        _STD uses_allocator_construction_args<_Pair_first_t>(_Al, _Pair_ref.first),
                        _STD uses_allocator_construction_args<_Pair_second_t>(_Al, _Pair_ref.second)};
                } else {
                    // equivalent to
                    // return _STD make_obj_using_allocator<_Pair_t>(_Al, _STD move(_Pair_ref));
                    return _Pair_t{piecewise_construct,
                        _STD uses_allocator_construction_args<_Pair_first_t>(_Al, _STD get<0>(_STD move(_Pair_ref))),
                        _STD uses_allocator_construction_args<_Pair_second_t>(_Al, _STD get<1>(_STD move(_Pair_ref)))};
                }
            }
        };

        // equivalent to
        // return _STD make_tuple(_Pair_remaker{_Al, _Ux});
        return tuple<_Pair_remaker>({_Al, _Ux});
    }
}

_EXPORT_STD template <class _Ty, class _Alloc, class... _Types>
_NODISCARD constexpr _Ty make_obj_using_allocator(const _Alloc& _Al, _Types&&... _Args) {
    return _STD make_from_tuple<_Ty>(_STD uses_allocator_construction_args<_Ty>(_Al, _STD forward<_Types>(_Args)...));
}
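// Usage sketch (illustrative, not part of the upstream header): make_obj_using_allocator
// feeds the argument tuple built above through make_from_tuple, so allocator-aware members of
// a pair receive the allocator recursively via the piecewise machinery. Assuming
// <memory_resource>:
//
//   std::pmr::monotonic_buffer_resource res;
//   std::pmr::polymorphic_allocator<> alloc{&res};
//   using PairT = std::pair<std::pmr::string, std::pmr::vector<int>>;
//   PairT p = std::make_obj_using_allocator<PairT>(alloc, "key", std::pmr::vector<int>{});
//   // p.first and p.second both allocate from res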
_EXPORT_STD template <class _Ty, class _Alloc, class... _Types>
constexpr _Ty* uninitialized_construct_using_allocator(_Ty* _Ptr, const _Alloc& _Al, _Types&&... _Args) {
    return _STD apply(
        [&](auto&&... _Construct_args) {
            return _STD construct_at(_Ptr, _STD forward<decltype(_Construct_args)>(_Construct_args)...);
        },
        _STD uses_allocator_construction_args<_Ty>(_Al, _STD forward<_Types>(_Args)...));
}
#endif // _HAS_CXX20

#if _HAS_CXX23 && defined(__cpp_lib_concepts) // TRANSITION, GH-395
_EXPORT_STD struct from_range_t {
    explicit from_range_t() = default;
};

_EXPORT_STD inline constexpr from_range_t from_range;

template <class _Rng, class _Elem>
concept _Container_compatible_range =
    (_RANGES input_range<_Rng>) && convertible_to<_RANGES range_reference_t<_Rng>, _Elem>;

template <_RANGES input_range _Rng>
using _Range_key_type = remove_const_t<typename _RANGES range_value_t<_Rng>::first_type>;

template <_RANGES input_range _Rng>
using _Range_mapped_type = typename _RANGES range_value_t<_Rng>::second_type;

template <_RANGES input_range _Rng>
using _Range_to_alloc_type =
    pair<const typename _RANGES range_value_t<_Rng>::first_type, typename _RANGES range_value_t<_Rng>::second_type>;
#endif // _HAS_CXX23 && defined(__cpp_lib_concepts)

template <class _Ty, bool = is_empty_v<_Ty> && !is_final_v<_Ty>>
class _Ebco_base : private _Ty { // Empty Base Class Optimization, active
private:
    using _Mybase = _Ty; // for visualization

protected:
    template <class _Other, enable_if_t<!is_same_v<_Remove_cvref_t<_Other>, _Ebco_base>, int> = 0>
    constexpr explicit _Ebco_base(_Other&& _Val) noexcept(is_nothrow_constructible_v<_Ty, _Other>)
        : _Ty(_STD forward<_Other>(_Val)) {}

    constexpr _Ty& _Get_val() noexcept {
        return *this;
    }

    constexpr const _Ty& _Get_val() const noexcept {
        return *this;
    }
};

template <class _Ty>
class _Ebco_base<_Ty, false> { // Empty Base Class Optimization, inactive
private:
    _Ty _Myval;

protected:
    template <class _Other, enable_if_t<!is_same_v<_Remove_cvref_t<_Other>, _Ebco_base>, int> = 0>
    constexpr explicit _Ebco_base(_Other&& _Val) noexcept(is_nothrow_constructible_v<_Ty, _Other>)
        : _Myval(_STD forward<_Other>(_Val)) {}

    constexpr _Ty& _Get_val() noexcept {
        return _Myval;
    }

    constexpr const _Ty& _Get_val() const noexcept {
        return _Myval;
    }
};

_EXPORT_STD inline void* align(size_t _Bound, size_t _Size, void*& _Ptr, size_t& _Space) noexcept /* strengthened */ {
    // try to carve out _Size bytes on boundary _Bound
    size_t _Off = static_cast<size_t>(reinterpret_cast<uintptr_t>(_Ptr) & (_Bound - 1));
    if (_Off != 0) {
        _Off = _Bound - _Off; // number of bytes to skip
    }

    if (_Space < _Off || _Space - _Off < _Size) {
        return nullptr;
    }

    // enough room, update
    _Ptr = static_cast<char*>(_Ptr) + _Off;
    _Space -= _Off;
    return _Ptr;
}
_STD_END

#pragma pop_macro("new")
_STL_RESTORE_CLANG_WARNINGS
#pragma warning(pop)
#pragma pack(pop)
#endif // _STL_COMPILER_PREPROCESSOR
#endif // _XMEMORY_