P0674R1 make_shared() For Arrays (#309)

Fixes #33.

Co-authored-by: Gianni Weinand <t-giwein@microsoft.com>
Co-authored-by: Stephan T. Lavavej <stl@microsoft.com>
This commit is contained in:
Adam Bucior 2020-04-08 21:23:05 +02:00 committed by GitHub
Parent 714f499f4b
Commit a099e85ce3
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
6 changed files: 1355 additions and 26 deletions

View file

@ -1269,11 +1269,47 @@ private:
_Owner._Call_deleter = false;
}
#if _HAS_CXX20
template <class _Ty0, class... _Types>
friend enable_if_t<!is_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(_Types&&... _Args);
template <class _Ty0, class _Alloc, class... _Types>
friend enable_if_t<!is_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(const _Alloc& _Al_arg, _Types&&... _Args);
template <class _Ty0>
friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(size_t _Count);
template <class _Ty0, class _Alloc>
friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(
const _Alloc& _Al_arg, size_t _Count);
template <class _Ty0>
friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared();
template <class _Ty0, class _Alloc>
friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(const _Alloc& _Al_arg);
template <class _Ty0>
friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(
size_t _Count, const remove_extent_t<_Ty0>& _Val);
template <class _Ty0, class _Alloc>
friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(
const _Alloc& _Al_arg, size_t _Count, const remove_extent_t<_Ty0>& _Val);
template <class _Ty0>
friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(const remove_extent_t<_Ty0>& _Val);
template <class _Ty0, class _Alloc>
friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(
const _Alloc& _Al_arg, const remove_extent_t<_Ty0>& _Val);
#else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
template <class _Ty0, class... _Types>
friend shared_ptr<_Ty0> make_shared(_Types&&... _Args);
template <class _Ty0, class _Alloc, class... _Types>
friend shared_ptr<_Ty0> allocate_shared(const _Alloc& _Al_arg, _Types&&... _Args);
#endif // !_HAS_CXX20
template <class _Ux>
void _Set_ptr_rep_and_enable_shared(_Ux* const _Px, _Ref_count_base* const _Rx) noexcept { // take ownership of _Px
@ -1506,6 +1542,10 @@ public:
~_Ref_count_obj2() {
// nothing to do, _Storage._Value was already destroyed in _Destroy
// N4849 [class.dtor]/7:
// "A defaulted destructor for a class X is defined as deleted if:
// X is a union-like class that has a variant member with a non-trivial destructor"
}
union {
@ -1522,6 +1562,303 @@ private:
}
};
#if _HAS_CXX20
// STRUCT TEMPLATE _Alignas_storage_unit
// Raw-storage chunk whose size equals its alignment; flexible-array control blocks are
// allocated and freed as whole numbers of these units so alignment is always satisfied.
template <size_t _Align>
struct _Alignas_storage_unit {
alignas(_Align) char _Space[_Align]; // uninitialized bytes, never accessed directly
};
// Selects whether _Calculate_bytes_for_flexible_array guards against arithmetic overflow.
enum class _Check_overflow : bool { _No, _Yes };
// Computes the byte count for a control block _Refc holding a flexible array of _Count
// elements; sizeof(_Refc) already embeds storage for one element, so only _Count - 1
// extra elements are added. The result is rounded up to a multiple of alignof(_Refc)
// so it divides evenly into _Alignas_storage_unit chunks. With _Check == _Yes, throws
// bad_array_new_length on overflow; with _No the caller guarantees no overflow (noexcept).
template <class _Refc, _Check_overflow _Check>
_NODISCARD size_t _Calculate_bytes_for_flexible_array(const size_t _Count) noexcept(_Check == _Check_overflow::_No) {
constexpr size_t _Align = alignof(_Refc);
size_t _Bytes = sizeof(_Refc); // contains storage for one element
if (_Count > 1) {
constexpr size_t _Element_size = sizeof(typename _Refc::_Element_type);
size_t _Extra_bytes;
if constexpr (_Check == _Check_overflow::_Yes) {
_Extra_bytes = _Get_size_of_n<_Element_size>(_Count - 1); // check multiplication overflow
if (_Extra_bytes > static_cast<size_t>(-1) - _Bytes - (_Align - 1)) { // assume worst case adjustment
_Throw_bad_array_new_length(); // addition overflow
}
} else {
_Extra_bytes = _Element_size * (_Count - 1);
}
_Bytes += _Extra_bytes;
_Bytes = (_Bytes + _Align - 1) & ~(_Align - 1); // round up to a multiple of _Align
}
#ifdef _ENABLE_STL_INTERNAL_CHECK
using _Storage = _Alignas_storage_unit<_Align>;
_STL_INTERNAL_CHECK(_Bytes % sizeof(_Storage) == 0);
#endif // _ENABLE_STL_INTERNAL_CHECK
return _Bytes;
}
// Allocates raw, suitably aligned storage for a _Refc control block plus its flexible
// array of _Count elements; throws on size overflow or allocation failure.
template <class _Refc>
_NODISCARD _Refc* _Allocate_flexible_array(const size_t _Count) {
const size_t _Bytes = _Calculate_bytes_for_flexible_array<_Refc, _Check_overflow::_Yes>(_Count);
constexpr size_t _Align = alignof(_Refc);
if constexpr (_Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
return static_cast<_Refc*>(::operator new(_Bytes));
} else {
return static_cast<_Refc*>(::operator new (_Bytes, align_val_t{_Align})); // over-aligned form
}
}
// Frees storage obtained from _Allocate_flexible_array, matching the allocation's
// choice of plain vs. aligned operator delete.
template <class _Refc>
void _Deallocate_flexible_array(_Refc* const _Ptr) noexcept {
constexpr size_t _Align = alignof(_Refc);
if constexpr (_Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
::operator delete(static_cast<void*>(_Ptr));
} else {
::operator delete (static_cast<void*>(_Ptr), align_val_t{_Align}); // over-aligned form
}
}
template <class _NoThrowIt>
struct _Uninitialized_rev_destroying_backout {
// struct to undo partially constructed ranges in _Uninitialized_xxx algorithms
// (destroys in reverse order of construction, unlike _Uninitialized_backout)
_NoThrowIt _First;
_NoThrowIt _Last;
explicit _Uninitialized_rev_destroying_backout(_NoThrowIt _Dest) noexcept
: _First(_Dest), _Last(_Dest) {} // TRANSITION, P1771R1 [[nodiscard]] For Constructors
_Uninitialized_rev_destroying_backout(const _Uninitialized_rev_destroying_backout&) = delete;
_Uninitialized_rev_destroying_backout& operator=(const _Uninitialized_rev_destroying_backout&) = delete;
~_Uninitialized_rev_destroying_backout() {
// destroy [_First, _Last) back-to-front; a no-op after _Release()
while (_Last != _First) {
--_Last;
_STD destroy_at(_STD addressof(*_Last));
}
}
template <class... _Types>
void _Emplace_back(_Types&&... _Vals) { // construct a new element at *_Last and increment
_Construct_in_place(*_Last, _STD forward<_Types>(_Vals)...);
++_Last;
}
_NoThrowIt _Release() noexcept { // suppress any exception handling backout and return _Last
_First = _Last;
return _Last;
}
};
// Destroys _Size elements of _Arr in reverse order, recursing into nested array
// extents so the innermost scalars are destroyed last-constructed-first.
template <class _Ty>
void _Reverse_destroy_multidimensional_n(_Ty* const _Arr, size_t _Size) noexcept {
while (_Size > 0) {
--_Size;
if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n(_Arr[_Size], extent_v<_Ty>); // recurse per row
} else {
_Destroy_in_place(_Arr[_Size]);
}
}
}
// RAII guard: on scope exit destroys the first _Index elements of _Target (in reverse)
// unless disarmed by setting _Target = nullptr after construction fully succeeds.
template <class _Ty>
struct _Reverse_destroy_multidimensional_n_guard {
_Ty* _Target; // nullptr == disarmed
size_t _Index; // number of fully-constructed elements so far
~_Reverse_destroy_multidimensional_n_guard() {
if (_Target) {
_Reverse_destroy_multidimensional_n(_Target, _Index);
}
}
};
// Copy-constructs _Out from _In element-by-element into uninitialized storage, recursing
// through nested extents; on an exception, everything constructed so far is destroyed.
template <class _Ty, size_t _Size>
void _Uninitialized_copy_multidimensional(const _Ty (&_In)[_Size], _Ty (&_Out)[_Size]) {
if constexpr (is_trivial_v<_Ty>) {
_Copy_memmove(_In, _In + _Size, _Out); // bitwise copy is valid for trivial types
} else if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n_guard<_Ty> _Guard{_Out, 0};
for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) { // _Idx aliases the guard's counter
_Uninitialized_copy_multidimensional(_In[_Idx], _Out[_Idx]);
}
_Guard._Target = nullptr; // success: disarm
} else {
_Uninitialized_rev_destroying_backout _Backout{_Out};
for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
_Backout._Emplace_back(_In[_Idx]);
}
_Backout._Release();
}
}
// Value-constructs _Size elements (recursing through nested extents) in uninitialized
// storage; uses memset when value-construction is all-zero-bytes for the scalar type.
template <class _Ty>
void _Uninitialized_value_construct_multidimensional_n(_Ty* const _Out, const size_t _Size) {
using _Item = remove_all_extents_t<_Ty>;
if constexpr (_Use_memset_value_construct_v<_Item*>) {
_Zero_range(_Out, _Out + _Size); // zero-fill covers all nested elements at once
} else if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n_guard<_Ty> _Guard{_Out, 0};
for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
_Uninitialized_value_construct_multidimensional_n(_Out[_Idx], extent_v<_Ty>);
}
_Guard._Target = nullptr; // success: disarm
} else {
_Uninitialized_rev_destroying_backout _Backout{_Out};
for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
_Backout._Emplace_back();
}
_Backout._Release();
}
}
// Copy-fills _Size uninitialized elements from _Val (for array element types, each row
// is a copy of the full _Val array); exception-safe via guard/backout objects.
template <class _Ty>
void _Uninitialized_fill_multidimensional_n(_Ty* const _Out, const size_t _Size, const _Ty& _Val) {
if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n_guard<_Ty> _Guard{_Out, 0};
for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
_Uninitialized_copy_multidimensional(_Val, _Out[_Idx]); // intentionally copy, not fill
}
_Guard._Target = nullptr; // success: disarm
} else if constexpr (_Fill_memset_is_safe<_Ty*, _Ty>) {
_CSTD memset(_Out, static_cast<unsigned char>(_Val), _Size); // single-byte elements only
} else {
_Uninitialized_rev_destroying_backout _Backout{_Out};
for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
_Backout._Emplace_back(_Val);
}
_Backout._Release();
}
}
// CLASS TEMPLATE _Ref_count_unbounded_array
template <class _Ty, bool = is_trivially_destructible_v<remove_extent_t<_Ty>>>
class _Ref_count_unbounded_array : public _Ref_count_base {
// handle reference counting for unbounded array with trivial destruction in control block, no allocator
// The elements live in a flexible array starting at _Storage; trivial destructibility
// means the element count need not be remembered (compare the false specialization).
public:
static_assert(is_unbounded_array_v<_Ty>);
using _Element_type = remove_extent_t<_Ty>;
explicit _Ref_count_unbounded_array(const size_t _Count) : _Ref_count_base() {
_Uninitialized_value_construct_multidimensional_n(_Get_ptr(), _Count);
}
explicit _Ref_count_unbounded_array(const size_t _Count, const _Element_type& _Val) : _Ref_count_base() {
_Uninitialized_fill_multidimensional_n(_Get_ptr(), _Count, _Val);
}
// first element of the managed array
_NODISCARD auto _Get_ptr() noexcept {
return _STD addressof(_Storage._Value);
}
private:
union {
_Wrap<_Element_type> _Storage; // flexible array must be last member
};
~_Ref_count_unbounded_array() {
// nothing to do, _Ty is trivially destructible
// See N4849 [class.dtor]/7.
}
virtual void _Destroy() noexcept override { // destroy managed resource
// nothing to do, _Ty is trivially destructible
}
virtual void _Delete_this() noexcept override { // destroy self
this->~_Ref_count_unbounded_array();
_Deallocate_flexible_array(this); // storage came from _Allocate_flexible_array
}
};
template <class _Ty>
class _Ref_count_unbounded_array<_Ty, false> : public _Ref_count_base {
// handle reference counting for unbounded array with non-trivial destruction in control block, no allocator
// Unlike the trivially-destructible case, _Size is stored so _Destroy can run the
// element destructors in reverse order.
public:
static_assert(is_unbounded_array_v<_Ty>);
using _Element_type = remove_extent_t<_Ty>;
explicit _Ref_count_unbounded_array(const size_t _Count) : _Ref_count_base(), _Size(_Count) {
_Uninitialized_value_construct_multidimensional_n(_Get_ptr(), _Size);
}
explicit _Ref_count_unbounded_array(const size_t _Count, const _Element_type& _Val)
: _Ref_count_base(), _Size(_Count) {
_Uninitialized_fill_multidimensional_n(_Get_ptr(), _Size, _Val);
}
// first element of the managed array
_NODISCARD auto _Get_ptr() noexcept {
return _STD addressof(_Storage._Value);
}
private:
size_t _Size; // element count, needed for non-trivial destruction
union {
_Wrap<_Element_type> _Storage; // flexible array must be last member
};
~_Ref_count_unbounded_array() {
// nothing to do, _Storage was already destroyed in _Destroy
// See N4849 [class.dtor]/7.
}
virtual void _Destroy() noexcept override { // destroy managed resource
_Reverse_destroy_multidimensional_n(_Get_ptr(), _Size);
}
virtual void _Delete_this() noexcept override { // destroy self
this->~_Ref_count_unbounded_array();
_Deallocate_flexible_array(this); // storage came from _Allocate_flexible_array
}
};
// CLASS TEMPLATE _Ref_count_bounded_array
template <class _Ty>
class _Ref_count_bounded_array : public _Ref_count_base {
// handle reference counting for bounded array in control block, no allocator
// The array's extent is part of _Ty, so ordinary new/delete suffices (no flexible array).
public:
static_assert(is_bounded_array_v<_Ty>);
_Ref_count_bounded_array() : _Ref_count_base(), _Storage() {} // value-initializing _Storage is necessary here
explicit _Ref_count_bounded_array(const remove_extent_t<_Ty>& _Val)
: _Ref_count_base() { // don't value-initialize _Storage
_Uninitialized_fill_multidimensional_n(_Storage._Value, extent_v<_Ty>, _Val);
}
union {
_Wrap<_Ty> _Storage; // union member so the fill ctor can leave it uninitialized
};
private:
~_Ref_count_bounded_array() {
// nothing to do, _Storage was already destroyed in _Destroy
// See N4849 [class.dtor]/7.
}
virtual void _Destroy() noexcept override { // destroy managed resource
_Destroy_in_place(_Storage); // not _Storage._Value, see N4849 [expr.prim.id.dtor]
}
virtual void _Delete_this() noexcept override { // destroy self
delete this;
}
};
#endif // _HAS_CXX20
// CLASS TEMPLATE _Ebco_base
template <class _Ty,
bool = is_empty_v<_Ty> && !is_final_v<_Ty>>
@ -1562,17 +1899,22 @@ protected:
}
};
// CLASS TEMPLATE _Ref_count_obj_alloc2
// CLASS TEMPLATE _Ref_count_obj_alloc3
template <class _Ty, class _Alloc>
class __declspec(empty_bases) _Ref_count_obj_alloc2 : public _Ebco_base<_Alloc>, public _Ref_count_base {
class __declspec(empty_bases) _Ref_count_obj_alloc3 : public _Ebco_base<_Rebind_alloc_t<_Alloc, _Ty>>,
public _Ref_count_base {
// handle reference counting for object in control block, allocator
private:
static_assert(is_same_v<_Ty, remove_cv_t<_Ty>>, "allocate_shared should remove_cv_t");
using _Rebound = _Rebind_alloc_t<_Alloc, _Ty>;
public:
template <class... _Types>
explicit _Ref_count_obj_alloc2(const _Alloc& _Al_arg, _Types&&... _Args)
: _Ebco_base<_Alloc>(_Al_arg), _Ref_count_base() {
_Maybe_rebind_alloc_t<_Alloc, _Ty> _Alty(this->_Get_val());
allocator_traits<_Rebind_alloc_t<_Alloc, _Ty>>::construct(
_Alty, _STD addressof(_Storage._Value), _STD forward<_Types>(_Args)...);
explicit _Ref_count_obj_alloc3(const _Alloc& _Al_arg, _Types&&... _Args)
: _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base() {
allocator_traits<_Rebound>::construct(
this->_Get_val(), _STD addressof(_Storage._Value), _STD forward<_Types>(_Args)...);
}
union {
@ -1580,37 +1922,350 @@ public:
};
private:
~_Ref_count_obj_alloc2() {
~_Ref_count_obj_alloc3() {
// nothing to do; _Storage._Value already destroyed by _Destroy()
// See N4849 [class.dtor]/7.
}
virtual void _Destroy() noexcept override { // destroy managed resource
_Maybe_rebind_alloc_t<_Alloc, _Ty> _Alty(this->_Get_val());
allocator_traits<_Rebind_alloc_t<_Alloc, _Ty>>::destroy(_Alty, _STD addressof(_Storage._Value));
allocator_traits<_Rebound>::destroy(this->_Get_val(), _STD addressof(_Storage._Value));
}
virtual void _Delete_this() noexcept override { // destroy self
_Rebind_alloc_t<_Alloc, _Ref_count_obj_alloc2> _Al(this->_Get_val());
this->~_Ref_count_obj_alloc2();
_Rebind_alloc_t<_Alloc, _Ref_count_obj_alloc3> _Al(this->_Get_val());
this->~_Ref_count_obj_alloc3();
_Deallocate_plain(_Al, this);
}
};
#if _HAS_CXX20
template <class _Alloc>
class _Uninitialized_rev_destroying_backout_al {
// class to undo partially constructed ranges in _Uninitialized_xxx_al algorithms
// (allocator-aware twin of _Uninitialized_rev_destroying_backout; construction and
// destruction go through allocator_traits so fancy allocators are honored)
private:
using pointer = _Alloc_ptr_t<_Alloc>;
public:
_Uninitialized_rev_destroying_backout_al(pointer _Dest, _Alloc& _Al_) noexcept
: _First(_Dest), _Last(_Dest), _Al(_Al_) {} // TRANSITION, P1771R1 [[nodiscard]] For Constructors
_Uninitialized_rev_destroying_backout_al(const _Uninitialized_rev_destroying_backout_al&) = delete;
_Uninitialized_rev_destroying_backout_al& operator=(const _Uninitialized_rev_destroying_backout_al&) = delete;
~_Uninitialized_rev_destroying_backout_al() {
// destroy [_First, _Last) back-to-front; a no-op after _Release()
while (_Last != _First) {
--_Last;
allocator_traits<_Alloc>::destroy(_Al, _Last);
}
}
template <class... _Types>
void _Emplace_back(_Types&&... _Vals) { // construct a new element at *_Last and increment
allocator_traits<_Alloc>::construct(_Al, _Unfancy(_Last), _STD forward<_Types>(_Vals)...);
++_Last;
}
pointer _Release() noexcept { // suppress any exception handling backout and return _Last
_First = _Last;
return _Last;
}
private:
pointer _First;
pointer _Last;
_Alloc& _Al;
};
// Allocator-aware variant of _Reverse_destroy_multidimensional_n: destroys _Size
// elements in reverse, dispatching scalar destruction through allocator_traits.
template <class _Ty, class _Alloc>
void _Reverse_destroy_multidimensional_n_al(_Ty* const _Arr, size_t _Size, _Alloc& _Al) noexcept {
while (_Size > 0) {
--_Size;
if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n_al(_Arr[_Size], extent_v<_Ty>, _Al); // recurse per row
} else {
allocator_traits<_Alloc>::destroy(_Al, _Arr + _Size);
}
}
}
// RAII guard for the allocator-aware multidimensional algorithms; destroys the first
// _Index elements of _Target on scope exit unless disarmed with _Target = nullptr.
template <class _Ty, class _Alloc>
struct _Reverse_destroy_multidimensional_n_al_guard {
_Ty* _Target; // nullptr == disarmed
size_t _Index; // number of fully-constructed elements so far
_Alloc& _Al;
~_Reverse_destroy_multidimensional_n_al_guard() {
if (_Target) {
_Reverse_destroy_multidimensional_n_al(_Target, _Index, _Al);
}
}
};
// Allocator-aware variant of _Uninitialized_copy_multidimensional; memmove is only
// used when the allocator does not customize construct for the scalar type.
template <class _Ty, size_t _Size, class _Alloc>
void _Uninitialized_copy_multidimensional_al(const _Ty (&_In)[_Size], _Ty (&_Out)[_Size], _Alloc& _Al) {
using _Item = remove_all_extents_t<_Ty>;
if constexpr (conjunction_v<is_trivial<_Ty>, _Uses_default_construct<_Alloc, _Item*, const _Item&>>) {
(void) _Al;
_Copy_memmove(_In, _In + _Size, _Out);
} else if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n_al_guard<_Ty, _Alloc> _Guard{_Out, 0, _Al};
for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) { // _Idx aliases the guard's counter
_Uninitialized_copy_multidimensional_al(_In[_Idx], _Out[_Idx], _Al);
}
_Guard._Target = nullptr; // success: disarm
} else {
_Uninitialized_rev_destroying_backout_al _Backout{_Out, _Al};
for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
_Backout._Emplace_back(_In[_Idx]);
}
_Backout._Release();
}
}
// Allocator-aware variant of _Uninitialized_value_construct_multidimensional_n; the
// memset fast path additionally requires the allocator's construct to be the default.
template <class _Ty, class _Alloc>
void _Uninitialized_value_construct_multidimensional_n_al(_Ty* const _Out, const size_t _Size, _Alloc& _Al) {
using _Item = remove_all_extents_t<_Ty>;
if constexpr (_Use_memset_value_construct_v<_Item*> && _Uses_default_construct<_Alloc, _Item*>::value) {
(void) _Al;
_Zero_range(_Out, _Out + _Size);
} else if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n_al_guard<_Ty, _Alloc> _Guard{_Out, 0, _Al};
for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
_Uninitialized_value_construct_multidimensional_n_al(_Out[_Idx], extent_v<_Ty>, _Al);
}
_Guard._Target = nullptr; // success: disarm
} else {
_Uninitialized_rev_destroying_backout_al _Backout{_Out, _Al};
for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
_Backout._Emplace_back();
}
_Backout._Release();
}
}
// Allocator-aware variant of _Uninitialized_fill_multidimensional_n; the memset fast
// path additionally requires the allocator's construct to be the default.
template <class _Ty, class _Alloc>
void _Uninitialized_fill_multidimensional_n_al(_Ty* const _Out, const size_t _Size, const _Ty& _Val, _Alloc& _Al) {
if constexpr (is_array_v<_Ty>) {
_Reverse_destroy_multidimensional_n_al_guard<_Ty, _Alloc> _Guard{_Out, 0, _Al};
for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
_Uninitialized_copy_multidimensional_al(_Val, _Out[_Idx], _Al); // intentionally copy, not fill
}
_Guard._Target = nullptr; // success: disarm
} else if constexpr (_Fill_memset_is_safe<_Ty*, _Ty> && _Uses_default_construct<_Alloc, _Ty*, const _Ty&>::value) {
(void) _Al;
_CSTD memset(_Out, static_cast<unsigned char>(_Val), _Size); // single-byte elements only
} else {
_Uninitialized_rev_destroying_backout_al _Backout{_Out, _Al};
for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
_Backout._Emplace_back(_Val);
}
_Backout._Release();
}
}
// CLASS TEMPLATE _Ref_count_unbounded_array_alloc
template <class _Ty, class _Alloc>
class __declspec(empty_bases) _Ref_count_unbounded_array_alloc
: public _Ebco_base<_Rebind_alloc_t<_Alloc, remove_all_extents_t<_Ty>>>,
public _Ref_count_base {
// handle reference counting for unbounded array in control block, allocator
// Stores the allocator (rebound to the scalar element type) via EBCO, and the element
// count so _Destroy/_Delete_this can destroy and deallocate correctly.
private:
static_assert(is_unbounded_array_v<_Ty>);
static_assert(is_same_v<_Ty, remove_cv_t<_Ty>>, "allocate_shared should remove_cv_t");
using _Item = remove_all_extents_t<_Ty>;
using _Rebound = _Rebind_alloc_t<_Alloc, _Item>;
public:
using _Element_type = remove_extent_t<_Ty>;
explicit _Ref_count_unbounded_array_alloc(const _Alloc& _Al_arg, const size_t _Count)
: _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base(), _Size(_Count) {
_Uninitialized_value_construct_multidimensional_n_al(_Get_ptr(), _Size, this->_Get_val());
}
explicit _Ref_count_unbounded_array_alloc(const _Alloc& _Al_arg, const size_t _Count, const _Element_type& _Val)
: _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base(), _Size(_Count) {
_Uninitialized_fill_multidimensional_n_al(_Get_ptr(), _Size, _Val, this->_Get_val());
}
// first element of the managed array
_NODISCARD auto _Get_ptr() noexcept {
return _STD addressof(_Storage._Value);
}
private:
size_t _Size; // element count
union {
_Wrap<_Element_type> _Storage; // flexible array must be last member
};
~_Ref_count_unbounded_array_alloc() {
// nothing to do; _Storage._Value already destroyed by _Destroy()
// See N4849 [class.dtor]/7.
}
virtual void _Destroy() noexcept override { // destroy managed resource
// skip the loop entirely when neither the element nor the allocator needs it
if constexpr (!conjunction_v<is_trivially_destructible<_Item>, _Uses_default_destroy<_Rebound, _Item*>>) {
_Reverse_destroy_multidimensional_n_al(_Get_ptr(), _Size, this->_Get_val());
}
}
virtual void _Delete_this() noexcept override { // destroy self
// recompute the storage-unit count the allocation used (no overflow check needed:
// allocation already validated _Size), then return it to the rebound allocator
constexpr size_t _Align = alignof(_Ref_count_unbounded_array_alloc);
using _Storage = _Alignas_storage_unit<_Align>;
_Rebind_alloc_t<_Alloc, _Storage> _Al(this->_Get_val());
const size_t _Bytes =
_Calculate_bytes_for_flexible_array<_Ref_count_unbounded_array_alloc, _Check_overflow::_No>(_Size);
const size_t _Storage_units = _Bytes / sizeof(_Storage);
this->~_Ref_count_unbounded_array_alloc();
_Al.deallocate(reinterpret_cast<_Storage*>(this), _Storage_units);
}
};
// CLASS TEMPLATE _Ref_count_bounded_array_alloc
template <class _Ty, class _Alloc>
class __declspec(empty_bases) _Ref_count_bounded_array_alloc
: public _Ebco_base<_Rebind_alloc_t<_Alloc, remove_all_extents_t<_Ty>>>,
public _Ref_count_base {
// handle reference counting for bounded array in control block, allocator
// The extent is part of _Ty, so the block has fixed size and is deallocated via
// _Deallocate_plain rather than the flexible-array machinery.
private:
static_assert(is_bounded_array_v<_Ty>);
static_assert(is_same_v<_Ty, remove_cv_t<_Ty>>, "allocate_shared should remove_cv_t");
using _Item = remove_all_extents_t<_Ty>;
using _Rebound = _Rebind_alloc_t<_Alloc, _Item>;
public:
explicit _Ref_count_bounded_array_alloc(const _Alloc& _Al_arg)
: _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base() { // don't value-initialize _Storage
_Uninitialized_value_construct_multidimensional_n_al(_Storage._Value, extent_v<_Ty>, this->_Get_val());
}
explicit _Ref_count_bounded_array_alloc(const _Alloc& _Al_arg, const remove_extent_t<_Ty>& _Val)
: _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base() { // don't value-initialize _Storage
_Uninitialized_fill_multidimensional_n_al(_Storage._Value, extent_v<_Ty>, _Val, this->_Get_val());
}
union {
_Wrap<_Ty> _Storage; // union member so the ctors control initialization
};
private:
~_Ref_count_bounded_array_alloc() {
// nothing to do; _Storage._Value already destroyed by _Destroy()
// See N4849 [class.dtor]/7.
}
virtual void _Destroy() noexcept override { // destroy managed resource
// skip the loop entirely when neither the element nor the allocator needs it
if constexpr (!conjunction_v<is_trivially_destructible<_Item>, _Uses_default_destroy<_Rebound, _Item*>>) {
_Reverse_destroy_multidimensional_n_al(_Storage._Value, extent_v<_Ty>, this->_Get_val());
}
}
virtual void _Delete_this() noexcept override { // destroy self
_Rebind_alloc_t<_Alloc, _Ref_count_bounded_array_alloc> _Al(this->_Get_val());
this->~_Ref_count_bounded_array_alloc();
_Deallocate_plain(_Al, this);
}
};
#endif // _HAS_CXX20
// FUNCTION TEMPLATE make_shared
template <class _Ty, class... _Types>
_NODISCARD shared_ptr<_Ty> make_shared(_Types&&... _Args) { // make a shared_ptr
_NODISCARD
#if _HAS_CXX20
enable_if_t<!is_array_v<_Ty>, shared_ptr<_Ty>>
#else // _HAS_CXX20
shared_ptr<_Ty>
#endif // _HAS_CXX20
make_shared(_Types&&... _Args) { // make a shared_ptr to non-array object
const auto _Rx = new _Ref_count_obj2<_Ty>(_STD forward<_Types>(_Args)...);
shared_ptr<_Ty> _Ret;
_Ret._Set_ptr_rep_and_enable_shared(_STD addressof(_Rx->_Storage._Value), _Rx);
return _Ret;
}
#if _HAS_CXX20
// RAII guard that frees a flexible-array control block obtained from operator new if
// the element constructors throw; disarmed with _Target = nullptr once they succeed.
template <class _Refc>
struct _Global_delete_guard {
_Refc* _Target;
~_Global_delete_guard() {
// While this branch is technically unnecessary because N4849 [new.delete.single]/17 requires
// `::operator delete(nullptr)` to be a no-op, it's here to help optimizers see that after
// `_Guard._Target = nullptr;`, this destructor can be eliminated.
if (_Target) {
_Deallocate_flexible_array(_Target);
}
}
};
// make_shared for T[]: single allocation holding the control block and _Count
// value-initialized elements; the guard frees the raw block if construction throws.
template <class _Ty>
_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared(const size_t _Count) {
// make a shared_ptr to an unbounded array
using _Refc = _Ref_count_unbounded_array<_Ty>;
const auto _Rx = _Allocate_flexible_array<_Refc>(_Count);
_Global_delete_guard<_Refc> _Guard{_Rx};
::new (static_cast<void*>(_Rx)) _Refc(_Count);
_Guard._Target = nullptr; // ctor succeeded: disarm the guard
shared_ptr<_Ty> _Ret;
_Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
return _Ret;
}
// make_shared for T[] with a fill value: as above, but each element is copy-constructed
// from _Val instead of value-initialized.
template <class _Ty>
_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared(
const size_t _Count, const remove_extent_t<_Ty>& _Val) {
// make a shared_ptr to an unbounded array
using _Refc = _Ref_count_unbounded_array<_Ty>;
const auto _Rx = _Allocate_flexible_array<_Refc>(_Count);
_Global_delete_guard<_Refc> _Guard{_Rx};
::new (static_cast<void*>(_Rx)) _Refc(_Count, _Val);
_Guard._Target = nullptr; // ctor succeeded: disarm the guard
shared_ptr<_Ty> _Ret;
_Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
return _Ret;
}
// make_shared for T[N]: ordinary new of a fixed-size control block with N
// value-initialized elements; plain new/delete handles exception safety.
template <class _Ty>
_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared() {
// make a shared_ptr to a bounded array
const auto _Rx = new _Ref_count_bounded_array<_Ty>();
shared_ptr<_Ty> _Ret;
_Ret._Set_ptr_rep_and_enable_shared(_Rx->_Storage._Value, _Rx);
return _Ret;
}
// make_shared for T[N] with a fill value: every element is copy-constructed from _Val.
template <class _Ty>
_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared(const remove_extent_t<_Ty>& _Val) {
// make a shared_ptr to a bounded array
const auto _Rx = new _Ref_count_bounded_array<_Ty>(_Val);
shared_ptr<_Ty> _Ret;
_Ret._Set_ptr_rep_and_enable_shared(_Rx->_Storage._Value, _Rx);
return _Ret;
}
#endif // _HAS_CXX20
// FUNCTION TEMPLATE allocate_shared
template <class _Ty, class _Alloc, class... _Types>
_NODISCARD shared_ptr<_Ty> allocate_shared(const _Alloc& _Al, _Types&&... _Args) { // make a shared_ptr
_NODISCARD
#if _HAS_CXX20
enable_if_t<!is_array_v<_Ty>, shared_ptr<_Ty>>
#else // _HAS_CXX20
shared_ptr<_Ty>
#endif // _HAS_CXX20
allocate_shared(const _Alloc& _Al, _Types&&... _Args) { // make a shared_ptr to non-array object
// Note: As of 2019-05-28, this implements the proposed resolution of LWG-3210 (which controls whether
// allocator::construct sees T or const T when _Ty is const qualified)
using _Refoa = _Ref_count_obj_alloc2<remove_cv_t<_Ty>, _Alloc>;
using _Refoa = _Ref_count_obj_alloc3<remove_cv_t<_Ty>, _Alloc>;
using _Alblock = _Rebind_alloc_t<_Alloc, _Refoa>;
_Alblock _Rebound(_Al);
_Alloc_construct_ptr<_Alblock> _Constructor{_Rebound};
@ -1622,6 +2277,95 @@ _NODISCARD shared_ptr<_Ty> allocate_shared(const _Alloc& _Al, _Types&&... _Args)
return _Ret;
}
#if _HAS_CXX20
// RAII owner of _Nx allocator units: allocates in the constructor, deallocates in the
// destructor unless ownership is released by setting _Ptr = nullptr.
template <class _Alloc>
struct _Allocate_n_ptr {
_Alloc& _Al;
_Alloc_ptr_t<_Alloc> _Ptr; // nullptr == released
size_t _Nx; // unit count, needed for deallocate
_Allocate_n_ptr(_Alloc& _Al_, const size_t _Nx_) : _Al(_Al_), _Ptr(_Al_.allocate(_Nx_)), _Nx(_Nx_) {}
~_Allocate_n_ptr() {
if (_Ptr) {
_Al.deallocate(_Ptr, _Nx);
}
}
_Allocate_n_ptr(const _Allocate_n_ptr&) = delete;
_Allocate_n_ptr& operator=(const _Allocate_n_ptr&) = delete;
};
// allocate_shared for T[]: the allocator is rebound to _Alignas_storage_unit chunks so
// one allocation covers the control block plus _Count elements; the guard returns the
// raw chunks if element construction throws.
template <class _Ty, class _Alloc>
_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(
const _Alloc& _Al, const size_t _Count) {
// make a shared_ptr to an unbounded array
using _Refc = _Ref_count_unbounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
constexpr size_t _Align = alignof(_Refc);
using _Storage = _Alignas_storage_unit<_Align>;
_Rebind_alloc_t<_Alloc, _Storage> _Rebound(_Al);
const size_t _Bytes = _Calculate_bytes_for_flexible_array<_Refc, _Check_overflow::_Yes>(_Count);
const size_t _Storage_units = _Bytes / sizeof(_Storage);
_Allocate_n_ptr _Guard{_Rebound, _Storage_units};
const auto _Rx = reinterpret_cast<_Refc*>(_Unfancy(_Guard._Ptr));
::new (static_cast<void*>(_Rx)) _Refc(_Al, _Count);
_Guard._Ptr = nullptr; // ctor succeeded: release the guard's ownership
shared_ptr<_Ty> _Ret;
_Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
return _Ret;
}
// allocate_shared for T[] with a fill value: as above, but elements are
// copy-constructed from _Val.
template <class _Ty, class _Alloc>
_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(
const _Alloc& _Al, const size_t _Count, const remove_extent_t<_Ty>& _Val) {
// make a shared_ptr to an unbounded array
using _Refc = _Ref_count_unbounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
constexpr size_t _Align = alignof(_Refc);
using _Storage = _Alignas_storage_unit<_Align>;
_Rebind_alloc_t<_Alloc, _Storage> _Rebound(_Al);
const size_t _Bytes = _Calculate_bytes_for_flexible_array<_Refc, _Check_overflow::_Yes>(_Count);
const size_t _Storage_units = _Bytes / sizeof(_Storage);
_Allocate_n_ptr _Guard{_Rebound, _Storage_units};
const auto _Rx = reinterpret_cast<_Refc*>(_Unfancy(_Guard._Ptr));
::new (static_cast<void*>(_Rx)) _Refc(_Al, _Count, _Val);
_Guard._Ptr = nullptr; // ctor succeeded: release the guard's ownership
shared_ptr<_Ty> _Ret;
_Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
return _Ret;
}
// allocate_shared for T[N]: fixed-size control block, allocated through the allocator
// rebound to the control-block type; _Alloc_construct_ptr provides exception safety.
template <class _Ty, class _Alloc>
_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(const _Alloc& _Al) {
// make a shared_ptr to a bounded array
using _Refc = _Ref_count_bounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
using _Alblock = _Rebind_alloc_t<_Alloc, _Refc>;
_Alblock _Rebound(_Al);
_Alloc_construct_ptr _Constructor{_Rebound};
_Constructor._Allocate();
::new (static_cast<void*>(_Unfancy(_Constructor._Ptr))) _Refc(_Al);
shared_ptr<_Ty> _Ret;
const auto _Ptr = static_cast<remove_extent_t<_Ty>*>(_Constructor._Ptr->_Storage._Value);
_Ret._Set_ptr_rep_and_enable_shared(_Ptr, _Unfancy(_Constructor._Release()));
return _Ret;
}
// allocate_shared for T[N] with a fill value: every element is copy-constructed
// from _Val through the (rebound) allocator.
template <class _Ty, class _Alloc>
_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(
const _Alloc& _Al, const remove_extent_t<_Ty>& _Val) {
// make a shared_ptr to a bounded array
using _Refc = _Ref_count_bounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
using _Alblock = _Rebind_alloc_t<_Alloc, _Refc>;
_Alblock _Rebound(_Al);
_Alloc_construct_ptr _Constructor{_Rebound};
_Constructor._Allocate();
::new (static_cast<void*>(_Unfancy(_Constructor._Ptr))) _Refc(_Al, _Val);
shared_ptr<_Ty> _Ret;
const auto _Ptr = static_cast<remove_extent_t<_Ty>*>(_Constructor._Ptr->_Storage._Value);
_Ret._Set_ptr_rep_and_enable_shared(_Ptr, _Unfancy(_Constructor._Release()));
return _Ret;
}
#endif // _HAS_CXX20
// CLASS TEMPLATE weak_ptr
template <class _Ty>
class weak_ptr : public _Ptr_base<_Ty> { // class for pointer to reference counted resource

View file

@ -155,6 +155,7 @@
// P0646R1 list/forward_list remove()/remove_if()/unique() Return size_type
// P0653R2 to_address()
// P0655R1 visit<R>()
// P0674R1 make_shared() For Arrays
// P0758R1 is_nothrow_convertible
// P0768R1 Library Support For The Spaceship Comparison Operator <=>
// (partially implemented)
@ -1029,7 +1030,6 @@
#define __cpp_lib_map_try_emplace 201411L
#define __cpp_lib_nonmember_container_access 201411L
#define __cpp_lib_shared_mutex 201505L
#define __cpp_lib_shared_ptr_arrays 201611L
#define __cpp_lib_transparent_operators 201510L
#define __cpp_lib_type_trait_variable_templates 201510L
#define __cpp_lib_uncaught_exceptions 201411L
@ -1044,7 +1044,6 @@
#if _HAS_STD_BYTE
#define __cpp_lib_byte 201603L
#endif // _HAS_STD_BYTE
#define __cpp_lib_chrono 201611L
#define __cpp_lib_clamp 201603L
#ifndef _M_CEE
#define __cpp_lib_execution 201603L
@ -1074,8 +1073,12 @@
#define __cpp_lib_string_view 201803L
#define __cpp_lib_to_chars 201611L
#define __cpp_lib_variant 201606L
#endif // _HAS_CXX17
#if _HAS_CXX17
#define __cpp_lib_chrono 201611L // P0505R0 constexpr For <chrono> (Again)
#else // _HAS_CXX17
#define __cpp_lib_chrono 201510L
#define __cpp_lib_chrono 201510L // P0092R1 <chrono> floor(), ceil(), round(), abs()
#endif // _HAS_CXX17
// C++20
@ -1139,6 +1142,12 @@
#define __cpp_lib_array_constexpr 201803L
#endif // _HAS_CXX17
#if _HAS_CXX20
#define __cpp_lib_shared_ptr_arrays 201707L // P0674R1 make_shared() For Arrays
#else // _HAS_CXX20
#define __cpp_lib_shared_ptr_arrays 201611L // P0497R0 Fixing shared_ptr For Arrays
#endif // _HAS_CXX20
// EXPERIMENTAL
#define __cpp_lib_experimental_erase_if 201411L
#define __cpp_lib_experimental_filesystem 201406L

View file

@ -1,4 +1,4 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
RUNALL_INCLUDE ..\usual_matrix.lst
RUNALL_INCLUDE ..\usual_latest_matrix.lst

View file

@ -1,12 +1,300 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <new>
#include <stdexcept>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
using namespace std;
#pragma warning(disable : 28251) // Inconsistent annotation for 'new': this instance has no annotations.
// Number of calls to the replaced operator new below; lets tests assert that
// make_shared performs exactly one allocation.
int allocationCount = 0;
int canCreate = 10; // Counter to force an exception when constructing a
// sufficiently large ReportAddress array
struct ReportAddress;
// Logs of ReportAddress construction/destruction addresses, used to verify that array
// elements are constructed in ascending address order and destroyed in reverse.
vector<ReportAddress*> ascendingAddressBuffer;
vector<ReportAddress*> descendingAddressBuffer;
// According to N4849, the default behavior of operator new[](size) is to return
// operator new(size), so only the latter needs to be replaced.
void* operator new(size_t size) {
void* const p = ::operator new(size, nothrow);
if (p) {
return p;
} else {
throw bad_alloc();
}
}
// Replacement nothrow operator new: counts every allocation request so tests
// can assert on allocation totals. A zero-byte request is mapped to one byte
// so that an actual allocation is still attempted.
void* operator new(size_t size, const nothrow_t&) noexcept {
    ++allocationCount;
    const size_t actual = (size == 0) ? 1 : size;
    return malloc(actual);
}
// Element type with an observable default state (106) and a two-argument
// constructor that stores the sum, so tests can distinguish default
// construction from argument forwarding.
struct InitialValue {
    int value = 106;

    InitialValue() = default;
    InitialValue(int a, int b) : value{a + b} {}
};
// Trivial three-member aggregate; used to check braced element
// initialization of shared arrays (e.g. make_shared<ThreeIntWrap[5]>({2, 8, 9})).
struct ThreeIntWrap {
    int v1;
    int v2;
    int v3;
};
// Overaligned (32-byte) aggregate: verifies that make_shared/allocate_shared
// storage honors extended alignment of the element type.
struct alignas(32) HighlyAligned {
    uint64_t a;
    uint64_t b;
    uint64_t c;
    uint64_t d;
};
// Logs its own address on construction and destruction so the tests can
// verify element construction/destruction order. Construction throws once the
// canCreate budget is exhausted, to exercise cleanup after a partial build.
struct ReportAddress {
    ReportAddress() {
        if (canCreate <= 0) {
            throw runtime_error("Can't create more ReportAddress objects.");
        }
        --canCreate;
        ascendingAddressBuffer.push_back(this);
    }

    ~ReportAddress() {
        descendingAddressBuffer.push_back(this);
        ++canCreate; // return the budget for the next test case
    }
};
// Elements must have been constructed at strictly increasing addresses;
// clears the construction log so the next test case starts fresh.
void assert_ascending_init() {
    for (size_t i = 0; i + 1 < ascendingAddressBuffer.size(); ++i) {
        assert(ascendingAddressBuffer[i] < ascendingAddressBuffer[i + 1]);
    }
    ascendingAddressBuffer.clear();
}
// Elements must have been destroyed at strictly decreasing addresses
// (reverse of construction order); clears the destruction log afterwards.
void assert_descending_destruct() {
    for (size_t i = 0; i + 1 < descendingAddressBuffer.size(); ++i) {
        assert(descendingAddressBuffer[i] > descendingAddressBuffer[i + 1]);
    }
    descendingAddressBuffer.clear();
}
// A freshly created shared_ptr must be the sole owner of a non-null object.
template <class T>
void assert_shared_use_get(const shared_ptr<T>& sp) {
    assert(sp.get() != nullptr);
    assert(sp.use_count() == 1);
}
// Wraps make_shared<T>: checks sole ownership, a non-null result, and that
// exactly one allocation (one replacement operator new call) occurred.
template <class T, class... Args>
shared_ptr<T> make_shared_assert(Args&&... vals) {
    const int before = allocationCount;
    auto sp = make_shared<T>(forward<Args>(vals)...);
    assert(allocationCount == before + 1);
    assert_shared_use_get(sp);
    return sp;
}
// Dispatch helpers for array make_shared calls that take a braced element
// initializer (a braced list cannot be deduced through Args&&...).
// Bounded array form (T is U[N]): only the element initializer is supplied.
template <class T, enable_if_t<extent_v<T> != 0, int> = 0>
shared_ptr<T> make_shared_init_assert(const remove_extent_t<T>& val) {
    return make_shared_assert<T>(val);
}

// Unbounded array form (T is U[]): element count plus element initializer.
template <class T, enable_if_t<is_array_v<T> && extent_v<T> == 0, int> = 0>
shared_ptr<T> make_shared_init_assert(size_t size, const remove_extent_t<T>& val) {
    return make_shared_assert<T>(size, val);
}
// Builds a ReportAddress array via make_shared; whether construction succeeds
// or throws partway (budget exhausted), elements must have been constructed
// in ascending and destroyed in descending address order.
template <class T, class... Args>
void test_make_init_destruct_order(Args&&... vals) {
    try {
        auto sp = make_shared<T>(forward<Args>(vals)...);
        assert_shared_use_get(sp);
    } catch (const runtime_error& exc) {
        assert(exc.what() == "Can't create more ReportAddress objects."sv);
    }
    assert_ascending_init();
    assert_descending_destruct();
}
void test_make_shared_not_array() {
shared_ptr<vector<int>> p0 = make_shared<vector<int>>();
assert_shared_use_get(p0);
assert(p0->empty());
shared_ptr<InitialValue> p1 = make_shared_assert<InitialValue>();
assert(p1->value == 106);
shared_ptr<string> p2 = make_shared<string>("Meow!", 2u, 3u);
assert_shared_use_get(p2);
assert(p2->compare("ow!") == 0);
shared_ptr<InitialValue> p3 = make_shared_assert<InitialValue>(40, 2);
assert(p3->value == 42);
shared_ptr<int> p4 = make_shared<int>();
assert_shared_use_get(p4);
assert(*p4 == 0);
shared_ptr<HighlyAligned> p5 = make_shared<HighlyAligned>();
assert_shared_use_get(p5);
assert(reinterpret_cast<uintptr_t>(p5.get()) % alignof(HighlyAligned) == 0);
assert(p5->a == 0 && p5->b == 0 && p5->c == 0 && p5->d == 0);
}
// make_shared<T[N]> (bounded array types): value-initialization, braced
// element initialization, multidimensional arrays, overaligned elements,
// and element construction/destruction ordering.
void test_make_shared_array_known_bounds() {
    shared_ptr<string[100]> p0 = make_shared<string[100]>();
    assert_shared_use_get(p0);
    for (int i = 0; i < 100; ++i) {
        assert(p0[i].empty()); // elements are value-initialized
    }

    // Multidimensional: every element is default-constructed (value == 106).
    shared_ptr<InitialValue[2][8][9]> p1 = make_shared_assert<InitialValue[2][8][9]>();
    for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 8; ++j) {
            for (int k = 0; k < 9; ++k) {
                assert(p1[i][j][k].value == 106);
            }
        }
    }

    // Each string[2] row is initialized from the same braced pair.
    shared_ptr<string[10][2]> p2 = make_shared<string[10][2]>({"Meow!", "Purr"});
    assert_shared_use_get(p2);
    for (int i = 0; i < 10; ++i) {
        assert(p2[i][0].compare("Meow!") == 0);
        assert(p2[i][1].compare("Purr") == 0);
    }

    shared_ptr<vector<int>[3]> p3 = make_shared<vector<int>[3]>({9, 9, 9});
    assert_shared_use_get(p3);
    for (int i = 0; i < 3; ++i) {
        assert(p3[i].size() == 3);
        for (const auto& val : p3[i]) {
            assert(val == 9);
        }
    }

    shared_ptr<ThreeIntWrap[5]> p4 = make_shared_init_assert<ThreeIntWrap[5]>({2, 8, 9});
    for (int i = 0; i < 5; ++i) {
        assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
    }

    shared_ptr<int[1][7][2][9]> p5 = make_shared<int[1][7][2][9]>();
    assert_shared_use_get(p5);
    for (int i = 0; i < 7; ++i) {
        for (int j = 0; j < 2; ++j) {
            for (int k = 0; k < 9; ++k) {
                assert(p5[0][i][j][k] == 0); // scalars are zero-initialized
            }
        }
    }

    // Overaligned element type: storage must satisfy alignof(HighlyAligned).
    shared_ptr<HighlyAligned[6]> p6 = make_shared<HighlyAligned[6]>();
    assert_shared_use_get(p6);
    assert(reinterpret_cast<uintptr_t>(p6.get()) % alignof(HighlyAligned) == 0);
    for (int i = 0; i < 6; ++i) {
        assert(p6[i].a == 0 && p6[i].b == 0 && p6[i].c == 0 && p6[i].d == 0);
    }

    test_make_init_destruct_order<ReportAddress[5]>(); // success one dimensional
    test_make_init_destruct_order<ReportAddress[20]>(); // failure one dimensional
    test_make_init_destruct_order<ReportAddress[2][2][2]>(); // success multidimensional
    test_make_init_destruct_order<ReportAddress[3][3][3]>(); // failure multidimensional
}
// make_shared<T[]> (unbounded array types): the element count is a runtime
// argument; covers value/braced initialization, zero-length arrays,
// multidimensional element types, overalignment, and ordering.
void test_make_shared_array_unknown_bounds() {
    shared_ptr<string[]> p0 = make_shared<string[]>(100);
    assert_shared_use_get(p0);
    for (int i = 0; i < 100; ++i) {
        assert(p0[i].empty());
    }

    // Unbounded first dimension with bounded inner dimensions.
    shared_ptr<InitialValue[][8][9]> p1 = make_shared_assert<InitialValue[][8][9]>(2u);
    for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 8; ++j) {
            for (int k = 0; k < 9; ++k) {
                assert(p1[i][j][k].value == 106);
            }
        }
    }

    shared_ptr<string[][2]> p2 = make_shared<string[][2]>(10, {"Meow!", "Purr"});
    assert_shared_use_get(p2);
    for (int i = 0; i < 10; ++i) {
        assert(p2[i][0].compare("Meow!") == 0);
        assert(p2[i][1].compare("Purr") == 0);
    }

    shared_ptr<vector<int>[]> p3 = make_shared<vector<int>[]>(3, {9, 9, 9});
    assert_shared_use_get(p3);
    for (int i = 0; i < 3; ++i) {
        assert(p3[i].size() == 3);
        for (const auto& val : p3[i]) {
            assert(val == 9);
        }
    }

    shared_ptr<ThreeIntWrap[]> p4 = make_shared_init_assert<ThreeIntWrap[]>(5, {2, 8, 9});
    for (int i = 0; i < 5; ++i) {
        assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
    }

    // Zero-length arrays are valid; there are no dereferenceable elements.
    shared_ptr<int[]> p5 = make_shared_assert<int[]>(0u); // p5 cannot be dereferenced

    shared_ptr<int[][5][6]> p6 = make_shared<int[][5][6]>(4u);
    assert_shared_use_get(p6);
    for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 5; ++j) {
            for (int k = 0; k < 6; ++k) {
                assert(p6[i][j][k] == 0);
            }
        }
    }

    shared_ptr<HighlyAligned[]> p7 = make_shared<HighlyAligned[]>(7u);
    assert_shared_use_get(p7);
    assert(reinterpret_cast<uintptr_t>(p7.get()) % alignof(HighlyAligned) == 0);
    for (int i = 0; i < 7; ++i) {
        assert(p7[i].a == 0 && p7[i].b == 0 && p7[i].c == 0 && p7[i].d == 0);
    }

    test_make_init_destruct_order<ReportAddress[]>(5u); // success one dimensional
    test_make_init_destruct_order<ReportAddress[]>(20u); // failure one dimensional
    test_make_init_destruct_order<ReportAddress[][2][2]>(2u); // success multidimensional
    test_make_init_destruct_order<ReportAddress[][3][3]>(3u); // failure multidimensional
}
// Counts of construct()/destroy() calls made through the instrumented
// allocator; they must balance after every allocate_shared test scope.
int constructCount = 0;
int destroyCount = 0;

inline void assert_construct_destruct_equal() {
    assert(constructCount == destroyCount);
}
template <class T, class ConstructAssert>
struct ConstructConstrainingAllocator {
using value_type = T;
@ -30,6 +318,7 @@ struct ConstructConstrainingAllocator {
allocator<Other> a;
static_assert(is_same_v<Other, value_type> && is_same_v<ConstructAssert, Other>, "incorrect construct call");
allocator_traits<allocator<Other>>::construct(a, p, forward<Args>(vals)...);
++constructCount;
}
template <class Other>
@ -37,11 +326,287 @@ struct ConstructConstrainingAllocator {
allocator<Other> a;
static_assert(is_same_v<Other, value_type> && is_same_v<ConstructAssert, Other>, "incorrect destroy call");
allocator_traits<allocator<Other>>::destroy(a, p);
++destroyCount;
}
};
int main() {
ConstructConstrainingAllocator<void, int> a{};
(void) allocate_shared<int>(a, 42);
(void) allocate_shared<const int>(a, 42);
// Convenience alias for the instrumented allocator with element type T.
// NOTE(review): ConstructConstrainingAllocator is only partially visible
// here; its construct()/destroy() static_assert the expected element type
// and bump constructCount/destroyCount.
template <typename T>
using CustomAlloc = ConstructConstrainingAllocator<void, T>;
// Wraps allocate_shared<T>: checks sole ownership, a single allocation, and
// that exactly elemCount allocator construct() calls were made.
template <class T, class... Args>
shared_ptr<T> allocate_shared_assert(int elemCount, Args&&... vals) {
    const int allocBefore = allocationCount;
    const int ctorBefore  = constructCount;
    auto sp = allocate_shared<T>(forward<Args>(vals)...);
    assert_shared_use_get(sp);
    assert(allocationCount == allocBefore + 1);
    assert(constructCount == ctorBefore + elemCount);
    return sp;
}
// Dispatch helpers for array allocate_shared calls that take a braced element
// initializer (a braced list cannot be deduced through Args&&...).
// Bounded array form (T is U[N]): allocator plus element initializer.
template <class T, class A, enable_if_t<extent_v<T> != 0, int> = 0>
shared_ptr<T> allocate_shared_init_assert(int elemCount, const A& a, const remove_extent_t<T>& val) {
    return allocate_shared_assert<T>(elemCount, a, val);
}

// Unbounded array form (T is U[]): allocator, element count, and initializer.
template <class T, class A, enable_if_t<is_array_v<T> && extent_v<T> == 0, int> = 0>
shared_ptr<T> allocate_shared_init_assert(int elemCount, const A& a, size_t size, const remove_extent_t<T>& val) {
    return allocate_shared_assert<T>(elemCount, a, size, val);
}
template <class T, class... Args>
void test_allocate_init_destruct_order(Args&&... vals) {
CustomAlloc<remove_all_extents_t<T>> a{};
try {
shared_ptr<T> sp = allocate_shared<T>(a, forward<Args>(vals)...);
assert_shared_use_get(sp);
} catch (const runtime_error& exc) {
assert(exc.what() == "Can't create more ReportAddress objects."sv);
}
assert_construct_destruct_equal();
assert_ascending_init();
assert_descending_destruct();
}
// allocate_shared for non-array types: mirrors test_make_shared_not_array but
// goes through the instrumented allocator; after each scope the pointer has
// been destroyed, so construct/destroy counts must balance.
void test_allocate_shared_not_array() {
    CustomAlloc<vector<int>> a0{};
    {
        shared_ptr<vector<int>> p0 = allocate_shared<vector<int>>(a0);
        assert_shared_use_get(p0);
        assert(p0->empty());
    }
    assert_construct_destruct_equal();

    CustomAlloc<InitialValue> a1{};
    {
        shared_ptr<InitialValue> p1 = allocate_shared_assert<InitialValue>(1, a1);
        assert(p1->value == 106);
    }
    assert_construct_destruct_equal();

    CustomAlloc<string> a2{};
    {
        // string("Meow!", 2, 3) selects the substring "ow!".
        shared_ptr<string> p2 = allocate_shared<string>(a2, "Meow!", 2u, 3u);
        assert_shared_use_get(p2);
        assert(p2->compare("ow!") == 0);
    }
    assert_construct_destruct_equal();

    {
        shared_ptr<InitialValue> p3 = allocate_shared_assert<InitialValue>(1, a1, 40, 2);
        assert(p3->value == 42);
    }
    assert_construct_destruct_equal();

    CustomAlloc<int> a4{};
    {
        shared_ptr<int> p4 = allocate_shared<int>(a4);
        assert_shared_use_get(p4);
        assert(*p4 == 0);
    }
    assert_construct_destruct_equal();

    CustomAlloc<HighlyAligned> a5{};
    {
        shared_ptr<HighlyAligned> p5 = allocate_shared<HighlyAligned>(a5);
        assert_shared_use_get(p5);
        assert(reinterpret_cast<uintptr_t>(p5.get()) % alignof(HighlyAligned) == 0);
        assert(p5->a == 0 && p5->b == 0 && p5->c == 0 && p5->d == 0);
    }
    assert_construct_destruct_equal();
}
// allocate_shared<T[N]> (bounded array types) with the instrumented
// allocator; each scope ends with balanced construct/destroy counts.
// The elemCount passed to allocate_shared_assert is the flattened element
// count (e.g. 2*8*9 == 144).
void test_allocate_shared_array_known_bounds() {
    CustomAlloc<string> a0{};
    {
        shared_ptr<string[100]> p0 = allocate_shared<string[100]>(a0);
        assert_shared_use_get(p0);
        for (int i = 0; i < 100; ++i) {
            assert(p0[i].empty());
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<InitialValue> a1{};
    {
        shared_ptr<InitialValue[2][8][9]> p1 = allocate_shared_assert<InitialValue[2][8][9]>(144, a1);
        for (int i = 0; i < 2; ++i) {
            for (int j = 0; j < 8; ++j) {
                for (int k = 0; k < 9; ++k) {
                    assert(p1[i][j][k].value == 106);
                }
            }
        }
    }
    assert_construct_destruct_equal();

    {
        shared_ptr<string[10][2]> p2 = allocate_shared<string[10][2]>(a0, {"Meow!", "Purr"});
        assert_shared_use_get(p2);
        for (int i = 0; i < 10; ++i) {
            assert(p2[i][0].compare("Meow!") == 0);
            assert(p2[i][1].compare("Purr") == 0);
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<vector<int>> a3{};
    {
        shared_ptr<vector<int>[3]> p3 = allocate_shared<vector<int>[3]>(a3, {9, 9, 9});
        assert_shared_use_get(p3);
        for (int i = 0; i < 3; ++i) {
            assert(p3[i].size() == 3);
            for (const auto& val : p3[i]) {
                assert(val == 9);
            }
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<ThreeIntWrap> a4{};
    {
        shared_ptr<ThreeIntWrap[5]> p4 = allocate_shared_init_assert<ThreeIntWrap[5]>(5, a4, {2, 8, 9});
        for (int i = 0; i < 5; ++i) {
            assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<int> a5{};
    {
        shared_ptr<int[1][7][2][9]> p5 = allocate_shared<int[1][7][2][9]>(a5);
        assert_shared_use_get(p5);
        for (int i = 0; i < 7; ++i) {
            for (int j = 0; j < 2; ++j) {
                for (int k = 0; k < 9; ++k) {
                    assert(p5[0][i][j][k] == 0);
                }
            }
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<HighlyAligned> a6{};
    {
        // Overaligned element type through an allocator.
        shared_ptr<HighlyAligned[6]> p6 = allocate_shared<HighlyAligned[6]>(a6);
        assert_shared_use_get(p6);
        assert(reinterpret_cast<uintptr_t>(p6.get()) % alignof(HighlyAligned) == 0);
        for (int i = 0; i < 6; ++i) {
            assert(p6[i].a == 0 && p6[i].b == 0 && p6[i].c == 0 && p6[i].d == 0);
        }
    }
    assert_construct_destruct_equal();

    test_allocate_init_destruct_order<ReportAddress[5]>(); // success one dimensional
    test_allocate_init_destruct_order<ReportAddress[20]>(); // failure one dimensional
    test_allocate_init_destruct_order<ReportAddress[2][2][2]>(); // success multidimensional
    test_allocate_init_destruct_order<ReportAddress[3][3][3]>(); // failure multidimensional
}
// allocate_shared<T[]> (unbounded array types) with the instrumented
// allocator; the element count is a runtime argument, and each scope ends
// with balanced construct/destroy counts.
void test_allocate_shared_array_unknown_bounds() {
    CustomAlloc<string> a0{};
    {
        shared_ptr<string[]> p0 = allocate_shared<string[]>(a0, 100);
        assert_shared_use_get(p0);
        for (int i = 0; i < 100; ++i) {
            assert(p0[i].empty());
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<InitialValue> a1{};
    {
        // 2 * 8 * 9 == 144 flattened elements.
        shared_ptr<InitialValue[][8][9]> p1 = allocate_shared_assert<InitialValue[][8][9]>(144, a1, 2u);
        for (int i = 0; i < 2; ++i) {
            for (int j = 0; j < 8; ++j) {
                for (int k = 0; k < 9; ++k) {
                    assert(p1[i][j][k].value == 106);
                }
            }
        }
    }
    assert_construct_destruct_equal();

    {
        shared_ptr<string[][2]> p2 = allocate_shared<string[][2]>(a0, 10, {"Meow!", "Purr"});
        assert_shared_use_get(p2);
        for (int i = 0; i < 10; ++i) {
            assert(p2[i][0].compare("Meow!") == 0);
            assert(p2[i][1].compare("Purr") == 0);
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<vector<int>> a3{};
    {
        shared_ptr<vector<int>[]> p3 = allocate_shared<vector<int>[]>(a3, 3, {9, 9, 9});
        assert_shared_use_get(p3);
        for (int i = 0; i < 3; ++i) {
            assert(p3[i].size() == 3);
            for (const auto& val : p3[i]) {
                assert(val == 9);
            }
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<ThreeIntWrap> a4{};
    {
        shared_ptr<ThreeIntWrap[]> p4 = allocate_shared_init_assert<ThreeIntWrap[]>(5, a4, 5, {2, 8, 9});
        for (int i = 0; i < 5; ++i) {
            assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<int> a5{};
    // Zero-length arrays are valid; no elements are constructed.
    { shared_ptr<int[]> p5 = allocate_shared_assert<int[]>(0, a5, 0u); } // p5 cannot be dereferenced
    assert_construct_destruct_equal();

    {
        shared_ptr<int[][5][6]> p6 = allocate_shared<int[][5][6]>(a5, 4u);
        assert_shared_use_get(p6);
        for (int i = 0; i < 4; ++i) {
            for (int j = 0; j < 5; ++j) {
                for (int k = 0; k < 6; ++k) {
                    assert(p6[i][j][k] == 0);
                }
            }
        }
    }
    assert_construct_destruct_equal();

    CustomAlloc<HighlyAligned> a7{};
    {
        shared_ptr<HighlyAligned[]> p7 = allocate_shared<HighlyAligned[]>(a7, 7u);
        assert_shared_use_get(p7);
        assert(reinterpret_cast<uintptr_t>(p7.get()) % alignof(HighlyAligned) == 0);
        for (int i = 0; i < 7; ++i) {
            assert(p7[i].a == 0 && p7[i].b == 0 && p7[i].c == 0 && p7[i].d == 0);
        }
    }
    assert_construct_destruct_equal();

    test_allocate_init_destruct_order<ReportAddress[]>(5u); // success one dimensional
    test_allocate_init_destruct_order<ReportAddress[]>(20u); // failure one dimensional
    test_allocate_init_destruct_order<ReportAddress[][2][2]>(2u); // success multidimensional
    test_allocate_init_destruct_order<ReportAddress[][3][3]>(3u); // failure multidimensional
}
// Ordering note: the test functions share global counters (allocationCount,
// canCreate, construct/destroy counts), so they run sequentially.
int main() {
    // make_shared: scalar, T[N], and T[] forms.
    test_make_shared_not_array();
    test_make_shared_array_known_bounds();
    test_make_shared_array_unknown_bounds();
    // allocate_shared with an allocator that instruments construct()/destroy().
    test_allocate_shared_not_array();
    test_allocate_shared_array_known_bounds();
    test_allocate_shared_array_unknown_bounds();
}

Просмотреть файл

@ -1598,11 +1598,19 @@ STATIC_ASSERT(__cpp_lib_shared_mutex == 201505L);
#ifndef __cpp_lib_shared_ptr_arrays
#error __cpp_lib_shared_ptr_arrays is not defined
#elif __cpp_lib_shared_ptr_arrays != 201611L
#elif _HAS_CXX20
#if __cpp_lib_shared_ptr_arrays != 201707L
#error __cpp_lib_shared_ptr_arrays is not 201707L
#else
STATIC_ASSERT(__cpp_lib_shared_ptr_arrays == 201707L);
#endif
#else
#if __cpp_lib_shared_ptr_arrays != 201611L
#error __cpp_lib_shared_ptr_arrays is not 201611L
#else
STATIC_ASSERT(__cpp_lib_shared_ptr_arrays == 201611L);
#endif
#endif
#if _HAS_CXX17
#ifndef __cpp_lib_shared_ptr_weak_type

Просмотреть файл

@ -179,7 +179,10 @@ tests\string1
tests\string2
tests\strstream
tests\system_error
tests\thread
# TRANSITION, flaky test
# tests\thread
tests\tuple
tests\type_traits1
tests\type_traits2