// memory_resource standard header

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#pragma once
#ifndef _MEMORY_RESOURCE_
#define _MEMORY_RESOURCE_
#include <yvals.h>
#if _STL_COMPILER_PREPROCESSOR

#if !_HAS_CXX17
#pragma message("The contents of <memory_resource> are available only with C++17 or later.")
#else // ^^^ !_HAS_CXX17 / _HAS_CXX17 vvv
#include <vector>
#include <xbit_ops.h>
#include <xpolymorphic_allocator.h>
#include <xutility>

#ifndef _M_CEE
#include <mutex>
#endif // _M_CEE

#pragma pack(push, _CRT_PACKING)
#pragma warning(push, _STL_WARNING_LEVEL)
#pragma warning(disable : _STL_DISABLED_WARNINGS)
_STL_DISABLE_CLANG_WARNINGS
#pragma push_macro("new")
#undef new

_STD_BEGIN
namespace pmr {

    // FUNCTION set_default_resource
    extern "C" _CRT_SATELLITE_1 memory_resource* __cdecl _Aligned_set_default_resource(memory_resource*) noexcept;
    extern "C" _CRT_SATELLITE_1 memory_resource* __cdecl _Unaligned_set_default_resource(memory_resource*) noexcept;

    inline memory_resource* set_default_resource(memory_resource* const _Resource) noexcept {
#ifdef __cpp_aligned_new
        return _Aligned_set_default_resource(_Resource);
#else // ^^^ __cpp_aligned_new / !__cpp_aligned_new vvv
        return _Unaligned_set_default_resource(_Resource);
#endif // __cpp_aligned_new
    }
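    // Illustrative usage sketch (not part of the original header): set_default_resource installs the
    // resource returned by later get_default_resource() calls and returns the previous default.
    // The names _My_resource and _Prev below are hypothetical:
    //     std::pmr::memory_resource* _Prev = std::pmr::set_default_resource(&_My_resource);
    //     // pmr containers constructed without an explicit resource now allocate from _My_resource
    //     std::pmr::set_default_resource(_Prev); // restore the previous default resource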
extern "C" _CRT_SATELLITE_1 _NODISCARD memory_resource* __cdecl null_memory_resource() noexcept;
|
|
|
|
|
|
|
|
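    // Illustrative usage sketch (not part of the original header): null_memory_resource() returns a
    // resource whose allocate() always throws bad_alloc, which makes it a useful upstream resource
    // when a buffer-backed resource must never fall back to the heap:
    //     char _Buf[4096]; // hypothetical stack buffer
    //     std::pmr::monotonic_buffer_resource _Res{_Buf, sizeof(_Buf), std::pmr::null_memory_resource()};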
    // FUNCTION new_delete_resource
    class _Identity_equal_resource : public memory_resource {
    protected:
        virtual bool do_is_equal(const memory_resource& _That) const noexcept override {
            return this == &_That;
        }
    };

    class _Unaligned_new_delete_resource_impl final
        : public _Identity_equal_resource { // implementation for new_delete_resource with /Zc:alignedNew-
        virtual void* do_allocate(const size_t _Bytes, const size_t _Align) override {
            if (_Align > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
                _Xbad_alloc();
            }

            return ::operator new(_Bytes);
        }

        virtual void do_deallocate(void* const _Ptr, const size_t _Bytes, size_t) noexcept override /* strengthened */ {
            ::operator delete(_Ptr, _Bytes);
        }
    };

    extern "C" _CRT_SATELLITE_1 _Unaligned_new_delete_resource_impl* __cdecl _Unaligned_new_delete_resource() noexcept;
#ifdef __cpp_aligned_new
    class _Aligned_new_delete_resource_impl final
        : public _Identity_equal_resource { // implementation for new_delete_resource with aligned new support
        virtual void* do_allocate(const size_t _Bytes, const size_t _Align) override {
            if (_Align > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
                return ::operator new (_Bytes, align_val_t{_Align});
            }

            return ::operator new(_Bytes);
        }

        virtual void do_deallocate(void* const _Ptr, const size_t _Bytes, const size_t _Align) noexcept override
        /* strengthened */ {
            if (_Align > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
                return ::operator delete (_Ptr, _Bytes, align_val_t{_Align});
            }

            ::operator delete(_Ptr, _Bytes);
        }
    };

    extern "C" _CRT_SATELLITE_1 _Aligned_new_delete_resource_impl* __cdecl _Aligned_new_delete_resource() noexcept;

    _NODISCARD inline memory_resource* new_delete_resource() noexcept {
        return _Aligned_new_delete_resource();
    }

#else // ^^^ __cpp_aligned_new / !__cpp_aligned_new vvv

    _NODISCARD inline memory_resource* new_delete_resource() noexcept {
        return _Unaligned_new_delete_resource();
    }
#endif // __cpp_aligned_new
    // CLASS unsynchronized_pool_resource
    template <class _Tag = void>
    struct _Double_link { // base class for intrusive doubly-linked structures
        _Double_link* _Next;
        _Double_link* _Prev;
    };
    template <class _Ty, class _Tag = void>
    struct _Intrusive_list { // intrusive circular list of _Ty (which must derive from _Double_link<_Tag>)
        using _Link_type = _Double_link<_Tag>;

        constexpr _Intrusive_list() noexcept { // TRANSITION, VSO-517878
            // initialize this list to the empty state
        }

        _Intrusive_list(const _Intrusive_list&) = delete;
        _Intrusive_list& operator=(const _Intrusive_list&) = delete;

        static constexpr _Link_type* _As_link(_Ty* const _Ptr) noexcept {
            // extract the link from the item denoted by _Ptr
            static_assert(is_base_of_v<_Link_type, _Ty>);
            return static_cast<_Link_type*>(_Ptr);
        }

        static constexpr _Ty* _As_item(_Link_type* const _Ptr) noexcept { // get the item whose link is denoted by _Ptr
            static_assert(is_base_of_v<_Link_type, _Ty>);
            return static_cast<_Ty*>(_Ptr);
        }

        constexpr void _Push_front(_Ty* const _Item) noexcept { // insert _Item at the head of this list
            static_assert(is_base_of_v<_Link_type, _Ty>);
            const auto _Ptr = static_cast<_Link_type*>(_Item);
            _Ptr->_Next = _Head._Next;
            _Head._Next->_Prev = _Ptr;
            _Ptr->_Prev = &_Head;
            _Head._Next = _Ptr;
        }

        static constexpr void _Remove(_Ty* const _Item) noexcept { // unlink _Item from this list
            static_assert(is_base_of_v<_Link_type, _Ty>);
            const auto _Ptr = static_cast<_Link_type*>(_Item);
            _Ptr->_Next->_Prev = _Ptr->_Prev;
            _Ptr->_Prev->_Next = _Ptr->_Next;
        }

        constexpr void _Clear() noexcept { // make this list empty
            _Head._Next = &_Head;
            _Head._Prev = &_Head;
        }

        _Link_type _Head{&_Head, &_Head};
    };
    template <class _Tag = void>
    struct _Single_link { // base class for intrusive singly-linked structures
        _Single_link* _Next;
    };

    template <class _Ty, class _Tag = void>
    struct _Intrusive_stack { // intrusive stack of _Ty, which must derive from _Single_link<_Tag>
        using _Link_type = _Single_link<_Tag>;

        constexpr _Intrusive_stack() noexcept = default;
        constexpr _Intrusive_stack(_Intrusive_stack&& _That) noexcept : _Head{_That._Head} {
            _That._Head = nullptr;
        }
        constexpr _Intrusive_stack& operator=(_Intrusive_stack&& _That) noexcept {
            _Head = _That._Head;
            _That._Head = nullptr;
            return *this;
        }

        static constexpr _Link_type* _As_link(_Ty* const _Ptr) noexcept {
            static_assert(is_base_of_v<_Link_type, _Ty>);
            return static_cast<_Link_type*>(_Ptr);
        }

        static constexpr _Ty* _As_item(_Link_type* const _Ptr) noexcept {
            static_assert(is_base_of_v<_Link_type, _Ty>);
            return static_cast<_Ty*>(_Ptr);
        }

        constexpr bool _Empty() const noexcept {
            return _Head == nullptr;
        }

        constexpr _Ty* _Top() const noexcept {
            return _As_item(_Head);
        }

        constexpr void _Push(_Ty* const _Item) noexcept {
            const auto _Ptr = _As_link(_Item);
            _Ptr->_Next = _Head;
            _Head = _Ptr;
        }

        constexpr _Ty* _Pop() noexcept { // pre: _Head != nullptr
            const auto _Result = _Head;
            _Head = _Head->_Next;
            return _As_item(_Result);
        }

        constexpr void _Remove(_Ty* const _Item) noexcept {
            const auto _Ptr = _As_link(_Item);
            for (_Link_type** _Foo = _STD addressof(_Head); *_Foo; _Foo = _STD addressof((*_Foo)->_Next)) {
                if (*_Foo == _Ptr) {
                    *_Foo = _Ptr->_Next;
                    break;
                }
            }
        }

        _Link_type* _Head = nullptr;
    };
    struct pool_options {
        size_t max_blocks_per_chunk = 0;
        size_t largest_required_pool_block = 0;
    };
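    // Illustrative usage sketch (not part of the original header): both pool_options fields use 0 to
    // request an implementation-defined default, and out-of-range values are clamped by _Setup_options
    // below. The value 512 here is an arbitrary example:
    //     std::pmr::pool_options _Opts{};
    //     _Opts.largest_required_pool_block = 512; // pool blocks up to 512 bytes; larger goes upstream
    //     std::pmr::unsynchronized_pool_resource _Pool{_Opts, std::pmr::new_delete_resource()};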
    inline void _Check_alignment(void* const _Ptr, const size_t _Align) noexcept {
        _STL_ASSERT((reinterpret_cast<uintptr_t>(_Ptr) & (_Align - 1)) == 0,
            "Upstream resource did not respect alignment requirement.");
        (void) _Ptr;
        (void) _Align;
    }
    struct unsynchronized_pool_resource : _Identity_equal_resource {
        unsynchronized_pool_resource() noexcept { // initialize pool with default options and default upstream
            _Setup_options();
        }
        unsynchronized_pool_resource(
            const pool_options& _Opts, memory_resource* const _Resource) noexcept // strengthened
            : _Options(_Opts), _Pools{_Resource} { // initialize pool with options _Opts and upstream _Resource
            _STL_ASSERT(
                _Resource, "Upstream memory resource must be a valid resource (N4810 20.12.5.3 [mem.res.pool.ctor]/1)");
            _Setup_options();
        }
        explicit unsynchronized_pool_resource(memory_resource* const _Resource) noexcept // strengthened
            : _Pools{_Resource} { // initialize pool with default options and upstream _Resource
            _STL_ASSERT(
                _Resource, "Upstream memory resource must be a valid resource (N4810 20.12.5.3 [mem.res.pool.ctor]/1)");
            _Setup_options();
        }
        explicit unsynchronized_pool_resource(const pool_options& _Opts) noexcept // strengthened
            : _Options(_Opts) { // initialize pool with options _Opts and default upstream
            _Setup_options();
        }
        unsynchronized_pool_resource(const unsynchronized_pool_resource&) = delete;
        unsynchronized_pool_resource& operator=(const unsynchronized_pool_resource&) = delete;

        virtual ~unsynchronized_pool_resource() noexcept override {
            // destroy this pool resource, releasing all allocations back upstream
            release();
        }

        _NODISCARD memory_resource* upstream_resource() const noexcept /* strengthened */ {
            // retrieve this pool resource's upstream resource
            return _Pools.get_allocator().resource();
        }

        _NODISCARD pool_options options() const noexcept /* strengthened */ {
            // retrieve the adjusted/actual option values
            return _Options;
        }
        void release() noexcept /* strengthened */ {
            // release all allocations back upstream
            for (auto& _Al : _Pools) {
                _Al._Clear(*this);
            }
            _Pools.clear();
            _Pools.shrink_to_fit();

            auto* _Ptr = _Chunks._Head._Next;
            _Chunks._Clear();
            memory_resource* const _Resource = upstream_resource();
            while (_Ptr != &_Chunks._Head) {
                const auto _Chunk = _Chunks._As_item(_Ptr);
                _Ptr = _Ptr->_Next;
                _Resource->deallocate(_Chunk->_Base_address(), _Chunk->_Size, _Chunk->_Align);
            }
        }
    protected:
        virtual void* do_allocate(size_t _Bytes, const size_t _Align) override {
            // allocate a block from the appropriate pool, or directly from upstream if too large
            if (_Bytes <= _Options.largest_required_pool_block) {
                auto _Result = _Find_pool(_Bytes, _Align);
                if (_Result.first == _Pools.end() || _Result.first->_Log_of_size != _Result.second) {
                    _Result.first = _Pools.emplace(_Result.first, _Result.second);
                }

                return _Result.first->_Allocate(*this);
            }

            return _Allocate_oversized(_Bytes, _Align);
        }

        virtual void do_deallocate(void* const _Ptr, const size_t _Bytes, const size_t _Align) override {
            // deallocate a block from the appropriate pool, or directly from upstream if too large
            if (_Bytes <= _Options.largest_required_pool_block) {
                const auto _Result = _Find_pool(_Bytes, _Align);
                if (_Result.first != _Pools.end() && _Result.first->_Log_of_size == _Result.second) {
                    _Result.first->_Deallocate(*this, _Ptr);
                }
            } else {
                _Deallocate_oversized(_Ptr, _Bytes, _Align);
            }
        }
    private:
        struct _Oversized_header
            : _Double_link<> { // tracks an allocation that was obtained directly from the upstream resource
            size_t _Size;
            size_t _Align;

            void* _Base_address() const { // headers are stored at the end of the allocated memory block
                return const_cast<char*>(reinterpret_cast<const char*>(this + 1) - _Size);
            }
        };

        static_assert(alignof(_Oversized_header) == alignof(void*));
        static_assert(sizeof(_Oversized_header) == 4 * sizeof(void*));
        static constexpr bool _Prepare_oversized(size_t& _Bytes, size_t& _Align) noexcept {
            // adjust size and alignment to allow for an _Oversized_header
            _Align = (_STD max)(_Align, alignof(_Oversized_header));

            if (_Bytes > SIZE_MAX - sizeof(_Oversized_header) - alignof(_Oversized_header) + 1) {
                // no room for header + alignment padding
                return false;
            }

            // adjust _Bytes to the smallest multiple of alignof(_Oversized_header) that is >=
            // _Bytes + sizeof(_Oversized_header), which guarantees that the end of the allocated space
            // is properly aligned for an _Oversized_header.
            _Bytes = (_Bytes + sizeof(_Oversized_header) + alignof(_Oversized_header) - 1)
                     & ~(alignof(_Oversized_header) - 1);

            return true;
        }
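        // Worked example (illustrative, assuming a 64-bit target where sizeof(_Oversized_header) == 32
        // and alignof(_Oversized_header) == 8): a request of _Bytes == 100 becomes
        // (100 + 32 + 7) & ~7 == 136, so the 32-byte header starts at offset 104 (an 8-byte boundary)
        // and ends exactly at the end of the 136-byte upstream allocation.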
        void* _Allocate_oversized(size_t _Bytes, size_t _Align) {
            // allocate a block directly from the upstream resource
            if (!_Prepare_oversized(_Bytes, _Align)) { // no room for header + alignment padding
                _Xbad_alloc();
            }

            memory_resource* const _Resource = upstream_resource();
            void* const _Ptr = _Resource->allocate(_Bytes, _Align);
            _Check_alignment(_Ptr, _Align);

            _Oversized_header* const _Hdr =
                reinterpret_cast<_Oversized_header*>(reinterpret_cast<char*>(_Ptr) + _Bytes) - 1;

            _Hdr->_Size = _Bytes;
            _Hdr->_Align = _Align;
            _Chunks._Push_front(_Hdr);

            return _Ptr;
        }
        void _Deallocate_oversized(void* _Ptr, size_t _Bytes, size_t _Align) noexcept {
            // deallocate a block directly from the upstream resource
            if (!_Prepare_oversized(_Bytes, _Align)) {
                // no room for header + alignment padding; this memory WAS NOT allocated by this pool resource
#ifdef _DEBUG
                _STL_REPORT_ERROR("Cannot deallocate memory not allocated by this memory pool.");
#endif // _DEBUG
            }

            _Oversized_header* _Hdr = reinterpret_cast<_Oversized_header*>(reinterpret_cast<char*>(_Ptr) + _Bytes) - 1;

            _STL_ASSERT(_Hdr->_Size == _Bytes && _Hdr->_Align == _Align,
                "Cannot deallocate memory not allocated by this memory pool.");
            _Chunks._Remove(_Hdr);
            upstream_resource()->deallocate(_Ptr, _Bytes, _Align);
        }
        struct _Pool { // manager for a collection of chunks comprised of blocks of a single size
            struct _Chunk
                : _Single_link<> { // a memory allocation consisting of a number of fixed-size blocks to be parceled out
                _Intrusive_stack<_Single_link<>> _Free_blocks{}; // list of free blocks
                size_t _Free_count; // # of unallocated blocks
                size_t _Capacity; // total # of blocks
                char* _Base; // address of first block
                size_t _Next_available = 0; // index of first never-allocated block
                size_t _Id; // unique identifier; increasing order of allocation

                _Chunk(_Pool& _Al, void* const _Base_, const size_t _Capacity_) noexcept
                    : _Free_count{_Capacity_}, _Capacity{_Capacity_}, _Base{static_cast<char*>(_Base_)},
                      _Id{_Al._All_chunks._Empty() ? 0 : _Al._All_chunks._Top()->_Id + 1} {
                    // initialize a chunk of _Capacity blocks, all initially free
                }

                _Chunk(const _Chunk&) = delete;
                _Chunk& operator=(const _Chunk&) = delete;
            };

            _Chunk* _Unfull_chunk = nullptr; // largest _Chunk with free blocks
            _Intrusive_stack<_Chunk> _All_chunks{}; // all chunks (ordered by decreasing _Id)
            size_t _Next_capacity = _Default_next_capacity; // # of blocks to allocate in next _Chunk
            // in (1, (PTRDIFF_MAX - sizeof(_Chunk)) >> _Log_of_size]
            size_t _Block_size; // size of allocated blocks
            size_t _Log_of_size; // _Block_size == 1 << _Log_of_size
            _Chunk* _Empty_chunk = nullptr; // only _Chunk with all free blocks

            static constexpr size_t _Default_next_capacity = 4;
            static_assert(_Default_next_capacity > 1);
            explicit _Pool(const size_t _Log_of_size_) noexcept
                : _Block_size{size_t{1} << _Log_of_size_},
                  _Log_of_size{_Log_of_size_} { // initialize a pool that manages blocks of the indicated size
            }

            _Pool(_Pool&& _That) noexcept
                : _Unfull_chunk{_STD exchange(_That._Unfull_chunk, nullptr)}, _All_chunks{_STD move(_That._All_chunks)},
                  _Next_capacity{_STD exchange(_That._Next_capacity, _Default_next_capacity)},
                  _Block_size{_That._Block_size}, _Log_of_size{_That._Log_of_size}, _Empty_chunk{_STD exchange(
                      _That._Empty_chunk, nullptr)} {}

            _Pool& operator=(_Pool&& _That) noexcept {
                _Unfull_chunk = _STD exchange(_That._Unfull_chunk, nullptr);
                _All_chunks = _STD move(_That._All_chunks);
                _Next_capacity = _STD exchange(_That._Next_capacity, _Default_next_capacity);
                _Block_size = _That._Block_size;
                _Log_of_size = _That._Log_of_size;
                _Empty_chunk = _STD exchange(_That._Empty_chunk, nullptr);
                return *this;
            }
            void _Clear(unsynchronized_pool_resource& _Pool_resource) noexcept {
                // release all chunks in the pool back upstream
                _Intrusive_stack<_Chunk> _Tmp{};
                _STD swap(_Tmp, _All_chunks);
                memory_resource* const _Resource = _Pool_resource.upstream_resource();
                while (!_Tmp._Empty()) {
                    const auto _Ptr = _Tmp._Pop();
                    _Resource->deallocate(_Ptr->_Base, _Size_for_capacity(_Ptr->_Capacity), _Block_size);
                }

                _Unfull_chunk = nullptr;
                _Next_capacity = _Default_next_capacity;
                _Empty_chunk = nullptr;
            }
            void* _Allocate(unsynchronized_pool_resource& _Pool_resource) { // allocate a block from this pool
                for (;; _Unfull_chunk = _All_chunks._As_item(_Unfull_chunk->_Next)) {
                    if (!_Unfull_chunk) {
                        _Increase_capacity(_Pool_resource);
                    } else if (!_Unfull_chunk->_Free_blocks._Empty()) {
                        if (_Unfull_chunk == _Empty_chunk) { // this chunk is no longer empty
                            _Empty_chunk = nullptr;
                        }
                        --_Unfull_chunk->_Free_count;
                        return _Unfull_chunk->_Free_blocks._Pop();
                    }

                    if (_Unfull_chunk->_Next_available < _Unfull_chunk->_Capacity) {
                        if (_Unfull_chunk == _Empty_chunk) { // this chunk is no longer empty
                            _Empty_chunk = nullptr;
                        }
                        --_Unfull_chunk->_Free_count;
                        char* const _Block = _Unfull_chunk->_Base + _Unfull_chunk->_Next_available * _Block_size;
                        ++_Unfull_chunk->_Next_available;
                        *(reinterpret_cast<_Chunk**>(_Block + _Block_size) - 1) = _Unfull_chunk;
                        return _Block;
                    }
                }
            }
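            // Note (added explanation): every block carved out by _Allocate above ends with a _Chunk*
            // footer (which is why _Find_pool sizes blocks as _Bytes + sizeof(void*)); _Deallocate below
            // reads that footer to map an arbitrary block pointer back to its owning _Chunk.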
            void _Deallocate(unsynchronized_pool_resource& _Pool_resource, void* const _Ptr) noexcept {
                // return a block to this pool
                _Chunk* _Current = *(reinterpret_cast<_Chunk**>(static_cast<char*>(_Ptr) + _Block_size) - 1);

                _Current->_Free_blocks._Push(::new (_Ptr) _Single_link<>);

                if (_Current->_Free_count++ == 0) {
                    // prefer to allocate from newer/larger chunks...
                    if (!_Unfull_chunk || _Unfull_chunk->_Id < _Current->_Id) {
                        _Unfull_chunk = _Current;
                    }

                    return;
                }

                if (_Current->_Free_count < _Current->_Capacity) {
                    return;
                }

                if (!_Empty_chunk) {
                    _Empty_chunk = _Current;
                    return;
                }

                // ...and release older/smaller chunks to keep the list lengths short.
                if (_Empty_chunk->_Id < _Current->_Id) {
                    _STD swap(_Current, _Empty_chunk);
                }

                _All_chunks._Remove(_Current);
                _Pool_resource.upstream_resource()->deallocate(
                    _Current->_Base, _Size_for_capacity(_Current->_Capacity), _Block_size);
            }
            size_t _Size_for_capacity(const size_t _Capacity) const noexcept {
                // return the size of a chunk that holds _Capacity blocks
                return (_Capacity << _Log_of_size) + sizeof(_Chunk);
            }
            void _Increase_capacity(unsynchronized_pool_resource& _Pool_resource) {
                // this pool has no free blocks; get a new chunk from upstream
                const size_t _Size = _Size_for_capacity(_Next_capacity);
                memory_resource* const _Resource = _Pool_resource.upstream_resource();
                void* const _Ptr = _Resource->allocate(_Size, _Block_size);
                _Check_alignment(_Ptr, _Block_size);

                void* const _Tmp = static_cast<char*>(_Ptr) + _Size - sizeof(_Chunk);
                _Unfull_chunk = ::new (_Tmp) _Chunk{*this, _Ptr, _Next_capacity};
                _Empty_chunk = _Unfull_chunk;
                _All_chunks._Push(_Unfull_chunk);

                // scale _Next_capacity by 2, saturating so that _Size_for_capacity(_Next_capacity) cannot overflow
                _Next_capacity =
                    (_STD min)(_Next_capacity << 1, (_STD min)((PTRDIFF_MAX - sizeof(_Chunk)) >> _Log_of_size,
                                                        _Pool_resource._Options.max_blocks_per_chunk));
            }
        };
        void _Setup_options() noexcept { // configure pool options
            constexpr auto _Max_blocks_per_chunk_limit = static_cast<size_t>(PTRDIFF_MAX);
            constexpr auto _Largest_required_pool_block_limit =
                static_cast<size_t>((PTRDIFF_MAX >> 4) + 1); // somewhat arbitrary power of 2
            static_assert(_Is_pow_2(_Largest_required_pool_block_limit));

            if (_Options.max_blocks_per_chunk - 1 >= _Max_blocks_per_chunk_limit) {
                _Options.max_blocks_per_chunk = _Max_blocks_per_chunk_limit;
            }

            if (_Options.largest_required_pool_block - 1 < sizeof(void*)) {
                _Options.largest_required_pool_block = sizeof(void*);
            } else if (_Options.largest_required_pool_block - 1 >= _Largest_required_pool_block_limit) {
                _Options.largest_required_pool_block = _Largest_required_pool_block_limit;
            } else {
                _Options.largest_required_pool_block = static_cast<size_t>(1)
                                                    << _Ceiling_of_log_2(_Options.largest_required_pool_block);
            }
        }
        pair<pmr::vector<_Pool>::iterator, unsigned char> _Find_pool(
            const size_t _Bytes, const size_t _Align) noexcept {
            // find the pool from which to allocate a block with size _Bytes and alignment _Align
            const size_t _Size = (_STD max)(_Bytes + sizeof(void*), _Align);
            const auto _Log_of_size = static_cast<unsigned char>(_Ceiling_of_log_2(_Size));
            return {_STD lower_bound(_Pools.begin(), _Pools.end(), _Log_of_size,
                        [](const _Pool& _Al, const unsigned char _Log) { return _Al._Log_of_size < _Log; }),
                _Log_of_size};
        }
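        // Worked example (illustrative): for _Bytes == 24 and _Align == 8 on a 64-bit target,
        // _Size == max(24 + sizeof(void*), 8) == 32, so _Log_of_size == 5 and the block comes from
        // the pool of 32-byte blocks (the extra pointer leaves room for the _Chunk* footer).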
        pool_options _Options{}; // parameters that control the behavior of this pool resource
        _Intrusive_list<_Oversized_header> _Chunks{}; // list of oversized allocations obtained directly from upstream
        pmr::vector<_Pool> _Pools{}; // pools in order of increasing block size
    };
#ifndef _M_CEE
    class synchronized_pool_resource : public unsynchronized_pool_resource {
    public:
        using unsynchronized_pool_resource::unsynchronized_pool_resource;

        void release() noexcept /* strengthened */ {
            lock_guard<mutex> _Guard{_Mtx};
            this->unsynchronized_pool_resource::release();
        }

    protected:
        virtual void* do_allocate(const size_t _Bytes, const size_t _Align) override {
            lock_guard<mutex> _Guard{_Mtx};
            return this->unsynchronized_pool_resource::do_allocate(_Bytes, _Align);
        }

        virtual void do_deallocate(void* const _Ptr, const size_t _Bytes, const size_t _Align) override {
            lock_guard<mutex> _Guard{_Mtx};
            this->unsynchronized_pool_resource::do_deallocate(_Ptr, _Bytes, _Align);
        }

    private:
        mutable mutex _Mtx;
    };
#endif // _M_CEE
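    // Illustrative usage sketch (not part of the original header): synchronized_pool_resource simply
    // serializes every allocate/deallocate/release call on unsynchronized_pool_resource with a single
    // mutex, so one pool can be shared by multiple threads at the cost of lock contention:
    //     std::pmr::synchronized_pool_resource _Shared_pool{};
    //     // several threads may now call _Shared_pool.allocate()/deallocate() concurrently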
    // CLASS monotonic_buffer_resource
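    // Illustrative usage sketch (not part of the original header): a monotonic_buffer_resource hands
    // out memory from an optional initial buffer, then from geometrically growing upstream blocks;
    // do_deallocate is a no-op and everything is returned upstream only in release()/the destructor:
    //     char _Stack_space[1024]; // hypothetical local buffer
    //     std::pmr::monotonic_buffer_resource _Rsrc{_Stack_space, sizeof(_Stack_space)};
    //     std::pmr::vector<int> _Vec{&_Rsrc}; // fast allocation; memory reclaimed all at once by _Rsrc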
    class monotonic_buffer_resource : public _Identity_equal_resource {
    public:
        explicit monotonic_buffer_resource(memory_resource* const _Upstream) noexcept // strengthened
            : _Resource{_Upstream} { // initialize this resource with upstream
        }

        monotonic_buffer_resource(const size_t _Initial_size, memory_resource* const _Upstream) noexcept // strengthened
            : _Next_buffer_size(_Round(_Initial_size)),
              _Resource{_Upstream} { // initialize this resource with upstream and initial allocation size
        }

        monotonic_buffer_resource(void* const _Buffer, const size_t _Buffer_size,
            memory_resource* const _Upstream) noexcept // strengthened
            : _Current_buffer(_Buffer), _Space_available(_Buffer_size),
              _Next_buffer_size(_Buffer_size ? _Scale(_Buffer_size) : _Min_allocation),
              _Resource{_Upstream} { // initialize this resource with upstream and initial buffer
        }

        monotonic_buffer_resource() = default;
        explicit monotonic_buffer_resource(const size_t _Initial_size) noexcept // strengthened
            : _Next_buffer_size(_Round(_Initial_size)) { // initialize this resource with initial allocation size
        }

        monotonic_buffer_resource(void* const _Buffer, const size_t _Buffer_size) noexcept // strengthened
            : _Current_buffer(_Buffer), _Space_available(_Buffer_size),
              _Next_buffer_size(_Buffer_size ? _Scale(_Buffer_size)
                                             : _Min_allocation) { // initialize this resource with initial buffer
        }

        virtual ~monotonic_buffer_resource() noexcept override {
            release();
        }

        monotonic_buffer_resource(const monotonic_buffer_resource&) = delete;
        monotonic_buffer_resource& operator=(const monotonic_buffer_resource&) = delete;
        void release() noexcept /* strengthened */ {
            if (_Chunks._Empty()) {
                // nothing to release; potentially continues to use an initial block provided at construction
                return;
            }

            _Current_buffer = nullptr;
            _Space_available = 0;

            // unscale _Next_buffer_size so the next allocation will be the same size as the most recent allocation
            // (keep synchronized with monotonic_buffer_resource::_Scale)
            const size_t _Unscaled = (_Next_buffer_size / 3 * 2 + alignof(_Header) - 1) & _Max_allocation;
            _Next_buffer_size = (_STD max)(_Unscaled, _Min_allocation);

            _Intrusive_stack<_Header> _Tmp{};
            _STD swap(_Tmp, _Chunks);
            while (!_Tmp._Empty()) {
                const auto _Ptr = _Tmp._Pop();
                _Resource->deallocate(_Ptr->_Base_address(), _Ptr->_Size, _Ptr->_Align);
            }
        }
        _NODISCARD memory_resource* upstream_resource() const noexcept /* strengthened */ {
            // retrieve the upstream resource
            return _Resource;
        }

    protected:
        virtual void* do_allocate(const size_t _Bytes, const size_t _Align) override {
            // allocate from the current buffer or a new larger buffer from upstream
            if (!_STD align(_Align, _Bytes, _Current_buffer, _Space_available)) {
                _Increase_capacity(_Bytes, _Align);
            }

            void* const _Result = _Current_buffer;
            _Current_buffer = reinterpret_cast<char*>(_Current_buffer) + _Bytes;
            _Space_available -= _Bytes;
            return _Result;
        }

        virtual void do_deallocate(void*, size_t, size_t) override { // nothing to do
        }
    private:
        struct _Header : _Single_link<> { // track the size and alignment of an allocation from upstream
            size_t _Size;
            size_t _Align;

            _Header(const size_t _Size_, const size_t _Align_) : _Size{_Size_}, _Align{_Align_} {}

            void* _Base_address() const { // header is stored at the end of the allocated memory block
                return const_cast<char*>(reinterpret_cast<const char*>(this + 1) - _Size);
            }
        };

        static constexpr size_t _Min_allocation = 2 * sizeof(_Header);
        static constexpr size_t _Max_allocation = 0 - alignof(_Header);
        static constexpr size_t _Round(const size_t _Size) noexcept {
            // return the smallest multiple of alignof(_Header) greater than or equal to _Size,
            // clamped to the range [_Min_allocation, _Max_allocation]
            if (_Size < _Min_allocation) {
                return _Min_allocation;
            }

            if (_Size >= _Max_allocation) {
                return _Max_allocation;
            }

            // Since _Max_allocation == -alignof(_Header), _Size < _Max_allocation implies that
            // (_Size + alignof(_Header) - 1) does not overflow.
            return (_Size + alignof(_Header) - 1) & _Max_allocation;
        }
        static constexpr size_t _Scale(const size_t _Size) noexcept {
            // scale _Size by 1.5, rounding up to a multiple of alignof(_Header), saturating to _Max_allocation
            // (keep synchronized with monotonic_buffer_resource::release)
            constexpr auto _Max_size = (_Max_allocation - alignof(_Header) + 1) / 3 * 2;
            if (_Size >= _Max_size) {
                return _Max_allocation;
            }

            return (_Size + (_Size + 1) / 2 + alignof(_Header) - 1) & _Max_allocation;
        }
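        // Worked example (illustrative, assuming alignof(_Header) == 8 on a 64-bit target):
        // _Round(1001) == (1001 + 7) & ~7 == 1008, and _Scale(1000) == (1000 + 500 + 7) & ~7 == 1504,
        // i.e. roughly 1.5 * 1000 rounded up to the next multiple of alignof(_Header).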
        void _Increase_capacity(const size_t _Bytes, const size_t _Align) { // obtain a new buffer from upstream
            if (_Bytes > _Max_allocation - sizeof(_Header)) {
                _Xbad_alloc();
            }

            size_t _New_size = _Next_buffer_size;
            if (_New_size < _Bytes + sizeof(_Header)) {
                _New_size = (_Bytes + sizeof(_Header) + alignof(_Header) - 1) & _Max_allocation;
            }

            const size_t _New_align = (_STD max)(alignof(_Header), _Align);

            void* _New_buffer = _Resource->allocate(_New_size, _New_align);
            _Check_alignment(_New_buffer, _New_align);

            _Current_buffer = _New_buffer;
            _Space_available = _New_size - sizeof(_Header);
            _New_buffer = static_cast<char*>(_New_buffer) + _Space_available;
            _Chunks._Push(::new (_New_buffer) _Header{_New_size, _New_align});

            _Next_buffer_size = _Scale(_New_size);
        }
        void* _Current_buffer = nullptr; // current memory block to parcel out to callers
        size_t _Space_available = 0; // space remaining in current block
        size_t _Next_buffer_size = _Min_allocation; // size of next block to allocate from upstream
        _Intrusive_stack<_Header> _Chunks{}; // list of memory blocks allocated from upstream
        memory_resource* _Resource = _STD pmr::get_default_resource(); // upstream resource from which to allocate
    };
} // namespace pmr
_STD_END

#pragma pop_macro("new")
_STL_RESTORE_CLANG_WARNINGS
#pragma warning(pop)
#pragma pack(pop)
#endif // _HAS_CXX17
#endif // _STL_COMPILER_PREPROCESSOR
#endif // _MEMORY_RESOURCE_