Refactor interface between backend and frontend (#530)

* Rename to use Config, rather than StateHandle/Globals/Backend
* Make Backend a type on Config that contains the address space management implementation
* Make Ranges part of the Backend configuration, so we can reuse code for different ways of managing memory
* Pull the common chains of range definitions into separate files for reuse.
* Move PagemapEntry to CommonConfig
* Expose Pagemap through backend, so frontend doesn't see Pagemap directly
* Remove global Pal and use DefaultPal, where one is not passed explicitly.

Co-authored-by: David Chisnall <davidchisnall@users.noreply.github.com>
Co-authored-by: Nathaniel Filardo <105816689+nwf-msr@users.noreply.github.com>
This commit is contained in:
Matthew Parkinson 2022-05-31 10:45:04 +01:00 committed by GitHub
Parent 1b8aa6bc0d
Commit 03c9da6aa4
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
42 changed files: 765 additions and 538 deletions
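
For orientation before the per-file diffs, here is an abridged sketch of the shape a configuration now takes (condensed from the StandardConfig added below; illustrative only, most members omitted):

```
// Condensed from StandardConfig in this diff; not a complete definition.
class ExampleConfig final : public CommonConfig
{
public:
  using Pal = DefaultPal;
  using PagemapEntry = DefaultPagemapEntry;

private:
  using ConcretePagemap =
    FlatPagemap<MIN_CHUNK_BITS, PagemapEntry, Pal, false>;
  using Pagemap = BasicPagemap<Pal, ConcretePagemap, PagemapEntry, false>;
  using Base =
    Pipe<PalRange<Pal>, PagemapRegisterRange<Pagemap, true>>;

public:
  // The LocalState picks the Range composition; the Backend wraps the
  // address-space management that the frontend is allowed to see.
  using LocalState = StandardLocalState<Pal, Pagemap, Base>;
  using Backend = BackendAllocator<Pal, PagemapEntry, Pagemap, LocalState>;
  // Options, pool(), ensure_init(), register_clean_up() omitted here.
};
```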

View file

@ -78,7 +78,7 @@ Exciting, no?
### Decoding a MetaEntry
The centerpiece of `snmalloc`'s metadata is its `PageMap`, which associates each "chunk" of the address space (~16KiB; see `MIN_CHUNK_BITS`) with a `MetaEntry`.
The centerpiece of `snmalloc`'s metadata is its `Pagemap`, which associates each "chunk" of the address space (~16KiB; see `MIN_CHUNK_BITS`) with a `MetaEntry`.
A `MetaEntry` is a pair of pointers, suggestively named `meta` and `remote_and_sizeclass`.
In more detail, `MetaEntry`s are better represented by Sigma and Pi types, all packed into two pointer-sized words in ways that preserve pointer provenance on CHERI.
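
A minimal mental model, assuming only what the paragraph above states (the real type packs extra state into the low bits of both words and preserves provenance on CHERI):

```
#include <cstdint>

// Conceptual sketch only, not the actual snmalloc definition.
struct MetaEntrySketch
{
  uintptr_t meta;                 // points at the slab's metadata block
  uintptr_t remote_and_sizeclass; // owning remote allocator + sizeclass bits
};
```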

View file

@ -189,7 +189,7 @@ In future architectures, this is increasingly likely to be a no-op.
## Backend-Provided Operations
* `CapPtr<T, Bout> capptr_domesticate(Backend::LocalState *, CapPtr<T, Bin> ptr)` allows the backend to test whether `ptr` is sensible, by some definition thereof.
* `CapPtr<T, Bout> capptr_domesticate(LocalState *, CapPtr<T, Bin> ptr)` allows the backend to test whether `ptr` is sensible, by some definition thereof.
The annotation `Bout` is *computed* as a function of `Bin`.
`Bin` is required to be `Wild`, and `Bout` is `Tame` but otherwise identical.
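
A hedged usage sketch (mirroring the call sites changed later in this diff; the free wrapper `capptr_domesticate<Config>` dispatches to the backend-provided function when `Options.HasDomesticate` is set):

```
// Sketch: validate a Wild pointer received from another thread before use.
// Assumes the snmalloc internal headers are already included.
template<typename Config>
void* domesticate_or_null(
  typename Config::LocalState* ls, snmalloc::capptr::AllocWild<void> wild)
{
  auto tame = snmalloc::capptr_domesticate<Config>(ls, wild);
  return tame.unsafe_ptr(); // nullptr if the pointer was rejected
}
```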

View file

@ -1,223 +1,28 @@
#pragma once
#include "../backend_helpers/backend_helpers.h"
#if defined(SNMALLOC_CHECK_CLIENT) && !defined(OPEN_ENCLAVE)
/**
* Protect meta data blocks by allocating separate from chunks for
* user allocations. This involves leaving gaps in address space.
* This is less efficient, so should only be applied for the checked
* build.
*
* On Open Enclave the address space is limited, so we disable this
* feature.
*/
# define SNMALLOC_META_PROTECTED
#endif
namespace snmalloc
{
/**
* This class implements the standard backend for handling allocations.
* It abstracts page table management and address space management.
* It is parameterised by its Pagemap management and
* address space management (LocalState).
*/
template<SNMALLOC_CONCEPT(ConceptPAL) PAL, bool fixed_range>
class BackendAllocator : public CommonConfig
template<
SNMALLOC_CONCEPT(ConceptPAL) PAL,
typename PagemapEntry,
typename Pagemap,
typename LocalState>
class BackendAllocator
{
using GlobalMetaRange = typename LocalState::GlobalMetaRange;
using Stats = typename LocalState::Stats;
public:
class PageMapEntry;
using Pal = PAL;
using SlabMetadata = FrontendSlabMetadata;
private:
using ConcretePagemap =
FlatPagemap<MIN_CHUNK_BITS, PageMapEntry, PAL, fixed_range>;
using SlabMetadata = typename PagemapEntry::SlabMetadata;
public:
/**
* Example of type stored in the pagemap.
* The following class could be replaced by:
*
* ```
* using PageMapEntry = FrontendMetaEntry<SlabMetadata>;
* ```
*
* The full form here provides an example of how to extend the pagemap
* entries. It also guarantees that the front end never directly
* constructs meta entries, it only ever reads them or modifies them in
* place.
*/
class PageMapEntry : public FrontendMetaEntry<SlabMetadata>
{
/**
* The private initialising constructor is usable only by this back end.
*/
friend class BackendAllocator;
/**
* The private default constructor is usable only by the pagemap.
*/
friend ConcretePagemap;
/**
* The only constructor that creates newly initialised meta entries.
* This is callable only by the back end. The front end may copy,
* query, and update these entries, but it may not create them
* directly. This contract allows the back end to store any arbitrary
* metadata in meta entries when they are first constructed.
*/
SNMALLOC_FAST_PATH
PageMapEntry(SlabMetadata* meta, uintptr_t ras)
: FrontendMetaEntry<SlabMetadata>(meta, ras)
{}
/**
* Copy assignment is used only by the pagemap.
*/
PageMapEntry& operator=(const PageMapEntry& other)
{
FrontendMetaEntry<SlabMetadata>::operator=(other);
return *this;
}
/**
* Default constructor. This must be callable from the pagemap.
*/
SNMALLOC_FAST_PATH PageMapEntry() = default;
};
using Pagemap = BasicPagemap<
BackendAllocator,
PAL,
ConcretePagemap,
PageMapEntry,
fixed_range>;
#if defined(_WIN32) || defined(__CHERI_PURE_CAPABILITY__)
static constexpr bool CONSOLIDATE_PAL_ALLOCS = false;
#else
static constexpr bool CONSOLIDATE_PAL_ALLOCS = true;
#endif
// Set up source of memory
using Base = std::conditional_t<
fixed_range,
EmptyRange,
Pipe<
PalRange<Pal>,
PagemapRegisterRange<Pagemap, CONSOLIDATE_PAL_ALLOCS>>>;
static constexpr size_t MinBaseSizeBits()
{
if constexpr (pal_supports<AlignedAllocation, PAL>)
{
return bits::next_pow2_bits_const(PAL::minimum_alloc_size);
}
else
{
return MIN_CHUNK_BITS;
}
}
// Global range of memory
using GlobalR = Pipe<
Base,
LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>,
LogRange<2>,
GlobalRange<>>;
#ifdef SNMALLOC_META_PROTECTED
// Introduce two global ranges, so we don't mix Object and Meta
using CentralObjectRange = Pipe<
GlobalR,
LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>,
LogRange<3>,
GlobalRange<>>;
using CentralMetaRange = Pipe<
GlobalR,
SubRange<PAL, 6>, // Use SubRange to introduce guard pages.
LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>,
LogRange<4>,
GlobalRange<>>;
// Source for object allocations
using StatsObject =
Pipe<CentralObjectRange, CommitRange<PAL>, StatsRange<>>;
using ObjectRange =
Pipe<StatsObject, LargeBuddyRange<21, 21, Pagemap>, LogRange<5>>;
using StatsMeta = Pipe<CentralMetaRange, CommitRange<PAL>, StatsRange<>>;
using MetaRange = Pipe<
StatsMeta,
LargeBuddyRange<21 - 6, bits::BITS - 1, Pagemap>,
SmallBuddyRange<>>;
// Create global range that can service small meta-data requests.
// Don't want to add this to the CentralMetaRange to move Commit outside the
// lock on the common case.
using GlobalMetaRange = Pipe<StatsMeta, SmallBuddyRange<>, GlobalRange<>>;
using Stats = StatsCombiner<StatsObject, StatsMeta>;
#else
// Source for object allocations and metadata
// No separation between the two
using Stats = Pipe<GlobalR, StatsRange<>>;
using ObjectRange = Pipe<
Stats,
CommitRange<PAL>,
LargeBuddyRange<21, 21, Pagemap>,
SmallBuddyRange<>>;
using GlobalMetaRange = Pipe<ObjectRange, GlobalRange<>>;
#endif
struct LocalState
{
ObjectRange object_range;
#ifdef SNMALLOC_META_PROTECTED
MetaRange meta_range;
MetaRange& get_meta_range()
{
return meta_range;
}
#else
ObjectRange& get_meta_range()
{
return object_range;
}
#endif
};
public:
template<bool fixed_range_ = fixed_range>
static std::enable_if_t<!fixed_range_> init()
{
static_assert(fixed_range_ == fixed_range, "Don't set SFINAE parameter!");
Pagemap::concretePagemap.init();
}
template<bool fixed_range_ = fixed_range>
static std::enable_if_t<fixed_range_> init(void* base, size_t length)
{
static_assert(fixed_range_ == fixed_range, "Don't set SFINAE parameter!");
auto [heap_base, heap_length] =
Pagemap::concretePagemap.init(base, length);
Pagemap::register_range(address_cast(heap_base), heap_length);
// Push memory into the global range.
range_to_pow_2_blocks<MIN_CHUNK_BITS>(
capptr::Chunk<void>(heap_base),
heap_length,
[&](capptr::Chunk<void> p, size_t sz, bool) {
GlobalR g;
g.dealloc_range(p, sz);
});
}
/**
* Provide a block of meta-data with size and align.
*
@ -302,6 +107,15 @@ namespace snmalloc
return {p, meta};
}
/**
* Deallocate a chunk of memory of size `size` and base `alloc`.
* The `slab_metadata` is the meta-data block associated with this
* chunk. The backend can recalculate this, but as the caller will
* already have it, we take it for possibly more optimal code.
*
* LocalState contains all the information about the various ranges
* that are used by the backend to manage the address space.
*/
static void dealloc_chunk(
LocalState& local_state,
SlabMetadata& slab_metadata,
@ -336,6 +150,12 @@ namespace snmalloc
local_state.object_range.dealloc_range(chunk, size);
}
template<bool potentially_out_of_range = false>
SNMALLOC_FAST_PATH static const PagemapEntry& get_metaentry(address_t p)
{
return Pagemap::template get_metaentry<potentially_out_of_range>(p);
}
static size_t get_current_usage()
{
Stats stats_state;
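
The newly exposed `get_metaentry` is how the front end now reaches the pagemap; a hedged sketch of a typical query (mirroring the localalloc/corealloc changes further down):

```
// Sketch: look up the size of the object containing p via the backend only.
template<typename Config>
size_t object_size_containing(const void* p)
{
  const auto& entry =
    Config::Backend::template get_metaentry<true>(snmalloc::address_cast(p));
  return snmalloc::sizeclass_full_to_size(entry.get_sizeclass());
}
```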

View file

@ -0,0 +1,21 @@
#pragma once
#include "../backend/backend.h"
namespace snmalloc
{
/**
* Base range configuration contains common parts of other ranges.
*/
struct BaseLocalStateConstants
{
protected:
// Size of requests that the global cache should use
static constexpr size_t GlobalCacheSizeBits = 24;
// Size of requests that the local cache should use
static constexpr size_t LocalCacheSizeBits = 21;
};
} // namespace snmalloc
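
For reference, the byte sizes these constants imply (an illustrative aside, not part of the diff):

```
static_assert((1ULL << 24) == 16 * 1024 * 1024, "global cache requests: 16 MiB");
static_assert((1ULL << 21) == 2 * 1024 * 1024, "local cache requests: 2 MiB");
```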

View file

@ -1,6 +1,7 @@
#pragma once
#include "../backend/backend.h"
#include "../backend_helpers/backend_helpers.h"
#include "standard_range.h"
namespace snmalloc
{
@ -8,14 +9,26 @@ namespace snmalloc
* A single fixed address range allocator configuration
*/
template<SNMALLOC_CONCEPT(ConceptPAL) PAL>
class FixedGlobals final : public BackendAllocator<PAL, true>
class FixedRangeConfig final : public CommonConfig
{
public:
using GlobalPoolState = PoolState<CoreAllocator<FixedGlobals>>;
using PagemapEntry = DefaultPagemapEntry;
private:
using Backend = BackendAllocator<PAL, true>;
using ConcretePagemap =
FlatPagemap<MIN_CHUNK_BITS, PagemapEntry, PAL, true>;
using Pagemap = BasicPagemap<PAL, ConcretePagemap, PagemapEntry, true>;
public:
using LocalState = StandardLocalState<PAL, Pagemap>;
using GlobalPoolState = PoolState<CoreAllocator<FixedRangeConfig>>;
using Backend = BackendAllocator<PAL, PagemapEntry, Pagemap, LocalState>;
using Pal = PAL;
private:
inline static GlobalPoolState alloc_pool;
public:
@ -54,11 +67,23 @@ namespace snmalloc
snmalloc::register_clean_up();
}
static void
init(typename Backend::LocalState* local_state, void* base, size_t length)
static void init(LocalState* local_state, void* base, size_t length)
{
UNUSED(local_state);
Backend::init(base, length);
auto [heap_base, heap_length] =
Pagemap::concretePagemap.init(base, length);
Pagemap::register_range(address_cast(heap_base), heap_length);
// Push memory into the global range.
range_to_pow_2_blocks<MIN_CHUNK_BITS>(
capptr::Chunk<void>(heap_base),
heap_length,
[&](capptr::Chunk<void> p, size_t sz, bool) {
typename LocalState::GlobalR g;
g.dealloc_range(p, sz);
});
}
/* Verify that a pointer points into the region managed by this config */
@ -66,7 +91,7 @@ namespace snmalloc
static SNMALLOC_FAST_PATH CapPtr<
T,
typename B::template with_wildness<capptr::dimension::Wildness::Tame>>
capptr_domesticate(typename Backend::LocalState* ls, CapPtr<T, B> p)
capptr_domesticate(LocalState* ls, CapPtr<T, B> p)
{
static_assert(B::wildness == capptr::dimension::Wildness::Wild);
@ -75,7 +100,7 @@ namespace snmalloc
UNUSED(ls);
auto address = address_cast(p);
auto [base, length] = Backend::Pagemap::get_bounds();
auto [base, length] = Pagemap::get_bounds();
if ((address - base > (length - sz)) || (length < sz))
{
return nullptr;
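
A hedged end-to-end sketch of using this configuration (the PAL choice and the allocator-facing calls are assumptions based on the surrounding codebase, not part of this diff):

```
// Sketch: serve allocations from a pre-reserved region. PALNoAlloc and the
// LocalAllocator member functions used here are assumed names.
using FixedConfig =
  snmalloc::FixedRangeConfig<snmalloc::PALNoAlloc<snmalloc::DefaultPal>>;
using FixedAlloc = snmalloc::LocalAllocator<FixedConfig>;

void use_region(void* base, size_t length)
{
  FixedConfig::init(nullptr, base, length); // local_state is unused here
  FixedAlloc a;
  void* p = a.alloc(128);
  a.dealloc(p);
  a.teardown();
}
```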

View file

@ -4,46 +4,111 @@
// `snmalloc.h` or consume the global allocation APIs.
#ifndef SNMALLOC_PROVIDE_OWN_CONFIG
# include "../backend/backend.h"
# include "../backend_helpers/backend_helpers.h"
# include "backend.h"
# include "meta_protected_range.h"
# include "standard_range.h"
# if defined(SNMALLOC_CHECK_CLIENT) && !defined(OPEN_ENCLAVE)
/**
* Protect meta data blocks by allocating separate from chunks for
* user allocations. This involves leaving gaps in address space.
* This is less efficient, so should only be applied for the checked
* build.
*
* On Open Enclave the address space is limited, so we disable this
* feature.
*/
# define SNMALLOC_META_PROTECTED
# endif
namespace snmalloc
{
// Forward reference to thread local cleanup.
void register_clean_up();
# ifdef USE_SNMALLOC_STATS
inline static void print_stats()
{
printf("No Stats yet!");
// Stats s;
// current_alloc_pool()->aggregate_stats(s);
// s.print<Alloc>(std::cout);
}
# endif
/**
* The default configuration for a global snmalloc. This allocates memory
* from the operating system and expects to manage memory anywhere in the
* address space.
* The default configuration for a global snmalloc. It contains all the
* data structures to manage the memory from the OS. It has several internal
* public types for various aspects of the code.
* The most notable are:
*
* Backend - Manages the memory coming from the platform.
* LocalState - the per-thread/per-allocator state that may perform local
* caching of reserved memory. This also specifies the various Range types
* used to manage the memory.
*
* The Configuration sets up a Pagemap for the backend to use, and the state
* required to build new allocators (GlobalPoolState).
*/
class Globals final : public BackendAllocator<Pal, false>
class StandardConfig final : public CommonConfig
{
using GlobalPoolState = PoolState<CoreAllocator<StandardConfig>>;
public:
using GlobalPoolState = PoolState<CoreAllocator<Globals>>;
using Pal = DefaultPal;
using PagemapEntry = DefaultPagemapEntry;
private:
using Backend = BackendAllocator<Pal, false>;
using ConcretePagemap =
FlatPagemap<MIN_CHUNK_BITS, PagemapEntry, Pal, false>;
using Pagemap = BasicPagemap<Pal, ConcretePagemap, PagemapEntry, false>;
/**
* This specifies where this configuration sources memory from.
*
* Takes account of any platform specific constraints like whether
* mmap/virtual alloc calls can be consolidated.
* @{
*/
# if defined(_WIN32) || defined(__CHERI_PURE_CAPABILITY__)
static constexpr bool CONSOLIDATE_PAL_ALLOCS = false;
# else
static constexpr bool CONSOLIDATE_PAL_ALLOCS = true;
# endif
using Base = Pipe<
PalRange<Pal>,
PagemapRegisterRange<Pagemap, CONSOLIDATE_PAL_ALLOCS>>;
/**
* @}
*/
public:
/**
* Use one of the default range configurations
*/
# ifdef SNMALLOC_META_PROTECTED
using LocalState = MetaProtectedRangeLocalState<Pal, Pagemap, Base>;
# else
using LocalState = StandardLocalState<Pal, Pagemap, Base>;
# endif
/**
* Use the default backend.
*/
using Backend = BackendAllocator<Pal, PagemapEntry, Pagemap, LocalState>;
private:
SNMALLOC_REQUIRE_CONSTINIT
inline static GlobalPoolState alloc_pool;
/**
* Specifies if the Configuration has been initialised.
*/
SNMALLOC_REQUIRE_CONSTINIT
inline static std::atomic<bool> initialised{false};
/**
* Used to prevent two threads attempting to initialise the configuration
*/
SNMALLOC_REQUIRE_CONSTINIT
inline static FlagWord initialisation_lock{};
public:
/**
* Provides the state to create new allocators.
*/
static GlobalPoolState& pool()
{
return alloc_pool;
@ -70,11 +135,7 @@ namespace snmalloc
key_global = FreeListKey(entropy.get_free_list_key());
// Need to initialise pagemap.
Backend::init();
# ifdef USE_SNMALLOC_STATS
atexit(snmalloc::print_stats);
# endif
Pagemap::concretePagemap.init();
initialised = true;
}
@ -93,11 +154,10 @@ namespace snmalloc
snmalloc::register_clean_up();
}
};
} // namespace snmalloc
// The default configuration for snmalloc
namespace snmalloc
{
using Alloc = snmalloc::LocalAllocator<snmalloc::Globals>;
/**
* Create allocator type for this configuration.
*/
using Alloc = snmalloc::LocalAllocator<snmalloc::StandardConfig>;
} // namespace snmalloc
#endif
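
With the default configuration, day-to-day use is unchanged; a hedged sketch (ThreadAlloc and the header name are assumed to be as in the rest of the tree):

```
#include "snmalloc.h" // assumed top-level header name

void example()
{
  auto& a = snmalloc::ThreadAlloc::get();
  void* p = a.alloc(256);
  a.dealloc(p);
}
```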

View file

@ -0,0 +1,103 @@
#pragma once
#include "../backend/backend.h"
#include "base_constants.h"
namespace snmalloc
{
/**
* Range that carefully ensures meta-data and object data cannot be in
* the same memory range. Once memory has been used for either meta-data
* or object data it can never be recycled to the other.
*
* This configuration also includes guard pages and randomisation.
*
* PAL is the underlying PAL that is used to Commit memory ranges.
*
* Base is where memory is sourced from.
*
* MinSizeBits is the minimum request size that can be passed to Base.
* On Windows this is 16, as VirtualAlloc cannot reserve less than 64KiB.
* Alternative configurations might make this 2MiB so that huge pages
* can be used.
*/
template<
typename PAL,
typename Pagemap,
typename Base,
size_t MinSizeBits = MinBaseSizeBits<PAL>()>
struct MetaProtectedRangeLocalState : BaseLocalStateConstants
{
private:
// Global range of memory
using GlobalR = Pipe<
Base,
LargeBuddyRange<
GlobalCacheSizeBits,
bits::BITS - 1,
Pagemap,
MinSizeBits>,
LogRange<2>,
GlobalRange<>>;
// Central source of object-range, does not pass back to GlobalR as
// that would allow flows from Objects to Meta-data, and thus UAF
// would be able to corrupt meta-data.
using CentralObjectRange = Pipe<
GlobalR,
LargeBuddyRange<GlobalCacheSizeBits, bits::BITS - 1, Pagemap>,
LogRange<3>,
GlobalRange<>,
CommitRange<PAL>,
StatsRange<>>;
// Controls the padding around the meta-data range.
// The larger the padding range, the more randomisation
// can be used.
static constexpr size_t SubRangeRatioBits = 6;
// Centralised source of meta-range
using CentralMetaRange = Pipe<
GlobalR,
SubRange<PAL, SubRangeRatioBits>, // Use SubRange to introduce guard
// pages.
LargeBuddyRange<GlobalCacheSizeBits, bits::BITS - 1, Pagemap>,
LogRange<4>,
GlobalRange<>,
CommitRange<PAL>,
StatsRange<>>;
// Local caching of object range
using ObjectRange = Pipe<
CentralObjectRange,
LargeBuddyRange<LocalCacheSizeBits, LocalCacheSizeBits, Pagemap>,
LogRange<5>>;
// Local caching of meta-data range
using MetaRange = Pipe<
CentralMetaRange,
LargeBuddyRange<
LocalCacheSizeBits - SubRangeRatioBits,
bits::BITS - 1,
Pagemap>,
SmallBuddyRange<>>;
public:
using Stats = StatsCombiner<CentralObjectRange, CentralMetaRange>;
ObjectRange object_range;
MetaRange meta_range;
MetaRange& get_meta_range()
{
return meta_range;
}
// Create global range that can service small meta-data requests.
// Don't want to add the SmallBuddyRange to the CentralMetaRange as that
// would require committing memory inside the main global lock.
using GlobalMetaRange =
Pipe<CentralMetaRange, SmallBuddyRange<>, GlobalRange<>>;
};
} // namespace snmalloc

View file

@ -0,0 +1,65 @@
#pragma once
#include "../backend/backend.h"
#include "base_constants.h"
namespace snmalloc
{
/**
* Default configuration that does not provide any meta-data protection.
*
* PAL is the underlying PAL that is used to Commit memory ranges.
*
* Base is where memory is sourced from.
*
* MinSizeBits is the minimum request size that can be passed to Base.
* On Windows this is 16, as VirtualAlloc cannot reserve less than 64KiB.
* Alternative configurations might make this 2MiB so that huge pages
* can be used.
*/
template<
typename PAL,
typename Pagemap,
typename Base = EmptyRange,
size_t MinSizeBits = MinBaseSizeBits<PAL>()>
struct StandardLocalState : BaseLocalStateConstants
{
// Global range of memory; expose this so it can be filled by init.
using GlobalR = Pipe<
Base,
LargeBuddyRange<
GlobalCacheSizeBits,
bits::BITS - 1,
Pagemap,
MinSizeBits>,
LogRange<2>,
GlobalRange<>>;
// Track stats of the committed memory
using Stats = Pipe<GlobalR, CommitRange<PAL>, StatsRange<>>;
private:
// Source for object allocations and metadata
// Use buddy allocators to cache locally.
using ObjectRange = Pipe<
Stats,
LargeBuddyRange<LocalCacheSizeBits, LocalCacheSizeBits, Pagemap>,
SmallBuddyRange<>>;
public:
// Expose a global range for the initial allocation of meta-data.
using GlobalMetaRange = Pipe<ObjectRange, GlobalRange<>>;
// Where we get user allocations from.
ObjectRange object_range;
// Where we get meta-data allocations from.
ObjectRange& get_meta_range()
{
// Use the object range to service meta-data requests.
return object_range;
}
};
} // namespace snmalloc
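
The Pipe idiom used by both range files composes stages so that each later stage treats the pipe of the earlier ones as its ParentRange; callers use the last stage, and refills ultimately come from Base. A hedged sketch of a custom composition using only names from this diff:

```
// Sketch: a range that refills from Base in 16 MiB requests, traces them,
// and adds a lock so it can be shared globally.
template<typename Base, typename Pagemap>
using ExampleRange = snmalloc::Pipe<
  Base,
  snmalloc::LargeBuddyRange<24, snmalloc::bits::BITS - 1, Pagemap>,
  snmalloc::LogRange<2>,
  snmalloc::GlobalRange<>>;
```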

View file

@ -2,6 +2,7 @@
#include "buddy.h"
#include "commitrange.h"
#include "commonconfig.h"
#include "defaultpagemapentry.h"
#include "empty_range.h"
#include "globalrange.h"
#include "largebuddyrange.h"

View file

@ -113,5 +113,17 @@ namespace snmalloc
inline static RemoteAllocator unused_remote;
};
template<typename PAL>
static constexpr size_t MinBaseSizeBits()
{
if constexpr (pal_supports<AlignedAllocation, PAL>)
{
return bits::next_pow2_bits_const(PAL::minimum_alloc_size);
}
else
{
return MIN_CHUNK_BITS;
}
}
} // namespace snmalloc
#include "../mem/remotecache.h"

View file

@ -0,0 +1,64 @@
#pragma once
#include "../mem/mem.h"
namespace snmalloc
{
/**
* Example of type stored in the pagemap.
* The following class could be replaced by:
*
* ```
* using DefaultPagemapEntry = FrontendMetaEntry<SlabMetadata>;
* ```
*
* The full form here provides an example of how to extend the pagemap
* entries. It also guarantees that the front end never directly
* constructs meta entries, it only ever reads them or modifies them in
* place.
*/
class DefaultPagemapEntry : public FrontendMetaEntry<FrontendSlabMetadata>
{
/**
* The private initialising constructor is usable only by this back end.
*/
template<
SNMALLOC_CONCEPT(ConceptPAL) A1,
typename A2,
typename A3,
typename A4>
friend class BackendAllocator;
/**
* The private default constructor is usable only by the pagemap.
*/
template<size_t GRANULARITY_BITS, typename T, typename PAL, bool has_bounds>
friend class FlatPagemap;
/**
* The only constructor that creates newly initialised meta entries.
* This is callable only by the back end. The front end may copy,
* query, and update these entries, but it may not create them
* directly. This contract allows the back end to store any arbitrary
* metadata in meta entries when they are first constructed.
*/
SNMALLOC_FAST_PATH
DefaultPagemapEntry(FrontendSlabMetadata* meta, uintptr_t ras)
: FrontendMetaEntry<FrontendSlabMetadata>(meta, ras)
{}
/**
* Copy assignment is used only by the pagemap.
*/
DefaultPagemapEntry& operator=(const DefaultPagemapEntry& other)
{
FrontendMetaEntry<FrontendSlabMetadata>::operator=(other);
return *this;
}
/**
* Default constructor. This must be callable from the pagemap.
*/
SNMALLOC_FAST_PATH DefaultPagemapEntry() = default;
};
} // namespace snmalloc

View file

@ -13,7 +13,7 @@ namespace snmalloc
/**
* Class for using the pagemap entries for the buddy allocator.
*/
template<SNMALLOC_CONCEPT(ConceptBuddyRangeMeta) Pagemap>
template<SNMALLOC_CONCEPT(IsWritablePagemap) Pagemap>
class BuddyChunkRep
{
public:
@ -186,7 +186,7 @@ namespace snmalloc
template<
size_t REFILL_SIZE_BITS,
size_t MAX_SIZE_BITS,
SNMALLOC_CONCEPT(ConceptBuddyRangeMeta) Pagemap,
SNMALLOC_CONCEPT(IsWritablePagemap) Pagemap,
size_t MIN_REFILL_SIZE_BITS = 0,
typename ParentRange = EmptyRange>
class LargeBuddyRange : public ContainsParent<ParentRange>

View file

@ -328,16 +328,15 @@ namespace snmalloc
/**
* This is a generic implementation of the backend's interface to the page
* map. It takes a concrete page map implementation (probably FlatPageMap
* map. It takes a concrete page map implementation (probably FlatPagemap
* above) and entry type. It is friends with the backend passed in as a
* template parameter so that the backend can initialise the concrete page map
* and use set_metaentry which no one else should use.
*/
template<
typename Backend,
typename PAL,
typename ConcreteMap,
typename PageMapEntry,
typename PagemapEntry,
bool fixed_range>
class BasicPagemap
{
@ -345,10 +344,7 @@ namespace snmalloc
/**
* Export the type stored in the pagemap.
*/
using Entry = PageMapEntry;
private:
friend Backend;
using Entry = PagemapEntry;
/**
* Instance of the concrete pagemap, accessible to the backend so that
@ -369,7 +365,6 @@ namespace snmalloc
}
}
public:
/**
* Get the metadata associated with a chunk.
*

View file

@ -7,7 +7,7 @@
namespace snmalloc
{
template<
SNMALLOC_CONCEPT(ConceptBackendMetaRange) Pagemap,
SNMALLOC_CONCEPT(IsWritablePagemapWithRegister) Pagemap,
bool CanConsolidate = true,
typename ParentRange = EmptyRange>
class PagemapRegisterRange : public ContainsParent<ParentRange>

View file

@ -12,81 +12,94 @@ namespace snmalloc
* get_metadata takes a boolean template parameter indicating whether it may
* be accessing memory that is not known to be committed.
*/
template<typename Meta>
concept ConceptBackendMeta =
requires(address_t addr, size_t sz, const typename Meta::Entry& t)
template<typename Pagemap>
concept IsReadablePagemap =
requires(address_t addr, size_t sz, const typename Pagemap::Entry& t)
{
{
Meta::template get_metaentry<true>(addr)
Pagemap::template get_metaentry<true>(addr)
}
->ConceptSame<const typename Meta::Entry&>;
->ConceptSame<const typename Pagemap::Entry&>;
{
Meta::template get_metaentry<false>(addr)
Pagemap::template get_metaentry<false>(addr)
}
->ConceptSame<const typename Meta::Entry&>;
->ConceptSame<const typename Pagemap::Entry&>;
};
/**
* The core of the static pagemap accessor interface: {get,set}_metadata.
*
* get_metadata_mut takes a boolean template parameter indicating whether it
* may be accessing memory that is not known to be committed.
*
* set_metadata updates the entry in the pagemap.
*/
template<typename Pagemap>
concept IsWritablePagemap = IsReadablePagemap<Pagemap>&& requires(
address_t addr, size_t sz, const typename Pagemap::Entry& t)
{
{
Pagemap::template get_metaentry_mut<true>(addr)
}
->ConceptSame<typename Pagemap::Entry&>;
{
Pagemap::template get_metaentry_mut<false>(addr)
}
->ConceptSame<typename Pagemap::Entry&>;
{
Pagemap::set_metaentry(addr, sz, t)
}
->ConceptSame<void>;
};
/**
* The pagemap can also be told to commit backing storage for a range of
* addresses. This is broken out to a separate concept so that we can
* annotate which functions expect to do this vs. which merely use the core
* interface above. In practice, use ConceptBackendMetaRange (without the
* underscore) below, which combines this and the core concept, above.
* interface above. In practice, use IsWritablePagemapWithRegister below,
* which combines this and the core concept, above.
*/
template<typename Meta>
concept ConceptBackendMeta_Range = requires(address_t addr, size_t sz)
template<typename Pagemap>
concept IsPagemapWithRegister = requires(address_t addr, size_t sz)
{
{
Meta::register_range(addr, sz)
Pagemap::register_range(addr, sz)
}
->ConceptSame<void>;
};
template<typename Meta>
concept ConceptBuddyRangeMeta =
requires(address_t addr, size_t sz, const typename Meta::Entry& t)
{
{
Meta::template get_metaentry_mut<true>(addr)
}
->ConceptSame<typename Meta::Entry&>;
{
Meta::template get_metaentry_mut<false>(addr)
}
->ConceptSame<typename Meta::Entry&>;
};
/**
* The full pagemap accessor interface, with all of {get,set}_metadata and
* register_range. Use this to annotate callers that need the full interface
* and use ConceptBackendMeta for callers that merely need {get,set}_metadata,
* and use IsReadablePagemap for callers that merely need {get,set}_metadata,
* but note that the difference is just for humans and not compilers (since
* concept checking is lower bounding and does not constrain the templatized
* code to use only those affordances given by the concept).
*/
template<typename Meta>
concept ConceptBackendMetaRange =
ConceptBackendMeta<Meta>&& ConceptBackendMeta_Range<Meta>;
template<typename Pagemap>
concept IsWritablePagemapWithRegister =
IsReadablePagemap<Pagemap>&& IsPagemapWithRegister<Pagemap>;
/**
* The backend also defines domestication (that is, the difference between
* Tame and Wild CapPtr bounds). It exports the intended affordance for
* testing a Wild pointer and either returning nullptr or the original
* The configuration also defines domestication (that is, the difference
* between Tame and Wild CapPtr bounds). It exports the intended affordance
* for testing a Wild pointer and either returning nullptr or the original
* pointer, now Tame.
*/
template<typename Globals>
concept ConceptBackendDomestication =
requires(typename Globals::LocalState* ls, capptr::AllocWild<void> ptr)
template<typename Config>
concept IsConfigDomestication =
requires(typename Config::LocalState* ls, capptr::AllocWild<void> ptr)
{
{
Globals::capptr_domesticate(ls, ptr)
Config::capptr_domesticate(ls, ptr)
}
->ConceptSame<capptr::Alloc<void>>;
{
Globals::capptr_domesticate(ls, ptr.template as_static<char>())
Config::capptr_domesticate(ls, ptr.template as_static<char>())
}
->ConceptSame<capptr::Alloc<char>>;
};
@ -94,49 +107,92 @@ namespace snmalloc
class CommonConfig;
struct Flags;
template<typename LocalState, typename PagemapEntry, typename Backend>
concept IsBackend =
requires(LocalState& local_state, size_t size, uintptr_t ras)
{
{
Backend::alloc_chunk(local_state, size, ras)
}
->ConceptSame<
std::pair<capptr::Chunk<void>, typename Backend::SlabMetadata*>>;
}
&&requires(LocalState* local_state, size_t size)
{
{
Backend::template alloc_meta_data<void*>(local_state, size)
}
->ConceptSame<capptr::Chunk<void>>;
}
&&requires(
LocalState& local_state,
typename Backend::SlabMetadata& slab_metadata,
capptr::Alloc<void> alloc,
size_t size)
{
{
Backend::dealloc_chunk(local_state, slab_metadata, alloc, size)
}
->ConceptSame<void>;
}
&&requires(address_t p)
{
{
Backend::template get_metaentry<true>(p)
}
->ConceptSame<const PagemapEntry&>;
{
Backend::template get_metaentry<false>(p)
}
->ConceptSame<const PagemapEntry&>;
};
/**
* Backend global objects of type T must obey a number of constraints. They
* Config objects of type T must obey a number of constraints. They
* must...
*
* * inherit from CommonConfig (see commonconfig.h)
* * specify which PAL is in use via T::Pal
* * have static pagemap accessors via T::Pagemap
* * define a T::LocalState type (and alias it as T::Pagemap::LocalState)
* * define T::Options of type snmalloc::Flags
* * expose the global allocator pool via T::pool() if pool allocation is
* used.
*
*/
template<typename Globals>
concept ConceptBackendGlobals =
std::is_base_of<CommonConfig, Globals>::value&&
ConceptPAL<typename Globals::Pal>&&
ConceptBackendMetaRange<typename Globals::Pagemap>&& requires()
template<typename Config>
concept IsConfig = std::is_base_of<CommonConfig, Config>::value&&
ConceptPAL<typename Config::Pal>&& IsBackend<
typename Config::LocalState,
typename Config::PagemapEntry,
typename Config::Backend>&& requires()
{
typename Globals::LocalState;
typename Config::LocalState;
typename Config::Backend;
typename Config::PagemapEntry;
{
Globals::Options
Config::Options
}
->ConceptSameModRef<const Flags>;
}
&&(
requires() {
Globals::Options.CoreAllocIsPoolAllocated == true;
typename Globals::GlobalPoolState;
Config::Options.CoreAllocIsPoolAllocated == true;
typename Config::GlobalPoolState;
{
Globals::pool()
Config::pool()
}
->ConceptSame<typename Globals::GlobalPoolState&>;
->ConceptSame<typename Config::GlobalPoolState&>;
} ||
requires() { Globals::Options.CoreAllocIsPoolAllocated == false; });
requires() { Config::Options.CoreAllocIsPoolAllocated == false; });
/**
* The lazy version of the above; please see ds/concept.h and use sparingly.
* The lazy version of the above; please see ds_core/concept.h and use
* sparingly.
*/
template<typename Globals>
concept ConceptBackendGlobalsLazy =
!is_type_complete_v<Globals> || ConceptBackendGlobals<Globals>;
template<typename Config>
concept IsConfigLazy = !is_type_complete_v<Config> || IsConfig<Config>;
} // namespace snmalloc

View file

@ -40,14 +40,14 @@ namespace snmalloc
* backend. Returns true if there is a function with the correct name and type.
*/
template<
SNMALLOC_CONCEPT(ConceptBackendDomestication) Backend,
SNMALLOC_CONCEPT(IsConfigDomestication) Config,
typename T,
SNMALLOC_CONCEPT(capptr::ConceptBound) B>
constexpr SNMALLOC_FAST_PATH_INLINE auto has_domesticate(int)
-> std::enable_if_t<
std::is_same_v<
decltype(Backend::capptr_domesticate(
std::declval<typename Backend::LocalState*>(),
decltype(Config::capptr_domesticate(
std::declval<typename Config::LocalState*>(),
std::declval<CapPtr<T, B>>())),
CapPtr<
T,
@ -63,7 +63,7 @@ namespace snmalloc
* backend. Returns false in the case where the above template does not match.
*/
template<
SNMALLOC_CONCEPT(ConceptBackendGlobals) Backend,
SNMALLOC_CONCEPT(IsConfig) Config,
typename T,
SNMALLOC_CONCEPT(capptr::ConceptBound) B>
constexpr SNMALLOC_FAST_PATH_INLINE bool has_domesticate(long)
@ -73,29 +73,29 @@ namespace snmalloc
} // namespace detail
/**
* Wrapper that calls `Backend::capptr_domesticate` if and only if
* Backend::Options.HasDomesticate is true. If it is not implemented then
* Wrapper that calls `Config::capptr_domesticate` if and only if
* Config::Options.HasDomesticate is true. If it is not implemented then
* this assumes that any wild pointer can be domesticated.
*/
template<
SNMALLOC_CONCEPT(ConceptBackendGlobals) Backend,
SNMALLOC_CONCEPT(IsConfig) Config,
typename T,
SNMALLOC_CONCEPT(capptr::ConceptBound) B>
SNMALLOC_FAST_PATH_INLINE auto
capptr_domesticate(typename Backend::LocalState* ls, CapPtr<T, B> p)
capptr_domesticate(typename Config::LocalState* ls, CapPtr<T, B> p)
{
static_assert(
!detail::has_domesticate<Backend, T, B>(0) ||
Backend::Options.HasDomesticate,
!detail::has_domesticate<Config, T, B>(0) ||
Config::Options.HasDomesticate,
"Back end provides domesticate function but opts out of using it ");
static_assert(
detail::has_domesticate<Backend, T, B>(0) ||
!Backend::Options.HasDomesticate,
detail::has_domesticate<Config, T, B>(0) ||
!Config::Options.HasDomesticate,
"Back end does not provide capptr_domesticate and requests its use");
if constexpr (Backend::Options.HasDomesticate)
if constexpr (Config::Options.HasDomesticate)
{
return Backend::capptr_domesticate(ls, p);
return Config::capptr_domesticate(ls, p);
}
else
{

View file

@ -32,13 +32,13 @@ namespace snmalloc
* provided externally, then it must be set explicitly with
* `init_message_queue`.
*/
template<SNMALLOC_CONCEPT(ConceptBackendGlobalsLazy) Backend>
template<SNMALLOC_CONCEPT(IsConfigLazy) Config>
class CoreAllocator : public std::conditional_t<
Backend::Options.CoreAllocIsPoolAllocated,
Pooled<CoreAllocator<Backend>>,
Config::Options.CoreAllocIsPoolAllocated,
Pooled<CoreAllocator<Config>>,
Empty>
{
template<SNMALLOC_CONCEPT(ConceptBackendGlobals)>
template<SNMALLOC_CONCEPT(IsConfig)>
friend class LocalAllocator;
/**
@ -46,8 +46,8 @@ namespace snmalloc
* specialised for the back-end that we are using.
* @{
*/
using BackendSlabMetadata = typename Backend::SlabMetadata;
using PagemapEntry = typename Backend::Pagemap::Entry;
using BackendSlabMetadata = typename Config::Backend::SlabMetadata;
using PagemapEntry = typename Config::PagemapEntry;
/// }@
/**
@ -77,7 +77,7 @@ namespace snmalloc
* allocator
*/
std::conditional_t<
Backend::Options.IsQueueInline,
Config::Options.IsQueueInline,
RemoteAllocator,
RemoteAllocator*>
remote_alloc;
@ -85,7 +85,7 @@ namespace snmalloc
/**
* The type used local state. This is defined by the back end.
*/
using LocalState = typename Backend::LocalState;
using LocalState = typename Config::LocalState;
/**
* A local area of address space managed by this allocator.
@ -94,7 +94,7 @@ namespace snmalloc
* externally.
*/
std::conditional_t<
Backend::Options.CoreAllocOwnsLocalState,
Config::Options.CoreAllocOwnsLocalState,
LocalState,
LocalState*>
backend_state;
@ -108,7 +108,7 @@ namespace snmalloc
/**
* Ticker to query the clock regularly at a lower cost.
*/
Ticker<typename Backend::Pal> ticker;
Ticker<typename Config::Pal> ticker;
/**
* The message queue needs to be accessible from other threads
@ -118,7 +118,7 @@ namespace snmalloc
*/
auto* public_state()
{
if constexpr (Backend::Options.IsQueueInline)
if constexpr (Config::Options.IsQueueInline)
{
return &remote_alloc;
}
@ -133,7 +133,7 @@ namespace snmalloc
*/
LocalState* backend_state_ptr()
{
if constexpr (Backend::Options.CoreAllocOwnsLocalState)
if constexpr (Config::Options.CoreAllocOwnsLocalState)
{
return &backend_state;
}
@ -195,10 +195,10 @@ namespace snmalloc
SNMALLOC_ASSERT(attached_cache != nullptr);
auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(backend_state_ptr(), p);
return capptr_domesticate<Config>(backend_state_ptr(), p);
};
// Use attached cache, and fill it if it is empty.
return attached_cache->template alloc<NoZero, Backend>(
return attached_cache->template alloc<NoZero, Config>(
domesticate,
size,
[&](smallsizeclass_t sizeclass, freelist::Iter<>* fl) {
@ -300,7 +300,7 @@ namespace snmalloc
auto local_state = backend_state_ptr();
auto domesticate = [local_state](freelist::QueuePtr p)
SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(local_state, p);
return capptr_domesticate<Config>(local_state, p);
};
capptr::Alloc<void> p =
finish_alloc_no_zero(fl.take(key, domesticate), sizeclass);
@ -363,7 +363,7 @@ namespace snmalloc
alloc_classes[sizeclass].available.filter([this, sizeclass](auto* meta) {
auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
auto res = capptr_domesticate<Backend>(backend_state_ptr(), p);
auto res = capptr_domesticate<Config>(backend_state_ptr(), p);
#ifdef SNMALLOC_TRACING
if (res.unsafe_ptr() != p.unsafe_ptr())
printf(
@ -388,7 +388,7 @@ namespace snmalloc
// don't touch the cache lines at this point in snmalloc_check_client.
auto start = clear_slab(meta, sizeclass);
Backend::dealloc_chunk(
Config::Backend::dealloc_chunk(
get_backend_local_state(),
*meta,
start,
@ -423,7 +423,8 @@ namespace snmalloc
UNUSED(size);
#endif
Backend::dealloc_chunk(get_backend_local_state(), *meta, p, size);
Config::Backend::dealloc_chunk(
get_backend_local_state(), *meta, p, size);
return;
}
@ -483,7 +484,7 @@ namespace snmalloc
auto local_state = backend_state_ptr();
auto domesticate = [local_state](freelist::QueuePtr p)
SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(local_state, p);
return capptr_domesticate<Config>(local_state, p);
};
auto cb = [this,
&need_post](freelist::HeadPtr msg) SNMALLOC_FAST_PATH_LAMBDA {
@ -492,14 +493,14 @@ namespace snmalloc
#endif
auto& entry =
Backend::Pagemap::template get_metaentry(snmalloc::address_cast(msg));
Config::Backend::template get_metaentry(snmalloc::address_cast(msg));
handle_dealloc_remote(entry, msg.as_void(), need_post);
return true;
};
if constexpr (Backend::Options.QueueHeadsAreTame)
if constexpr (Config::Options.QueueHeadsAreTame)
{
/*
* The front of the queue has already been validated; just change the
@ -571,12 +572,12 @@ namespace snmalloc
// Entropy must be first, so that all data-structures can use the key
// it generates.
// This must occur before any freelists are constructed.
entropy.init<typename Backend::Pal>();
entropy.init<typename Config::Pal>();
// Ignoring stats for now.
// stats().start();
if constexpr (Backend::Options.IsQueueInline)
if constexpr (Config::Options.IsQueueInline)
{
init_message_queue();
message_queue().invariant();
@ -606,8 +607,8 @@ namespace snmalloc
* SFINAE disabled if the allocator does not own the local state.
*/
template<
typename Config = Backend,
typename = std::enable_if_t<Config::Options.CoreAllocOwnsLocalState>>
typename Config_ = Config,
typename = std::enable_if_t<Config_::Options.CoreAllocOwnsLocalState>>
CoreAllocator(LocalCache* cache) : attached_cache(cache)
{
init();
@ -618,8 +619,8 @@ namespace snmalloc
* state. SFINAE disabled if the allocator does own the local state.
*/
template<
typename Config = Backend,
typename = std::enable_if_t<!Config::Options.CoreAllocOwnsLocalState>>
typename Config_ = Config,
typename = std::enable_if_t<!Config_::Options.CoreAllocOwnsLocalState>>
CoreAllocator(LocalCache* cache, LocalState* backend = nullptr)
: backend_state(backend), attached_cache(cache)
{
@ -630,7 +631,7 @@ namespace snmalloc
* If the message queue is not inline, provide it. This will then
* configure the message queue for use.
*/
template<bool InlineQueue = Backend::Options.IsQueueInline>
template<bool InlineQueue = Config::Options.IsQueueInline>
std::enable_if_t<!InlineQueue> init_message_queue(RemoteAllocator* q)
{
remote_alloc = q;
@ -649,7 +650,7 @@ namespace snmalloc
// stats().remote_post(); // TODO queue not in line!
bool sent_something =
attached_cache->remote_dealloc_cache
.post<sizeof(CoreAllocator), Backend>(
.post<sizeof(CoreAllocator), Config>(
backend_state_ptr(), public_state()->trunc_id(), key_global);
return sent_something;
@ -674,7 +675,7 @@ namespace snmalloc
// PagemapEntry-s seen here are expected to have meaningful Remote
// pointers
auto& entry =
Backend::Pagemap::template get_metaentry(snmalloc::address_cast(p));
Config::Backend::template get_metaentry(snmalloc::address_cast(p));
if (SNMALLOC_LIKELY(dealloc_local_object_fast(entry, p, entropy)))
return;
@ -735,7 +736,7 @@ namespace snmalloc
auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(backend_state_ptr(), p);
return capptr_domesticate<Config>(backend_state_ptr(), p);
};
auto [p, still_active] = BackendSlabMetadata::alloc_free_list(
domesticate, meta, fast_free_list, entropy, sizeclass);
@ -746,7 +747,7 @@ namespace snmalloc
sl.insert(meta);
}
auto r = finish_alloc<zero_mem, Backend>(p, sizeclass);
auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
return ticker.check_tick(r);
}
return small_alloc_slow<zero_mem>(sizeclass, fast_free_list);
@ -759,7 +760,7 @@ namespace snmalloc
SNMALLOC_FAST_PATH
LocalState& get_backend_local_state()
{
if constexpr (Backend::Options.CoreAllocOwnsLocalState)
if constexpr (Config::Options.CoreAllocOwnsLocalState)
{
return backend_state;
}
@ -783,7 +784,7 @@ namespace snmalloc
message<1024>("small_alloc_slow rsize={} slab size={}", rsize, slab_size);
#endif
auto [slab, meta] = Backend::alloc_chunk(
auto [slab, meta] = Config::Backend::alloc_chunk(
get_backend_local_state(),
slab_size,
PagemapEntry::encode(
@ -802,7 +803,7 @@ namespace snmalloc
auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(backend_state_ptr(), p);
return capptr_domesticate<Config>(backend_state_ptr(), p);
};
auto [p, still_active] = BackendSlabMetadata::alloc_free_list(
domesticate, meta, fast_free_list, entropy, sizeclass);
@ -813,7 +814,7 @@ namespace snmalloc
alloc_classes[sizeclass].available.insert(meta);
}
auto r = finish_alloc<zero_mem, Backend>(p, sizeclass);
auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
return ticker.check_tick(r);
}
@ -828,7 +829,7 @@ namespace snmalloc
auto local_state = backend_state_ptr();
auto domesticate = [local_state](freelist::QueuePtr p)
SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(local_state, p);
return capptr_domesticate<Config>(local_state, p);
};
if (destroy_queue)
@ -841,7 +842,7 @@ namespace snmalloc
bool need_post = true; // Always going to post, so ignore.
auto n_tame = p_tame->atomic_read_next(key_global, domesticate);
const PagemapEntry& entry =
Backend::Pagemap::get_metaentry(snmalloc::address_cast(p_tame));
Config::Backend::get_metaentry(snmalloc::address_cast(p_tame));
handle_dealloc_remote(entry, p_tame.as_void(), need_post);
p_tame = n_tame;
}
@ -854,7 +855,7 @@ namespace snmalloc
handle_message_queue([]() {});
}
auto posted = attached_cache->flush<sizeof(CoreAllocator), Backend>(
auto posted = attached_cache->flush<sizeof(CoreAllocator), Config>(
backend_state_ptr(),
[&](capptr::Alloc<void> p) { dealloc_local_object(p); });
@ -966,6 +967,6 @@ namespace snmalloc
/**
* Use this alias to access the pool of allocators throughout snmalloc.
*/
template<typename Backend>
using AllocPool = Pool<CoreAllocator<Backend>, Backend, Backend::pool>;
template<typename Config>
using AllocPool = Pool<CoreAllocator<Config>, Config, Config::pool>;
} // namespace snmalloc

View file

@ -5,18 +5,18 @@
namespace snmalloc
{
template<SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle>
template<SNMALLOC_CONCEPT(IsConfig) Config>
inline static void cleanup_unused()
{
#ifndef SNMALLOC_PASS_THROUGH
static_assert(
SharedStateHandle::Options.CoreAllocIsPoolAllocated,
Config::Options.CoreAllocIsPoolAllocated,
"Global cleanup is available only for pool-allocated configurations");
// Call this periodically to free and coalesce memory allocated by
// allocators that are not currently in use by any thread.
// One atomic operation to extract the stack, another to restore it.
// Handling the message queue for each stack is non-atomic.
auto* first = AllocPool<SharedStateHandle>::extract();
auto* first = AllocPool<Config>::extract();
auto* alloc = first;
decltype(alloc) last;
@ -26,10 +26,10 @@ namespace snmalloc
{
alloc->flush();
last = alloc;
alloc = AllocPool<SharedStateHandle>::extract(alloc);
alloc = AllocPool<Config>::extract(alloc);
}
AllocPool<SharedStateHandle>::restore(first, last);
AllocPool<Config>::restore(first, last);
}
#endif
}
@ -39,16 +39,16 @@ namespace snmalloc
allocators are empty. If you don't pass a pointer to a bool, then it will
raise an error if the allocators are not empty.
*/
template<SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle>
template<SNMALLOC_CONCEPT(IsConfig) Config>
inline static void debug_check_empty(bool* result = nullptr)
{
#ifndef SNMALLOC_PASS_THROUGH
static_assert(
SharedStateHandle::Options.CoreAllocIsPoolAllocated,
Config::Options.CoreAllocIsPoolAllocated,
"Global status is available only for pool-allocated configurations");
// This is a debugging function. It checks that all memory from all
// allocators has been freed.
auto* alloc = AllocPool<SharedStateHandle>::iterate();
auto* alloc = AllocPool<Config>::iterate();
# ifdef SNMALLOC_TRACING
message<1024>("debug check empty: first {}", alloc);
@ -62,7 +62,7 @@ namespace snmalloc
message<1024>("debug_check_empty: Check all allocators!");
# endif
done = true;
alloc = AllocPool<SharedStateHandle>::iterate();
alloc = AllocPool<Config>::iterate();
okay = true;
while (alloc != nullptr)
@ -83,7 +83,7 @@ namespace snmalloc
# ifdef SNMALLOC_TRACING
message<1024>("debug check empty: okay = {}", okay);
# endif
alloc = AllocPool<SharedStateHandle>::iterate(alloc);
alloc = AllocPool<Config>::iterate(alloc);
}
}
@ -96,11 +96,11 @@ namespace snmalloc
// Redo check so abort is on allocator with allocation left.
if (!okay)
{
alloc = AllocPool<SharedStateHandle>::iterate();
alloc = AllocPool<Config>::iterate();
while (alloc != nullptr)
{
alloc->debug_is_empty(nullptr);
alloc = AllocPool<SharedStateHandle>::iterate(alloc);
alloc = AllocPool<Config>::iterate(alloc);
}
}
#else
@ -108,13 +108,13 @@ namespace snmalloc
#endif
}
template<SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle>
template<SNMALLOC_CONCEPT(IsConfig) Config>
inline static void debug_in_use(size_t count)
{
static_assert(
SharedStateHandle::Options.CoreAllocIsPoolAllocated,
Config::Options.CoreAllocIsPoolAllocated,
"Global status is available only for pool-allocated configurations");
auto alloc = AllocPool<SharedStateHandle>::iterate();
auto alloc = AllocPool<Config>::iterate();
while (alloc != nullptr)
{
if (alloc->debug_is_in_use())
@ -125,7 +125,7 @@ namespace snmalloc
}
count--;
}
alloc = AllocPool<SharedStateHandle>::iterate(alloc);
alloc = AllocPool<Config>::iterate(alloc);
if (count != 0)
{

View file

@ -56,11 +56,11 @@ namespace snmalloc
* core allocator must be provided externally by invoking the `init` method
* on this class *before* any allocation-related methods are called.
*/
template<SNMALLOC_CONCEPT(ConceptBackendGlobals) Backend>
template<SNMALLOC_CONCEPT(IsConfig) Config_>
class LocalAllocator
{
public:
using StateHandle = Backend;
using Config = Config_;
private:
/**
@ -68,15 +68,15 @@ namespace snmalloc
* specialised for the back-end that we are using.
* @{
*/
using CoreAlloc = CoreAllocator<Backend>;
using PagemapEntry = typename Backend::Pagemap::Entry;
using CoreAlloc = CoreAllocator<Config>;
using PagemapEntry = typename Config::PagemapEntry;
/// }@
// Free list per small size class. These are used for
// allocation on the fast path. This part of the code is inspired by
// mimalloc.
// Also contains remote deallocation cache.
LocalCache local_cache{&Backend::unused_remote};
LocalCache local_cache{&Config::unused_remote};
// Underlying allocator for most non-fast path operations.
CoreAlloc* core_alloc{nullptr};
@ -120,7 +120,7 @@ namespace snmalloc
SNMALLOC_SLOW_PATH decltype(auto) lazy_init(Action action, Args... args)
{
SNMALLOC_ASSERT(core_alloc == nullptr);
if constexpr (!Backend::Options.LocalAllocSupportsLazyInit)
if constexpr (!Config::Options.LocalAllocSupportsLazyInit)
{
SNMALLOC_CHECK(
false &&
@ -133,7 +133,7 @@ namespace snmalloc
else
{
// Initialise the thread local allocator
if constexpr (Backend::Options.CoreAllocOwnsLocalState)
if constexpr (Config::Options.CoreAllocOwnsLocalState)
{
init();
}
@ -145,7 +145,7 @@ namespace snmalloc
// Must be called at least once per thread.
// A pthread implementation only calls the thread destruction handle
// if the key has been set.
Backend::register_clean_up();
Config::register_clean_up();
// Perform underlying operation
auto r = action(core_alloc, args...);
@ -184,7 +184,7 @@ namespace snmalloc
return check_init([&](CoreAlloc* core_alloc) {
// Grab slab of correct size
// Set remote as large allocator remote.
auto [chunk, meta] = Backend::alloc_chunk(
auto [chunk, meta] = Config::Backend::alloc_chunk(
core_alloc->get_backend_local_state(),
large_size_to_chunk_size(size),
PagemapEntry::encode(
@ -201,7 +201,7 @@ namespace snmalloc
if (zero_mem == YesZero && chunk.unsafe_ptr() != nullptr)
{
Backend::Pal::template zero<false>(
Config::Pal::template zero<false>(
chunk.unsafe_ptr(), bits::next_pow2(size));
}
@ -212,10 +212,10 @@ namespace snmalloc
template<ZeroMem zero_mem>
SNMALLOC_FAST_PATH capptr::Alloc<void> small_alloc(size_t size)
{
auto domesticate = [this](
freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(core_alloc->backend_state_ptr(), p);
};
auto domesticate =
[this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Config>(core_alloc->backend_state_ptr(), p);
};
auto slowpath = [&](
smallsizeclass_t sizeclass,
freelist::Iter<>* fl) SNMALLOC_FAST_PATH_LAMBDA {
@ -239,7 +239,7 @@ namespace snmalloc
sizeclass);
};
return local_cache.template alloc<zero_mem, Backend>(
return local_cache.template alloc<zero_mem, Config>(
domesticate, size, slowpath);
}
@ -271,7 +271,7 @@ namespace snmalloc
alloc_size(p.unsafe_ptr()));
#endif
const PagemapEntry& entry =
Backend::Pagemap::get_metaentry(address_cast(p));
Config::Backend::template get_metaentry(address_cast(p));
local_cache.remote_dealloc_cache.template dealloc<sizeof(CoreAlloc)>(
entry.get_remote()->trunc_id(), p, key_global);
post_remote_cache();
@ -300,13 +300,13 @@ namespace snmalloc
}
/**
* Call `Backend::is_initialised()` if it is implemented,
* Call `Config::is_initialised()` if it is implemented,
* unconditionally returns true otherwise.
*/
SNMALLOC_FAST_PATH
bool is_initialised()
{
return call_is_initialised<Backend>(nullptr, 0);
return call_is_initialised<Config>(nullptr, 0);
}
/**
@ -329,13 +329,13 @@ namespace snmalloc
{}
/**
* Call `Backend::ensure_init()` if it is implemented, do
* Call `Config::ensure_init()` if it is implemented, do
* nothing otherwise.
*/
SNMALLOC_FAST_PATH
void ensure_init()
{
call_ensure_init<Backend>(nullptr, 0);
call_ensure_init<Config>(nullptr, 0);
}
public:
@ -380,7 +380,7 @@ namespace snmalloc
// Initialise the global allocator structures
ensure_init();
// Grab an allocator for this thread.
init(AllocPool<Backend>::acquire(&(this->local_cache)));
init(AllocPool<Config>::acquire(&(this->local_cache)));
}
// Return all state in the fast allocator and release the underlying
@ -400,9 +400,9 @@ namespace snmalloc
// Detach underlying allocator
core_alloc->attached_cache = nullptr;
// Return underlying allocator to the system.
if constexpr (Backend::Options.CoreAllocOwnsLocalState)
if constexpr (Config::Options.CoreAllocOwnsLocalState)
{
AllocPool<Backend>::release(core_alloc);
AllocPool<Config>::release(core_alloc);
}
// Set up thread local allocator to look like
@ -411,7 +411,7 @@ namespace snmalloc
#ifdef SNMALLOC_TRACING
message<1024>("flush(): core_alloc={}", core_alloc);
#endif
local_cache.remote_allocator = &Backend::unused_remote;
local_cache.remote_allocator = &Config::unused_remote;
local_cache.remote_dealloc_cache.capacity = 0;
}
}
@ -625,10 +625,10 @@ namespace snmalloc
* deal with the object's extent.
*/
capptr::Alloc<void> p_tame =
capptr_domesticate<Backend>(core_alloc->backend_state_ptr(), p_wild);
capptr_domesticate<Config>(core_alloc->backend_state_ptr(), p_wild);
const PagemapEntry& entry =
Backend::Pagemap::get_metaentry(address_cast(p_tame));
Config::Backend::get_metaentry(address_cast(p_tame));
if (SNMALLOC_LIKELY(local_cache.remote_allocator == entry.get_remote()))
{
# if defined(__CHERI_PURE_CAPABILITY__) && defined(SNMALLOC_CHECK_CLIENT)
@ -681,7 +681,7 @@ namespace snmalloc
size = size == 0 ? 1 : size;
auto sc = size_to_sizeclass_full(size);
auto pm_sc =
Backend::Pagemap::get_metaentry(address_cast(p)).get_sizeclass();
Config::Backend::get_metaentry(address_cast(p)).get_sizeclass();
auto rsize = sizeclass_full_to_size(sc);
auto pm_size = sizeclass_full_to_size(pm_sc);
snmalloc_check_client(
@ -723,7 +723,7 @@ namespace snmalloc
#else
// TODO What's the domestication policy here? At the moment we just
// probe the pagemap with the raw address, without checks. There could
// be implicit domestication through the `Backend::Pagemap` or
// be implicit domestication through the `Config::Pagemap` or
// we could just leave well enough alone.
// Note that alloc_size should return 0 for nullptr.
@ -734,7 +734,7 @@ namespace snmalloc
// entry for the first chunk of memory, that states it represents a
// large object, so we can pull the check for null off the fast path.
const PagemapEntry& entry =
Backend::Pagemap::get_metaentry(address_cast(p_raw));
Config::Backend::template get_metaentry(address_cast(p_raw));
return sizeclass_full_to_size(entry.get_sizeclass());
#endif
@ -779,7 +779,7 @@ namespace snmalloc
{
#ifndef SNMALLOC_PASS_THROUGH
const PagemapEntry& entry =
Backend::Pagemap::template get_metaentry<true>(address_cast(p));
Config::Backend::template get_metaentry<true>(address_cast(p));
auto sizeclass = entry.get_sizeclass();
return snmalloc::remaining_bytes(sizeclass, address_cast(p));
@ -790,7 +790,7 @@ namespace snmalloc
bool check_bounds(const void* p, size_t s)
{
if (SNMALLOC_LIKELY(Backend::Pagemap::is_initialised()))
if (SNMALLOC_LIKELY(Config::is_initialised()))
{
return remaining_bytes(p) >= s;
}
@ -807,7 +807,7 @@ namespace snmalloc
{
#ifndef SNMALLOC_PASS_THROUGH
const PagemapEntry& entry =
Backend::Pagemap::template get_metaentry<true>(address_cast(p));
Config::Backend::template get_metaentry<true>(address_cast(p));
auto sizeclass = entry.get_sizeclass();
return snmalloc::index_in_object(sizeclass, address_cast(p));

View file

@ -19,15 +19,14 @@ namespace snmalloc
return p.as_void();
}
template<ZeroMem zero_mem, typename SharedStateHandle>
template<ZeroMem zero_mem, typename Config>
inline static SNMALLOC_FAST_PATH capptr::Alloc<void>
finish_alloc(freelist::HeadPtr p, smallsizeclass_t sizeclass)
{
auto r = finish_alloc_no_zero(p, sizeclass);
if constexpr (zero_mem == YesZero)
SharedStateHandle::Pal::zero(
r.unsafe_ptr(), sizeclass_to_size(sizeclass));
Config::Pal::zero(r.unsafe_ptr(), sizeclass_to_size(sizeclass));
// TODO: Should this be zeroing the free Object state, in the non-zeroing
// case?
@ -64,18 +63,14 @@ namespace snmalloc
/**
* Return all the free lists to the allocator. Used during thread teardown.
*/
template<
size_t allocator_size,
typename SharedStateHandle,
typename DeallocFun>
bool flush(
typename SharedStateHandle::LocalState* local_state, DeallocFun dealloc)
template<size_t allocator_size, typename Config, typename DeallocFun>
bool flush(typename Config::LocalState* local_state, DeallocFun dealloc)
{
auto& key = entropy.get_free_list_key();
auto domesticate =
[local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<SharedStateHandle>(local_state, p);
};
auto domesticate = [local_state](freelist::QueuePtr p)
SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Config>(local_state, p);
};
for (size_t i = 0; i < NUM_SMALL_SIZECLASSES; i++)
{
@ -90,13 +85,13 @@ namespace snmalloc
}
}
return remote_dealloc_cache.post<allocator_size, SharedStateHandle>(
return remote_dealloc_cache.post<allocator_size, Config>(
local_state, remote_allocator->trunc_id(), key_global);
}
template<
ZeroMem zero_mem,
typename SharedStateHandle,
typename Config,
typename Slowpath,
typename Domesticator>
SNMALLOC_FAST_PATH capptr::Alloc<void>
@ -108,7 +103,7 @@ namespace snmalloc
if (SNMALLOC_LIKELY(!fl.empty()))
{
auto p = fl.take(key, domesticate);
return finish_alloc<zero_mem, SharedStateHandle>(p, sizeclass);
return finish_alloc<zero_mem, Config>(p, sizeclass);
}
return slowpath(sizeclass, &fl);
}
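These fast-path helpers are now parameterised on a single Config: zeroing goes through Config::Pal and pointer domestication through capptr_domesticate<Config>. A hedged sketch of the lambda shape the flush path builds; sketch_domesticator is an invented name, not an snmalloc API.

// Hypothetical helper (illustration only): packages the domestication
// callback exactly as the flush()/post() hunks construct it inline.
template<typename Config>
auto sketch_domesticator(typename Config::LocalState* local_state)
{
  return [local_state](snmalloc::freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
    return snmalloc::capptr_domesticate<Config>(local_state, p);
  };
}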

View file

@ -581,6 +581,8 @@ namespace snmalloc
"compatible with the front-end's structure");
public:
using SlabMetadata = BackendSlabMetadata;
constexpr FrontendMetaEntry() = default;
/**

View file

@ -22,7 +22,7 @@ namespace snmalloc
{
template<
typename TT,
SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle,
SNMALLOC_CONCEPT(IsConfig) Config,
PoolState<TT>& get_state()>
friend class Pool;
@ -41,9 +41,7 @@ namespace snmalloc
* SingletonPoolState::pool is the default provider for the PoolState within
* the Pool class.
*/
template<
typename T,
SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle>
template<typename T, SNMALLOC_CONCEPT(IsConfig) Config>
class SingletonPoolState
{
/**
@ -55,8 +53,8 @@ namespace snmalloc
-> decltype(SharedStateHandle_::ensure_init())
{
static_assert(
std::is_same<SharedStateHandle, SharedStateHandle_>::value,
"SFINAE parameter, should only be used with SharedStateHandle");
std::is_same<Config, SharedStateHandle_>::value,
"SFINAE parameter, should only be used with Config");
SharedStateHandle_::ensure_init();
}
@ -68,17 +66,17 @@ namespace snmalloc
SNMALLOC_FAST_PATH static auto call_ensure_init(SharedStateHandle_*, long)
{
static_assert(
std::is_same<SharedStateHandle, SharedStateHandle_>::value,
"SFINAE parameter, should only be used with SharedStateHandle");
std::is_same<Config, SharedStateHandle_>::value,
"SFINAE parameter, should only be used with Config");
}
/**
* Call `SharedStateHandle::ensure_init()` if it is implemented, do nothing
* Call `Config::ensure_init()` if it is implemented, do nothing
* otherwise.
*/
SNMALLOC_FAST_PATH static void ensure_init()
{
call_ensure_init<SharedStateHandle>(nullptr, 0);
call_ensure_init<Config>(nullptr, 0);
}
static void make_pool(PoolState<T>*) noexcept
@ -114,8 +112,8 @@ namespace snmalloc
*/
template<
typename T,
SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle,
PoolState<T>& get_state() = SingletonPoolState<T, SharedStateHandle>::pool>
SNMALLOC_CONCEPT(IsConfig) Config,
PoolState<T>& get_state() = SingletonPoolState<T, Config>::pool>
class Pool
{
public:
@ -132,12 +130,11 @@ namespace snmalloc
}
auto raw =
SharedStateHandle::template alloc_meta_data<T>(nullptr, sizeof(T));
Config::Backend::template alloc_meta_data<T>(nullptr, sizeof(T));
if (raw == nullptr)
{
SharedStateHandle::Pal::error(
"Failed to initialise thread local allocator.");
Config::Pal::error("Failed to initialise thread local allocator.");
}
p = new (raw.unsafe_ptr()) T(std::forward<Args>(args)...);
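With pools keyed by a Config (and their metadata obtained via Config::Backend::alloc_meta_data), declaring one now looks like the sketch below. MyEntry and MyPool are illustrative names; StandardConfig is assumed to be the default configuration, as elsewhere in this commit.

// Sketch of the new Pool parameterisation.
struct MyEntry : snmalloc::Pooled<MyEntry>
{
  int field = 0;
};

using MyPool = snmalloc::Pool<MyEntry, snmalloc::StandardConfig>;

// Usage is unchanged:
//   auto* e = MyPool::acquire();
//   MyPool::release(e);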

View file

@ -14,7 +14,7 @@ namespace snmalloc
public:
template<
typename TT,
SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle,
SNMALLOC_CONCEPT(IsConfig) Config,
PoolState<TT>& get_state()>
friend class Pool;
template<class a, Construction c>

View file

@ -77,9 +77,9 @@ namespace snmalloc
list[get_slot<allocator_size>(target_id, 0)].add(r, key);
}
template<size_t allocator_size, typename Backend>
template<size_t allocator_size, typename Config>
bool post(
typename Backend::LocalState* local_state,
typename Config::LocalState* local_state,
RemoteAllocator::alloc_id_t id,
const FreeListKey& key)
{
@ -88,7 +88,7 @@ namespace snmalloc
bool sent_something = false;
auto domesticate = [local_state](freelist::QueuePtr p)
SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Backend>(local_state, p);
return capptr_domesticate<Config>(local_state, p);
};
while (true)
@ -104,7 +104,7 @@ namespace snmalloc
{
auto [first, last] = list[i].extract_segment(key);
const auto& entry =
Backend::Pagemap::get_metaentry(address_cast(first));
Config::Backend::get_metaentry(address_cast(first));
auto remote = entry.get_remote();
// If the allocator is not correctly aligned, then the bit that is
// set implies this is used by the backend, and we should not be
@ -112,7 +112,7 @@ namespace snmalloc
snmalloc_check_client(
!entry.is_backend_owned(),
"Delayed detection of attempt to free internal structure.");
if constexpr (Backend::Options.QueueHeadsAreTame)
if constexpr (Config::Options.QueueHeadsAreTame)
{
auto domesticate_nop = [](freelist::QueuePtr p) {
return freelist::HeadPtr(p.unsafe_ptr());
@ -143,7 +143,7 @@ namespace snmalloc
// Use the next N bits to spread out remote deallocs in our own
// slot.
auto r = resend.take(key, domesticate);
const auto& entry = Backend::Pagemap::get_metaentry(address_cast(r));
const auto& entry = Config::Backend::get_metaentry(address_cast(r));
auto i = entry.get_remote()->trunc_id();
size_t slot = get_slot<allocator_size>(i, post_round);
list[slot].add(r, key);
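The routing logic is unchanged in substance: the destination allocator for a remote free is still recovered from the object's MetaEntry, now spelled through Config::Backend. A small illustrative helper (not in the commit); the real post() additionally rejects backend-owned entries.

// Illustration only: recover the owning allocator's id for a freed object,
// as the post() hunk above does.
template<typename Config>
auto sketch_owner_id(snmalloc::address_t a)
{
  const auto& entry = Config::Backend::get_metaentry(a);
  return entry.get_remote()->trunc_id();
}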

View file

@ -36,7 +36,7 @@ namespace snmalloc
// Large classes range from [MAX_SMALL_SIZECLASS_SIZE, ADDRESS_SPACE).
static constexpr size_t NUM_LARGE_CLASSES =
Pal::address_bits - MAX_SMALL_SIZECLASS_BITS;
DefaultPal::address_bits - MAX_SMALL_SIZECLASS_BITS;
// How many bits are required to represent either a large or a small
// sizeclass.
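With the global Pal alias removed, compile-time constants such as this are computed from DefaultPal. As a worked illustration only, with assumed numbers that are not taken from this commit: 48 usable address bits and 16 small-sizeclass bits would give 48 - 16 = 32 large classes.

// Illustrative arithmetic only; the real values are platform- and build-dependent.
static_assert(48 - 16 == 32, "example: address_bits - MAX_SMALL_SIZECLASS_BITS");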

View file

@ -6,8 +6,8 @@ using namespace snmalloc;
void get_malloc_info_v1(malloc_info_v1* stats)
{
auto curr = Globals::get_current_usage();
auto peak = Globals::get_peak_usage();
auto curr = StandardConfig::Backend::get_current_usage();
auto peak = StandardConfig::Backend::get_peak_usage();
stats->current_memory_usage = curr;
stats->peak_memory_usage = peak;
}

View file

@ -48,6 +48,6 @@ extern "C" SNMALLOC_EXPORT void* SNMALLOC_NAME_MANGLE(rust_realloc)(
extern "C" SNMALLOC_EXPORT void SNMALLOC_NAME_MANGLE(rust_statistics)(
size_t* current_memory_usage, size_t* peak_memory_usage)
{
*current_memory_usage = Globals::get_current_usage();
*peak_memory_usage = Globals::get_peak_usage();
*current_memory_usage = StandardConfig::Backend::get_current_usage();
*peak_memory_usage = StandardConfig::Backend::get_peak_usage();
}

View file

@ -37,49 +37,42 @@
namespace snmalloc
{
#if !defined(OPEN_ENCLAVE) || defined(OPEN_ENCLAVE_SIMULATION)
using DefaultPal =
# if defined(_WIN32)
PALWindows;
# elif defined(__APPLE__)
PALApple<>;
# elif defined(__linux__)
PALLinux;
# elif defined(FreeBSD_KERNEL)
PALFreeBSDKernel;
# elif defined(__FreeBSD__)
PALFreeBSD;
# elif defined(__HAIKU__)
PALHaiku;
# elif defined(__NetBSD__)
PALNetBSD;
# elif defined(__OpenBSD__)
PALOpenBSD;
# elif defined(__sun)
PALSolaris;
# elif defined(__DragonFly__)
PALDragonfly;
# else
# error Unsupported platform
# endif
#endif
using Pal =
#if defined(SNMALLOC_MEMORY_PROVIDER)
PALPlainMixin<SNMALLOC_MEMORY_PROVIDER>;
SNMALLOC_MEMORY_PROVIDER;
#elif defined(OPEN_ENCLAVE)
PALOpenEnclave;
#elif defined(_WIN32)
PALWindows;
#elif defined(__APPLE__)
PALApple<>;
#elif defined(__linux__)
PALLinux;
#elif defined(FreeBSD_KERNEL)
PALFreeBSDKernel;
#elif defined(__FreeBSD__)
PALFreeBSD;
#elif defined(__HAIKU__)
PALHaiku;
#elif defined(__NetBSD__)
PALNetBSD;
#elif defined(__OpenBSD__)
PALOpenBSD;
#elif defined(__sun)
PALSolaris;
#elif defined(__DragonFly__)
PALDragonfly;
#else
DefaultPal;
# error Unsupported platform
#endif
[[noreturn]] SNMALLOC_SLOW_PATH inline void error(const char* const str)
{
Pal::error(str);
DefaultPal::error(str);
}
// Used to keep Superslab metadata committed.
static constexpr size_t OS_PAGE_SIZE = Pal::page_size;
static constexpr size_t OS_PAGE_SIZE = DefaultPal::page_size;
/**
* Perform platform-specific adjustment of return pointers.
@ -88,7 +81,7 @@ namespace snmalloc
* disruption to PALs for platforms that do not support StrictProvenance AALs.
*/
template<
typename PAL = Pal,
typename PAL = DefaultPal,
typename AAL = Aal,
typename T,
SNMALLOC_CONCEPT(capptr::ConceptBound) B>
@ -101,7 +94,7 @@ namespace snmalloc
}
template<
typename PAL = Pal,
typename PAL = DefaultPal,
typename AAL = Aal,
typename T,
SNMALLOC_CONCEPT(capptr::ConceptBound) B>
@ -177,7 +170,7 @@ namespace snmalloc
[[noreturn]] inline void report_fatal_error(Args... args)
{
MessageBuilder<BufferSize> msg{std::forward<Args>(args)...};
Pal::error(msg.get_message());
DefaultPal::error(msg.get_message());
}
static inline size_t get_tid()
@ -197,6 +190,6 @@ namespace snmalloc
{
MessageBuilder<BufferSize> msg{std::forward<Args>(args)...};
MessageBuilder<BufferSize> msg_tid{"{}: {}", get_tid(), msg.get_message()};
Pal::message(msg_tid.get_message());
DefaultPal::message(msg_tid.get_message());
}
} // namespace snmalloc
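With the global Pal alias gone, code outside a configuration either names DefaultPal explicitly (as the hunks above do) or accepts the PAL as a template parameter. A small sketch; sketch_log is an invented helper, not part of snmalloc.

// Follows the new convention: take the PAL as a parameter, defaulting to DefaultPal.
template<typename PAL = snmalloc::DefaultPal>
void sketch_log(const char* msg)
{
  PAL::message(msg);
}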

View file

@ -11,20 +11,38 @@ int main()
// # define SNMALLOC_TRACING
# include <snmalloc/backend/backend.h>
# include <snmalloc/backend/standard_range.h>
# include <snmalloc/backend_helpers/backend_helpers.h>
# include <snmalloc/snmalloc_core.h>
// Specify type of allocator
# define SNMALLOC_PROVIDE_OWN_CONFIG
namespace snmalloc
{
class CustomGlobals : public BackendAllocator<Pal, false>
class CustomConfig : public CommonConfig
{
public:
using GlobalPoolState = PoolState<CoreAllocator<CustomGlobals>>;
using Pal = DefaultPal;
using PagemapEntry = DefaultPagemapEntry;
private:
using Backend = BackendAllocator<Pal, false>;
using ConcretePagemap =
FlatPagemap<MIN_CHUNK_BITS, PagemapEntry, Pal, false>;
public:
using Pagemap = BasicPagemap<Pal, ConcretePagemap, PagemapEntry, false>;
public:
using LocalState = StandardLocalState<
Pal,
Pagemap,
Pipe<PalRange<Pal>, PagemapRegisterRange<Pagemap, false>>>;
using GlobalPoolState = PoolState<CoreAllocator<CustomConfig>>;
using Backend = BackendAllocator<Pal, PagemapEntry, Pagemap, LocalState>;
private:
SNMALLOC_REQUIRE_CONSTINIT
inline static GlobalPoolState alloc_pool;
@ -65,7 +83,7 @@ namespace snmalloc
static CapPtr<
T,
typename B::template with_wildness<capptr::dimension::Wildness::Tame>>
capptr_domesticate(typename Backend::LocalState*, CapPtr<T, B> p)
capptr_domesticate(LocalState*, CapPtr<T, B> p)
{
domesticate_count++;
@ -85,7 +103,7 @@ namespace snmalloc
{
std::cout << "Patching over corruption" << std::endl;
*domesticate_patch_location = domesticate_patch_value;
snmalloc::CustomGlobals::domesticate_patch_location = nullptr;
snmalloc::CustomConfig::domesticate_patch_location = nullptr;
}
return CapPtr<
@ -95,7 +113,7 @@ namespace snmalloc
}
};
using Alloc = LocalAllocator<CustomGlobals>;
using Alloc = LocalAllocator<CustomConfig>;
}
# define SNMALLOC_NAME_MANGLE(a) test_##a
@ -103,11 +121,11 @@ namespace snmalloc
int main()
{
snmalloc::CustomGlobals::init(); // init pagemap
snmalloc::CustomGlobals::domesticate_count = 0;
snmalloc::CustomConfig::Pagemap::concretePagemap.init(); // init pagemap
snmalloc::CustomConfig::domesticate_count = 0;
LocalEntropy entropy;
entropy.init<Pal>();
entropy.init<DefaultPal>();
key_global = FreeListKey(entropy.get_free_list_key());
auto alloc1 = new Alloc();
@ -123,21 +141,20 @@ int main()
alloc2->flush();
// Clobber the linkage but not the back pointer
snmalloc::CustomGlobals::domesticate_patch_location =
snmalloc::CustomConfig::domesticate_patch_location =
static_cast<uintptr_t*>(p);
snmalloc::CustomGlobals::domesticate_patch_value =
*static_cast<uintptr_t*>(p);
snmalloc::CustomConfig::domesticate_patch_value = *static_cast<uintptr_t*>(p);
memset(p, 0xA5, sizeof(void*));
snmalloc::CustomGlobals::domesticate_trace = true;
snmalloc::CustomGlobals::domesticate_count = 0;
snmalloc::CustomConfig::domesticate_trace = true;
snmalloc::CustomConfig::domesticate_count = 0;
// Open a new slab, so that slow path will pick up the message queue. That
// means this should be a sizeclass we've not used before, even internally.
auto q = alloc1->alloc(512);
std::cout << "Allocated q " << q << std::endl;
snmalloc::CustomGlobals::domesticate_trace = false;
snmalloc::CustomConfig::domesticate_trace = false;
/*
* Expected domestication calls in the above message passing:
@ -152,8 +169,8 @@ int main()
* after q).
*/
static constexpr size_t expected_count =
snmalloc::CustomGlobals::Options.QueueHeadsAreTame ? 2 : 3;
SNMALLOC_CHECK(snmalloc::CustomGlobals::domesticate_count == expected_count);
snmalloc::CustomConfig::Options.QueueHeadsAreTame ? 2 : 3;
SNMALLOC_CHECK(snmalloc::CustomConfig::domesticate_count == expected_count);
// Prevent the allocators from going out of scope during the above test
alloc1->flush();
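The test above doubles as a template for the refactored configuration surface: a Config derives from CommonConfig, picks a Pal and PagemapEntry, wires up a Pagemap and LocalState, and exposes them through a Backend. A condensed sketch follows; it omits the pool state, Options, and domestication hooks the full test defines, so treat it as illustrative rather than a drop-in configuration.

// Condensed from the diff above; not complete or usable on its own.
class SketchConfig : public snmalloc::CommonConfig
{
public:
  using Pal = snmalloc::DefaultPal;
  using PagemapEntry = snmalloc::DefaultPagemapEntry;

private:
  using ConcretePagemap =
    snmalloc::FlatPagemap<snmalloc::MIN_CHUNK_BITS, PagemapEntry, Pal, false>;

public:
  using Pagemap =
    snmalloc::BasicPagemap<Pal, ConcretePagemap, PagemapEntry, false>;

  using LocalState = snmalloc::StandardLocalState<
    Pal,
    Pagemap,
    snmalloc::Pipe<snmalloc::PalRange<Pal>,
                   snmalloc::PagemapRegisterRange<Pagemap, false>>>;

  using Backend =
    snmalloc::BackendAllocator<Pal, PagemapEntry, Pagemap, LocalState>;
};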

View file

@ -11,7 +11,7 @@
using namespace snmalloc;
using CustomGlobals = FixedGlobals<PALNoAlloc<DefaultPal>>;
using CustomGlobals = FixedRangeConfig<PALNoAlloc<DefaultPal>>;
using FixedAlloc = LocalAllocator<CustomGlobals>;
int main()
@ -23,8 +23,8 @@ int main()
// It is also large enough for the example to run in.
// For 1MiB superslabs, SUPERSLAB_BITS + 4 is not big enough for the example.
auto size = bits::one_at_bit(28);
auto oe_base = Pal::reserve(size);
Pal::notify_using<NoZero>(oe_base, size);
auto oe_base = DefaultPal::reserve(size);
DefaultPal::notify_using<NoZero>(oe_base, size);
auto oe_end = pointer_offset(oe_base, size);
std::cout << "Allocated region " << oe_base << " - "
<< pointer_offset(oe_base, size) << std::endl;

View file

@ -367,6 +367,6 @@ int main(int argc, char** argv)
our_malloc_usable_size(nullptr) == 0,
"malloc_usable_size(nullptr) should be zero");
snmalloc::debug_check_empty<snmalloc::Globals>();
snmalloc::debug_check_empty<snmalloc::StandardConfig>();
return 0;
}

View file

@ -184,7 +184,7 @@ void test_calloc()
alloc.dealloc(p, size);
}
snmalloc::debug_check_empty<Globals>();
snmalloc::debug_check_empty<StandardConfig>();
}
void test_double_alloc()
@ -229,7 +229,7 @@ void test_double_alloc()
}
}
}
snmalloc::debug_check_empty<Globals>();
snmalloc::debug_check_empty<StandardConfig>();
}
void test_external_pointer()
@ -273,7 +273,7 @@ void test_external_pointer()
alloc.dealloc(p1, size);
}
snmalloc::debug_check_empty<Globals>();
snmalloc::debug_check_empty<StandardConfig>();
};
void check_offset(void* base, void* interior)
@ -305,7 +305,7 @@ void test_external_pointer_large()
auto& alloc = ThreadAlloc::get();
constexpr size_t count_log = Pal::address_bits > 32 ? 5 : 3;
constexpr size_t count_log = DefaultPal::address_bits > 32 ? 5 : 3;
constexpr size_t count = 1 << count_log;
// Pre allocate all the objects
size_t* objects[count];

View file

@ -65,8 +65,8 @@ void test_pagemap(bool bounded)
if (bounded)
{
auto size = bits::one_at_bit(30);
auto base = Pal::reserve(size);
Pal::notify_using<NoZero>(base, size);
auto base = DefaultPal::reserve(size);
DefaultPal::notify_using<NoZero>(base, size);
std::cout << "Fixed base: " << base << " (" << size << ") "
<< " end: " << pointer_offset(base, size) << std::endl;
auto [heap_base, heap_size] = pagemap_test_bound.init(base, size);

View file

@ -12,7 +12,7 @@ struct PoolAEntry : Pooled<PoolAEntry>
PoolAEntry() : field(1){};
};
using PoolA = Pool<PoolAEntry, Alloc::StateHandle>;
using PoolA = Pool<PoolAEntry, Alloc::Config>;
struct PoolBEntry : Pooled<PoolBEntry>
{
@ -22,14 +22,14 @@ struct PoolBEntry : Pooled<PoolBEntry>
PoolBEntry(int f) : field(f){};
};
using PoolB = Pool<PoolBEntry, Alloc::StateHandle>;
using PoolB = Pool<PoolBEntry, Alloc::Config>;
void test_alloc()
{
auto ptr = PoolA::acquire();
SNMALLOC_CHECK(ptr != nullptr);
// Pool allocations should not be visible to debug_check_empty.
snmalloc::debug_check_empty<Alloc::StateHandle>();
snmalloc::debug_check_empty<Alloc::Config>();
PoolA::release(ptr);
}

View file

@ -8,7 +8,7 @@ int main()
auto r = a.alloc(16);
snmalloc::debug_check_empty<snmalloc::Globals>(&result);
snmalloc::debug_check_empty<snmalloc::StandardConfig>(&result);
if (result != false)
{
abort();
@ -16,7 +16,7 @@ int main()
a.dealloc(r);
snmalloc::debug_check_empty<snmalloc::Globals>(&result);
snmalloc::debug_check_empty<snmalloc::StandardConfig>(&result);
if (result != true)
{
abort();
@ -24,7 +24,7 @@ int main()
r = a.alloc(16);
snmalloc::debug_check_empty<snmalloc::Globals>(&result);
snmalloc::debug_check_empty<snmalloc::StandardConfig>(&result);
if (result != false)
{
abort();
@ -32,7 +32,7 @@ int main()
a.dealloc(r);
snmalloc::debug_check_empty<snmalloc::Globals>(&result);
snmalloc::debug_check_empty<snmalloc::StandardConfig>(&result);
if (result != true)
{
abort();

View file

@ -12,7 +12,7 @@
namespace snmalloc
{
using Alloc = snmalloc::LocalAllocator<snmalloc::Globals>;
using Alloc = snmalloc::LocalAllocator<snmalloc::StandardConfig>;
}
using namespace snmalloc;

View file

@ -12,7 +12,7 @@
#define SNMALLOC_PROVIDE_OWN_CONFIG
namespace snmalloc
{
using CustomGlobals = FixedGlobals<PALNoAlloc<DefaultPal>>;
using CustomGlobals = FixedRangeConfig<PALNoAlloc<DefaultPal>>;
using Alloc = LocalAllocator<CustomGlobals>;
}

View file

@ -19,7 +19,7 @@ namespace snmalloc
{ \
current_test = __PRETTY_FUNCTION__; \
MessageBuilder<1024> mb{"Starting test: " msg "\n", ##__VA_ARGS__}; \
Pal::message(mb.get_message()); \
DefaultPal::message(mb.get_message()); \
} while (0)
/**
@ -33,7 +33,7 @@ namespace snmalloc
do \
{ \
MessageBuilder<1024> mb{msg "\n", ##__VA_ARGS__}; \
Pal::message(mb.get_message()); \
DefaultPal::message(mb.get_message()); \
} while (0)
}

View file

@ -154,7 +154,7 @@ void test_tasks(size_t num_tasks, size_t count, size_t size)
}
#ifndef NDEBUG
snmalloc::debug_check_empty<Globals>();
snmalloc::debug_check_empty<StandardConfig>();
#endif
};

View file

@ -19,7 +19,7 @@ namespace test
{
size_t rand = (size_t)r.next();
size_t offset = bits::clz(rand);
if constexpr (Pal::address_bits > 32)
if constexpr (DefaultPal::address_bits > 32)
{
if (offset > 30)
offset = 30;
@ -47,7 +47,7 @@ namespace test
alloc.dealloc(objects[i]);
}
snmalloc::debug_check_empty<Globals>();
snmalloc::debug_check_empty<StandardConfig>();
}
void test_external_pointer(xoroshiro::p128r64& r)

View file

@ -60,7 +60,7 @@ void test_alloc_dealloc(size_t count, size_t size, bool write)
}
}
snmalloc::debug_check_empty<Globals>();
snmalloc::debug_check_empty<StandardConfig>();
}
int main(int, char**)

View file

@ -64,7 +64,7 @@ void print_stack_trace()
void _cdecl error(int signal)
{
snmalloc::UNUSED(signal);
snmalloc::Pal::message("*****ABORT******");
snmalloc::DefaultPal::message("*****ABORT******");
print_stack_trace();
@ -75,7 +75,7 @@ LONG WINAPI VectoredHandler(struct _EXCEPTION_POINTERS* ExceptionInfo)
{
snmalloc::UNUSED(ExceptionInfo);
snmalloc::Pal::message("*****UNHANDLED EXCEPTION******");
snmalloc::DefaultPal::message("*****UNHANDLED EXCEPTION******");
print_stack_trace();