diff --git a/docs/AddressSpace.md b/docs/AddressSpace.md index f72a8d07..95f91ef4 100644 --- a/docs/AddressSpace.md +++ b/docs/AddressSpace.md @@ -78,7 +78,7 @@ Exciting, no? ### Decoding a MetaEntry -The centerpiece of `snmalloc`'s metadata is its `PageMap`, which associates each "chunk" of the address space (~16KiB; see `MIN_CHUNK_BITS`) with a `MetaEntry`. +The centerpiece of `snmalloc`'s metadata is its `Pagemap`, which associates each "chunk" of the address space (~16KiB; see `MIN_CHUNK_BITS`) with a `MetaEntry`. A `MetaEntry` is a pair of pointers, suggestively named `meta` and `remote_and_sizeclass`. In more detail, `MetaEntry`s are better represented by Sigma and Pi types, all packed into two pointer-sized words in ways that preserve pointer provenance on CHERI. diff --git a/docs/StrictProvenance.md b/docs/StrictProvenance.md index 4017cf02..5f3081a3 100644 --- a/docs/StrictProvenance.md +++ b/docs/StrictProvenance.md @@ -189,7 +189,7 @@ In future architectures, this is increasingly likely to be a no-op. ## Backend-Provided Operations -* `CapPtr capptr_domesticate(Backend::LocalState *, CapPtr ptr)` allows the backend to test whether `ptr` is sensible, by some definition thereof. +* `CapPtr capptr_domesticate(LocalState *, CapPtr ptr)` allows the backend to test whether `ptr` is sensible, by some definition thereof. The annotation `Bout` is *computed* as a function of `Bin`. `Bin` is required to be `Wild`, and `Bout` is `Tame` but otherwise identical. diff --git a/src/snmalloc/backend/backend.h b/src/snmalloc/backend/backend.h index 5486e42a..1463fa94 100644 --- a/src/snmalloc/backend/backend.h +++ b/src/snmalloc/backend/backend.h @@ -1,223 +1,28 @@ #pragma once #include "../backend_helpers/backend_helpers.h" -#if defined(SNMALLOC_CHECK_CLIENT) && !defined(OPEN_ENCLAVE) -/** - * Protect meta data blocks by allocating separate from chunks for - * user allocations. This involves leaving gaps in address space. - * This is less efficient, so should only be applied for the checked - * build. - * - * On Open Enclave the address space is limited, so we disable this - * feature. - */ -# define SNMALLOC_META_PROTECTED -#endif - namespace snmalloc { /** * This class implements the standard backend for handling allocations. - * It abstracts page table management and address space management. + * It is parameterised by its Pagemap management and + * address space management (LocalState). */ - template - class BackendAllocator : public CommonConfig + template< + SNMALLOC_CONCEPT(ConceptPAL) PAL, + typename PagemapEntry, + typename Pagemap, + typename LocalState> + class BackendAllocator { + using GlobalMetaRange = typename LocalState::GlobalMetaRange; + using Stats = typename LocalState::Stats; + public: - class PageMapEntry; using Pal = PAL; - using SlabMetadata = FrontendSlabMetadata; - - private: - using ConcretePagemap = - FlatPagemap; + using SlabMetadata = typename PagemapEntry::SlabMetadata; public: - /** - * Example of type stored in the pagemap. - * The following class could be replaced by: - * - * ``` - * using PageMapEntry = FrontendMetaEntry; - * ``` - * - * The full form here provides an example of how to extend the pagemap - * entries. It also guarantees that the front end never directly - * constructs meta entries, it only ever reads them or modifies them in - * place. - */ - class PageMapEntry : public FrontendMetaEntry - { - /** - * The private initialising constructor is usable only by this back end. 
- */ - friend class BackendAllocator; - - /** - * The private default constructor is usable only by the pagemap. - */ - friend ConcretePagemap; - - /** - * The only constructor that creates newly initialised meta entries. - * This is callable only by the back end. The front end may copy, - * query, and update these entries, but it may not create them - * directly. This contract allows the back end to store any arbitrary - * metadata in meta entries when they are first constructed. - */ - SNMALLOC_FAST_PATH - PageMapEntry(SlabMetadata* meta, uintptr_t ras) - : FrontendMetaEntry(meta, ras) - {} - - /** - * Copy assignment is used only by the pagemap. - */ - PageMapEntry& operator=(const PageMapEntry& other) - { - FrontendMetaEntry::operator=(other); - return *this; - } - - /** - * Default constructor. This must be callable from the pagemap. - */ - SNMALLOC_FAST_PATH PageMapEntry() = default; - }; - using Pagemap = BasicPagemap< - BackendAllocator, - PAL, - ConcretePagemap, - PageMapEntry, - fixed_range>; - -#if defined(_WIN32) || defined(__CHERI_PURE_CAPABILITY__) - static constexpr bool CONSOLIDATE_PAL_ALLOCS = false; -#else - static constexpr bool CONSOLIDATE_PAL_ALLOCS = true; -#endif - - // Set up source of memory - using Base = std::conditional_t< - fixed_range, - EmptyRange, - Pipe< - PalRange, - PagemapRegisterRange>>; - - static constexpr size_t MinBaseSizeBits() - { - if constexpr (pal_supports) - { - return bits::next_pow2_bits_const(PAL::minimum_alloc_size); - } - else - { - return MIN_CHUNK_BITS; - } - } - - // Global range of memory - using GlobalR = Pipe< - Base, - LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>, - LogRange<2>, - GlobalRange<>>; - -#ifdef SNMALLOC_META_PROTECTED - // Introduce two global ranges, so we don't mix Object and Meta - using CentralObjectRange = Pipe< - GlobalR, - LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>, - LogRange<3>, - GlobalRange<>>; - - using CentralMetaRange = Pipe< - GlobalR, - SubRange, // Use SubRange to introduce guard pages. - LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>, - LogRange<4>, - GlobalRange<>>; - - // Source for object allocations - using StatsObject = - Pipe, StatsRange<>>; - - using ObjectRange = - Pipe, LogRange<5>>; - - using StatsMeta = Pipe, StatsRange<>>; - - using MetaRange = Pipe< - StatsMeta, - LargeBuddyRange<21 - 6, bits::BITS - 1, Pagemap>, - SmallBuddyRange<>>; - - // Create global range that can service small meta-data requests. - // Don't want to add this to the CentralMetaRange to move Commit outside the - // lock on the common case. 
- using GlobalMetaRange = Pipe, GlobalRange<>>; - using Stats = StatsCombiner; -#else - // Source for object allocations and metadata - // No separation between the two - using Stats = Pipe>; - using ObjectRange = Pipe< - Stats, - CommitRange, - LargeBuddyRange<21, 21, Pagemap>, - SmallBuddyRange<>>; - using GlobalMetaRange = Pipe>; -#endif - - struct LocalState - { - ObjectRange object_range; - -#ifdef SNMALLOC_META_PROTECTED - MetaRange meta_range; - - MetaRange& get_meta_range() - { - return meta_range; - } -#else - ObjectRange& get_meta_range() - { - return object_range; - } -#endif - }; - - public: - template - static std::enable_if_t init() - { - static_assert(fixed_range_ == fixed_range, "Don't set SFINAE parameter!"); - - Pagemap::concretePagemap.init(); - } - - template - static std::enable_if_t init(void* base, size_t length) - { - static_assert(fixed_range_ == fixed_range, "Don't set SFINAE parameter!"); - - auto [heap_base, heap_length] = - Pagemap::concretePagemap.init(base, length); - - Pagemap::register_range(address_cast(heap_base), heap_length); - - // Push memory into the global range. - range_to_pow_2_blocks( - capptr::Chunk(heap_base), - heap_length, - [&](capptr::Chunk p, size_t sz, bool) { - GlobalR g; - g.dealloc_range(p, sz); - }); - } - /** * Provide a block of meta-data with size and align. * @@ -302,6 +107,15 @@ namespace snmalloc return {p, meta}; } + /** + * Deallocate a chunk of memory of size `size` and base `alloc`. + * The `slab_metadata` is the meta-data block associated with this + * chunk. The backend can recalculate this, but as the callee will + * already have it, we take it for possibly more optimal code. + * + * LocalState contains all the information about the various ranges + * that are used by the backend to manage the address space. + */ static void dealloc_chunk( LocalState& local_state, SlabMetadata& slab_metadata, @@ -336,6 +150,12 @@ namespace snmalloc local_state.object_range.dealloc_range(chunk, size); } + template + SNMALLOC_FAST_PATH static const PagemapEntry& get_metaentry(address_t p) + { + return Pagemap::template get_metaentry(p); + } + static size_t get_current_usage() { Stats stats_state; diff --git a/src/snmalloc/backend/base_constants.h b/src/snmalloc/backend/base_constants.h new file mode 100644 index 00000000..92385a96 --- /dev/null +++ b/src/snmalloc/backend/base_constants.h @@ -0,0 +1,21 @@ + + +#pragma once + +#include "../backend/backend.h" + +namespace snmalloc +{ + /** + * Base range configuration contains common parts of other ranges. 
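+   *
+   * The two constants below are inherited by the concrete range
+   * configurations (StandardLocalState and MetaProtectedRangeLocalState)
+   * and used as the refill sizes of their LargeBuddyRange stages:
+   * 2^24 bytes (16MiB) for the shared global cache and 2^21 bytes (2MiB)
+   * for the per-allocator local cache.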
+ */ + struct BaseLocalStateConstants + { + protected: + // Size of requests that the global cache should use + static constexpr size_t GlobalCacheSizeBits = 24; + + // Size of requests that the local cache should use + static constexpr size_t LocalCacheSizeBits = 21; + }; +} // namespace snmalloc \ No newline at end of file diff --git a/src/snmalloc/backend/fixedglobalconfig.h b/src/snmalloc/backend/fixedglobalconfig.h index 41b50268..a48a55f7 100644 --- a/src/snmalloc/backend/fixedglobalconfig.h +++ b/src/snmalloc/backend/fixedglobalconfig.h @@ -1,6 +1,7 @@ #pragma once -#include "../backend/backend.h" +#include "../backend_helpers/backend_helpers.h" +#include "standard_range.h" namespace snmalloc { @@ -8,14 +9,26 @@ namespace snmalloc * A single fixed address range allocator configuration */ template - class FixedGlobals final : public BackendAllocator + class FixedRangeConfig final : public CommonConfig { public: - using GlobalPoolState = PoolState>; + using PagemapEntry = DefaultPagemapEntry; private: - using Backend = BackendAllocator; + using ConcretePagemap = + FlatPagemap; + using Pagemap = BasicPagemap; + + public: + using LocalState = StandardLocalState; + + using GlobalPoolState = PoolState>; + + using Backend = BackendAllocator; + using Pal = PAL; + + private: inline static GlobalPoolState alloc_pool; public: @@ -54,11 +67,23 @@ namespace snmalloc snmalloc::register_clean_up(); } - static void - init(typename Backend::LocalState* local_state, void* base, size_t length) + static void init(LocalState* local_state, void* base, size_t length) { UNUSED(local_state); - Backend::init(base, length); + + auto [heap_base, heap_length] = + Pagemap::concretePagemap.init(base, length); + + Pagemap::register_range(address_cast(heap_base), heap_length); + + // Push memory into the global range. + range_to_pow_2_blocks( + capptr::Chunk(heap_base), + heap_length, + [&](capptr::Chunk p, size_t sz, bool) { + typename LocalState::GlobalR g; + g.dealloc_range(p, sz); + }); } /* Verify that a pointer points into the region managed by this config */ @@ -66,7 +91,7 @@ namespace snmalloc static SNMALLOC_FAST_PATH CapPtr< T, typename B::template with_wildness> - capptr_domesticate(typename Backend::LocalState* ls, CapPtr p) + capptr_domesticate(LocalState* ls, CapPtr p) { static_assert(B::wildness == capptr::dimension::Wildness::Wild); @@ -75,7 +100,7 @@ namespace snmalloc UNUSED(ls); auto address = address_cast(p); - auto [base, length] = Backend::Pagemap::get_bounds(); + auto [base, length] = Pagemap::get_bounds(); if ((address - base > (length - sz)) || (length < sz)) { return nullptr; diff --git a/src/snmalloc/backend/globalconfig.h b/src/snmalloc/backend/globalconfig.h index 6e88f175..322536a1 100644 --- a/src/snmalloc/backend/globalconfig.h +++ b/src/snmalloc/backend/globalconfig.h @@ -4,46 +4,111 @@ // `snmalloc.h` or consume the global allocation APIs. #ifndef SNMALLOC_PROVIDE_OWN_CONFIG -# include "../backend/backend.h" +# include "../backend_helpers/backend_helpers.h" +# include "backend.h" +# include "meta_protected_range.h" +# include "standard_range.h" + +# if defined(SNMALLOC_CHECK_CLIENT) && !defined(OPEN_ENCLAVE) +/** + * Protect meta data blocks by allocating separate from chunks for + * user allocations. This involves leaving gaps in address space. + * This is less efficient, so should only be applied for the checked + * build. + * + * On Open Enclave the address space is limited, so we disable this + * feature. 
+ */ +# define SNMALLOC_META_PROTECTED +# endif namespace snmalloc { // Forward reference to thread local cleanup. void register_clean_up(); -# ifdef USE_SNMALLOC_STATS - inline static void print_stats() - { - printf("No Stats yet!"); - // Stats s; - // current_alloc_pool()->aggregate_stats(s); - // s.print(std::cout); - } -# endif - /** - * The default configuration for a global snmalloc. This allocates memory - * from the operating system and expects to manage memory anywhere in the - * address space. + * The default configuration for a global snmalloc. It contains all the + * datastructures to manage the memory from the OS. It had several internal + * public types for various aspects of the code. + * The most notable are: + * + * Backend - Manages the memory coming from the platform. + * LocalState - the per-thread/per-allocator state that may perform local + * caching of reserved memory. This also specifies the various Range types + * used to manage the memory. + * + * The Configuration sets up a Pagemap for the backend to use, and the state + * required to build new allocators (GlobalPoolState). */ - class Globals final : public BackendAllocator + class StandardConfig final : public CommonConfig { + using GlobalPoolState = PoolState>; + public: - using GlobalPoolState = PoolState>; + using Pal = DefaultPal; + using PagemapEntry = DefaultPagemapEntry; private: - using Backend = BackendAllocator; + using ConcretePagemap = + FlatPagemap; + using Pagemap = BasicPagemap; + + /** + * This specifies where this configurations sources memory from. + * + * Takes account of any platform specific constraints like whether + * mmap/virtual alloc calls can be consolidated. + * @{ + */ +# if defined(_WIN32) || defined(__CHERI_PURE_CAPABILITY__) + static constexpr bool CONSOLIDATE_PAL_ALLOCS = false; +# else + static constexpr bool CONSOLIDATE_PAL_ALLOCS = true; +# endif + + using Base = Pipe< + PalRange, + PagemapRegisterRange>; + /** + * @} + */ + public: + /** + * Use one of the default range configurations + */ +# ifdef SNMALLOC_META_PROTECTED + using LocalState = MetaProtectedRangeLocalState; +# else + using LocalState = StandardLocalState; +# endif + + /** + * Use the default backend. + */ + using Backend = BackendAllocator; + + private: SNMALLOC_REQUIRE_CONSTINIT inline static GlobalPoolState alloc_pool; + /** + * Specifies if the Configuration has been initialised. + */ SNMALLOC_REQUIRE_CONSTINIT inline static std::atomic initialised{false}; + /** + * Used to prevent two threads attempting to initialise the configuration + */ SNMALLOC_REQUIRE_CONSTINIT inline static FlagWord initialisation_lock{}; public: + /** + * Provides the state to create new allocators. + */ static GlobalPoolState& pool() { return alloc_pool; @@ -70,11 +135,7 @@ namespace snmalloc key_global = FreeListKey(entropy.get_free_list_key()); // Need to initialise pagemap. - Backend::init(); - -# ifdef USE_SNMALLOC_STATS - atexit(snmalloc::print_stats); -# endif + Pagemap::concretePagemap.init(); initialised = true; } @@ -93,11 +154,10 @@ namespace snmalloc snmalloc::register_clean_up(); } }; -} // namespace snmalloc -// The default configuration for snmalloc -namespace snmalloc -{ - using Alloc = snmalloc::LocalAllocator; + /** + * Create allocator type for this configuration. 
+ */ + using Alloc = snmalloc::LocalAllocator; } // namespace snmalloc #endif diff --git a/src/snmalloc/backend/meta_protected_range.h b/src/snmalloc/backend/meta_protected_range.h new file mode 100644 index 00000000..8a8c5fe2 --- /dev/null +++ b/src/snmalloc/backend/meta_protected_range.h @@ -0,0 +1,103 @@ +#pragma once + +#include "../backend/backend.h" +#include "base_constants.h" + +namespace snmalloc +{ + /** + * Range that carefully ensures meta-data and object data cannot be in + * the same memory range. Once memory has is used for either meta-data + * or object data it can never be recycled to the other. + * + * This configuration also includes guard pages and randomisation. + * + * PAL is the underlying PAL that is used to Commit memory ranges. + * + * Base is where memory is sourced from. + * + * MinSizeBits is the minimum request size that can be passed to Base. + * On Windows this 16 as VirtualAlloc cannot reserve less than 64KiB. + * Alternative configurations might make this 2MiB so that huge pages + * can be used. + */ + template< + typename PAL, + typename Pagemap, + typename Base, + size_t MinSizeBits = MinBaseSizeBits()> + struct MetaProtectedRangeLocalState : BaseLocalStateConstants + { + private: + // Global range of memory + using GlobalR = Pipe< + Base, + LargeBuddyRange< + GlobalCacheSizeBits, + bits::BITS - 1, + Pagemap, + MinSizeBits>, + LogRange<2>, + GlobalRange<>>; + + // Central source of object-range, does not pass back to GlobalR as + // that would allow flows from Objects to Meta-data, and thus UAF + // would be able to corrupt meta-data. + using CentralObjectRange = Pipe< + GlobalR, + LargeBuddyRange, + LogRange<3>, + GlobalRange<>, + CommitRange, + StatsRange<>>; + + // Controls the padding around the meta-data range. + // The larger the padding range the more randomisation that + // can be used. + static constexpr size_t SubRangeRatioBits = 6; + + // Centralised source of meta-range + using CentralMetaRange = Pipe< + GlobalR, + SubRange, // Use SubRange to introduce guard + // pages. + LargeBuddyRange, + LogRange<4>, + GlobalRange<>, + CommitRange, + StatsRange<>>; + + // Local caching of object range + using ObjectRange = Pipe< + CentralObjectRange, + LargeBuddyRange, + LogRange<5>>; + + // Local caching of meta-data range + using MetaRange = Pipe< + CentralMetaRange, + LargeBuddyRange< + LocalCacheSizeBits - SubRangeRatioBits, + bits::BITS - 1, + Pagemap>, + SmallBuddyRange<>>; + + public: + using Stats = StatsCombiner; + + ObjectRange object_range; + + MetaRange meta_range; + + MetaRange& get_meta_range() + { + return meta_range; + } + + // Create global range that can service small meta-data requests. + // Don't want to add the SmallBuddyRange to the CentralMetaRange as that + // would require committing memory inside the main global lock. + using GlobalMetaRange = + Pipe, GlobalRange<>>; + }; +} // namespace snmalloc \ No newline at end of file diff --git a/src/snmalloc/backend/standard_range.h b/src/snmalloc/backend/standard_range.h new file mode 100644 index 00000000..0feb0aff --- /dev/null +++ b/src/snmalloc/backend/standard_range.h @@ -0,0 +1,65 @@ + + +#pragma once + +#include "../backend/backend.h" +#include "base_constants.h" + +namespace snmalloc +{ + /** + * Default configuration that does not provide any meta-data protection. + * + * PAL is the underlying PAL that is used to Commit memory ranges. + * + * Base is where memory is sourced from. + * + * MinSizeBits is the minimum request size that can be passed to Base. 
+ * On Windows this 16 as VirtualAlloc cannot reserve less than 64KiB. + * Alternative configurations might make this 2MiB so that huge pages + * can be used. + */ + template< + typename PAL, + typename Pagemap, + typename Base = EmptyRange, + size_t MinSizeBits = MinBaseSizeBits()> + struct StandardLocalState : BaseLocalStateConstants + { + // Global range of memory, expose this so can be filled by init. + using GlobalR = Pipe< + Base, + LargeBuddyRange< + GlobalCacheSizeBits, + bits::BITS - 1, + Pagemap, + MinSizeBits>, + LogRange<2>, + GlobalRange<>>; + + // Track stats of the committed memory + using Stats = Pipe, StatsRange<>>; + + private: + // Source for object allocations and metadata + // Use buddy allocators to cache locally. + using ObjectRange = Pipe< + Stats, + LargeBuddyRange, + SmallBuddyRange<>>; + + public: + // Expose a global range for the initial allocation of meta-data. + using GlobalMetaRange = Pipe>; + + // Where we get user allocations from. + ObjectRange object_range; + + // Where we get meta-data allocations from. + ObjectRange& get_meta_range() + { + // Use the object range to service meta-data requests. + return object_range; + } + }; +} // namespace snmalloc \ No newline at end of file diff --git a/src/snmalloc/backend_helpers/backend_helpers.h b/src/snmalloc/backend_helpers/backend_helpers.h index 94dfec2f..fa4a708f 100644 --- a/src/snmalloc/backend_helpers/backend_helpers.h +++ b/src/snmalloc/backend_helpers/backend_helpers.h @@ -2,6 +2,7 @@ #include "buddy.h" #include "commitrange.h" #include "commonconfig.h" +#include "defaultpagemapentry.h" #include "empty_range.h" #include "globalrange.h" #include "largebuddyrange.h" diff --git a/src/snmalloc/backend_helpers/commonconfig.h b/src/snmalloc/backend_helpers/commonconfig.h index aca6103f..119d6c84 100644 --- a/src/snmalloc/backend_helpers/commonconfig.h +++ b/src/snmalloc/backend_helpers/commonconfig.h @@ -113,5 +113,17 @@ namespace snmalloc inline static RemoteAllocator unused_remote; }; + template + static constexpr size_t MinBaseSizeBits() + { + if constexpr (pal_supports) + { + return bits::next_pow2_bits_const(PAL::minimum_alloc_size); + } + else + { + return MIN_CHUNK_BITS; + } + } } // namespace snmalloc #include "../mem/remotecache.h" diff --git a/src/snmalloc/backend_helpers/defaultpagemapentry.h b/src/snmalloc/backend_helpers/defaultpagemapentry.h new file mode 100644 index 00000000..7001f451 --- /dev/null +++ b/src/snmalloc/backend_helpers/defaultpagemapentry.h @@ -0,0 +1,64 @@ +#pragma once + +#include "../mem/mem.h" + +namespace snmalloc +{ + /** + * Example of type stored in the pagemap. + * The following class could be replaced by: + * + * ``` + * using DefaultPagemapEntry = FrontendMetaEntry; + * ``` + * + * The full form here provides an example of how to extend the pagemap + * entries. It also guarantees that the front end never directly + * constructs meta entries, it only ever reads them or modifies them in + * place. + */ + class DefaultPagemapEntry : public FrontendMetaEntry + { + /** + * The private initialising constructor is usable only by this back end. + */ + template< + SNMALLOC_CONCEPT(ConceptPAL) A1, + typename A2, + typename A3, + typename A4> + friend class BackendAllocator; + + /** + * The private default constructor is usable only by the pagemap. + */ + template + friend class FlatPagemap; + + /** + * The only constructor that creates newly initialised meta entries. + * This is callable only by the back end. 
The front end may copy, + * query, and update these entries, but it may not create them + * directly. This contract allows the back end to store any arbitrary + * metadata in meta entries when they are first constructed. + */ + SNMALLOC_FAST_PATH + DefaultPagemapEntry(FrontendSlabMetadata* meta, uintptr_t ras) + : FrontendMetaEntry(meta, ras) + {} + + /** + * Copy assignment is used only by the pagemap. + */ + DefaultPagemapEntry& operator=(const DefaultPagemapEntry& other) + { + FrontendMetaEntry::operator=(other); + return *this; + } + + /** + * Default constructor. This must be callable from the pagemap. + */ + SNMALLOC_FAST_PATH DefaultPagemapEntry() = default; + }; +} // namespace snmalloc \ No newline at end of file diff --git a/src/snmalloc/backend_helpers/largebuddyrange.h b/src/snmalloc/backend_helpers/largebuddyrange.h index 47e35962..8f4b7ee6 100644 --- a/src/snmalloc/backend_helpers/largebuddyrange.h +++ b/src/snmalloc/backend_helpers/largebuddyrange.h @@ -13,7 +13,7 @@ namespace snmalloc /** * Class for using the pagemap entries for the buddy allocator. */ - template + template class BuddyChunkRep { public: @@ -186,7 +186,7 @@ namespace snmalloc template< size_t REFILL_SIZE_BITS, size_t MAX_SIZE_BITS, - SNMALLOC_CONCEPT(ConceptBuddyRangeMeta) Pagemap, + SNMALLOC_CONCEPT(IsWritablePagemap) Pagemap, size_t MIN_REFILL_SIZE_BITS = 0, typename ParentRange = EmptyRange> class LargeBuddyRange : public ContainsParent diff --git a/src/snmalloc/backend_helpers/pagemap.h b/src/snmalloc/backend_helpers/pagemap.h index 15ff51cf..55535a64 100644 --- a/src/snmalloc/backend_helpers/pagemap.h +++ b/src/snmalloc/backend_helpers/pagemap.h @@ -328,16 +328,15 @@ namespace snmalloc /** * This is a generic implementation of the backend's interface to the page - * map. It takes a concrete page map implementation (probably FlatPageMap + * map. It takes a concrete page map implementation (probably FlatPagemap * above) and entry type. It is friends with the backend passed in as a * template parameter so that the backend can initialise the concrete page map * and use set_metaentry which no one else should use. */ template< - typename Backend, typename PAL, typename ConcreteMap, - typename PageMapEntry, + typename PagemapEntry, bool fixed_range> class BasicPagemap { @@ -345,10 +344,7 @@ namespace snmalloc /** * Export the type stored in the pagemap. */ - using Entry = PageMapEntry; - - private: - friend Backend; + using Entry = PagemapEntry; /** * Instance of the concrete pagemap, accessible to the backend so that @@ -369,7 +365,6 @@ namespace snmalloc } } - public: /** * Get the metadata associated with a chunk. 
* diff --git a/src/snmalloc/backend_helpers/pagemapregisterrange.h b/src/snmalloc/backend_helpers/pagemapregisterrange.h index cd52640b..d0fe39d4 100644 --- a/src/snmalloc/backend_helpers/pagemapregisterrange.h +++ b/src/snmalloc/backend_helpers/pagemapregisterrange.h @@ -7,7 +7,7 @@ namespace snmalloc { template< - SNMALLOC_CONCEPT(ConceptBackendMetaRange) Pagemap, + SNMALLOC_CONCEPT(IsWritablePagemapWithRegister) Pagemap, bool CanConsolidate = true, typename ParentRange = EmptyRange> class PagemapRegisterRange : public ContainsParent diff --git a/src/snmalloc/mem/backend_concept.h b/src/snmalloc/mem/backend_concept.h index e29a2df8..03321250 100644 --- a/src/snmalloc/mem/backend_concept.h +++ b/src/snmalloc/mem/backend_concept.h @@ -12,81 +12,94 @@ namespace snmalloc * get_metadata takes a boolean template parameter indicating whether it may * be accessing memory that is not known to be committed. */ - template - concept ConceptBackendMeta = - requires(address_t addr, size_t sz, const typename Meta::Entry& t) + template + concept IsReadablePagemap = + requires(address_t addr, size_t sz, const typename Pagemap::Entry& t) { { - Meta::template get_metaentry(addr) + Pagemap::template get_metaentry(addr) } - ->ConceptSame; + ->ConceptSame; { - Meta::template get_metaentry(addr) + Pagemap::template get_metaentry(addr) } - ->ConceptSame; + ->ConceptSame; + }; + + /** + * The core of the static pagemap accessor interface: {get,set}_metadata. + * + * get_metadata_mut takes a boolean template parameter indicating whether it + * may be accessing memory that is not known to be committed. + * + * set_metadata updates the entry in the pagemap. + */ + template + concept IsWritablePagemap = IsReadablePagemap&& requires( + address_t addr, size_t sz, const typename Pagemap::Entry& t) + { + { + Pagemap::template get_metaentry_mut(addr) + } + ->ConceptSame; + + { + Pagemap::template get_metaentry_mut(addr) + } + ->ConceptSame; + + { + Pagemap::set_metaentry(addr, sz, t) + } + ->ConceptSame; }; /** * The pagemap can also be told to commit backing storage for a range of * addresses. This is broken out to a separate concept so that we can * annotate which functions expect to do this vs. which merely use the core - * interface above. In practice, use ConceptBackendMetaRange (without the - * underscore) below, which combines this and the core concept, above. + * interface above. In practice, use IsWritablePagemapWithRegister below, + * which combines this and the core concept, above. */ - template - concept ConceptBackendMeta_Range = requires(address_t addr, size_t sz) + template + concept IsPagemapWithRegister = requires(address_t addr, size_t sz) { { - Meta::register_range(addr, sz) + Pagemap::register_range(addr, sz) } ->ConceptSame; }; - template - concept ConceptBuddyRangeMeta = - requires(address_t addr, size_t sz, const typename Meta::Entry& t) - { - { - Meta::template get_metaentry_mut(addr) - } - ->ConceptSame; - - { - Meta::template get_metaentry_mut(addr) - } - ->ConceptSame; - }; - /** * The full pagemap accessor interface, with all of {get,set}_metadata and * register_range. Use this to annotate callers that need the full interface - * and use ConceptBackendMeta for callers that merely need {get,set}_metadata, + * and use IsReadablePagemap for callers that merely need {get,set}_metadata, * but note that the difference is just for humans and not compilers (since * concept checking is lower bounding and does not constrain the templatized * code to use only those affordances given by the concept). 
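   *
   * For example, in this change PagemapRegisterRange
   * (pagemapregisterrange.h) constrains its Pagemap parameter with
   * SNMALLOC_CONCEPT(IsWritablePagemapWithRegister), while LargeBuddyRange
   * (largebuddyrange.h) only requires IsWritablePagemap.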
*/ - template - concept ConceptBackendMetaRange = - ConceptBackendMeta&& ConceptBackendMeta_Range; + template + concept IsWritablePagemapWithRegister = + IsReadablePagemap&& IsPagemapWithRegister; /** - * The backend also defines domestication (that is, the difference between - * Tame and Wild CapPtr bounds). It exports the intended affordance for - * testing a Wild pointer and either returning nullptr or the original + * The configuration also defines domestication (that is, the difference + * between Tame and Wild CapPtr bounds). It exports the intended affordance + * for testing a Wild pointer and either returning nullptr or the original * pointer, now Tame. */ - template - concept ConceptBackendDomestication = - requires(typename Globals::LocalState* ls, capptr::AllocWild ptr) + template + concept IsConfigDomestication = + requires(typename Config::LocalState* ls, capptr::AllocWild ptr) { { - Globals::capptr_domesticate(ls, ptr) + Config::capptr_domesticate(ls, ptr) } ->ConceptSame>; { - Globals::capptr_domesticate(ls, ptr.template as_static()) + Config::capptr_domesticate(ls, ptr.template as_static()) } ->ConceptSame>; }; @@ -94,49 +107,92 @@ namespace snmalloc class CommonConfig; struct Flags; + template + concept IsBackend = + requires(LocalState& local_state, size_t size, uintptr_t ras) + { + { + Backend::alloc_chunk(local_state, size, ras) + } + ->ConceptSame< + std::pair, typename Backend::SlabMetadata*>>; + } + &&requires(LocalState* local_state, size_t size) + { + { + Backend::template alloc_meta_data(local_state, size) + } + ->ConceptSame>; + } + &&requires( + LocalState& local_state, + typename Backend::SlabMetadata& slab_metadata, + capptr::Alloc alloc, + size_t size) + { + { + Backend::dealloc_chunk(local_state, slab_metadata, alloc, size) + } + ->ConceptSame; + } + &&requires(address_t p) + { + { + Backend::template get_metaentry(p) + } + ->ConceptSame; + + { + Backend::template get_metaentry(p) + } + ->ConceptSame; + }; + /** - * Backend global objects of type T must obey a number of constraints. They + * Config objects of type T must obey a number of constraints. They * must... * * * inherit from CommonConfig (see commonconfig.h) * * specify which PAL is in use via T::Pal - * * have static pagemap accessors via T::Pagemap * * define a T::LocalState type (and alias it as T::Pagemap::LocalState) * * define T::Options of type snmalloc::Flags * * expose the global allocator pool via T::pool() if pool allocation is * used. * */ - template - concept ConceptBackendGlobals = - std::is_base_of::value&& - ConceptPAL&& - ConceptBackendMetaRange&& requires() + template + concept IsConfig = std::is_base_of::value&& + ConceptPAL&& IsBackend< + typename Config::LocalState, + typename Config::PagemapEntry, + typename Config::Backend>&& requires() { - typename Globals::LocalState; + typename Config::LocalState; + typename Config::Backend; + typename Config::PagemapEntry; { - Globals::Options + Config::Options } ->ConceptSameModRef; } &&( requires() { - Globals::Options.CoreAllocIsPoolAllocated == true; - typename Globals::GlobalPoolState; + Config::Options.CoreAllocIsPoolAllocated == true; + typename Config::GlobalPoolState; { - Globals::pool() + Config::pool() } - ->ConceptSame; + ->ConceptSame; } || - requires() { Globals::Options.CoreAllocIsPoolAllocated == false; }); + requires() { Config::Options.CoreAllocIsPoolAllocated == false; }); /** - * The lazy version of the above; please see ds/concept.h and use sparingly. 
+ * The lazy version of the above; please see ds_core/concept.h and use + * sparingly. */ - template - concept ConceptBackendGlobalsLazy = - !is_type_complete_v || ConceptBackendGlobals; + template + concept IsConfigLazy = !is_type_complete_v || IsConfig; } // namespace snmalloc diff --git a/src/snmalloc/mem/backend_wrappers.h b/src/snmalloc/mem/backend_wrappers.h index e26cc22a..605b5055 100644 --- a/src/snmalloc/mem/backend_wrappers.h +++ b/src/snmalloc/mem/backend_wrappers.h @@ -40,14 +40,14 @@ namespace snmalloc * backend. Returns true if there is a function with correct name and type. */ template< - SNMALLOC_CONCEPT(ConceptBackendDomestication) Backend, + SNMALLOC_CONCEPT(IsConfigDomestication) Config, typename T, SNMALLOC_CONCEPT(capptr::ConceptBound) B> constexpr SNMALLOC_FAST_PATH_INLINE auto has_domesticate(int) -> std::enable_if_t< std::is_same_v< - decltype(Backend::capptr_domesticate( - std::declval(), + decltype(Config::capptr_domesticate( + std::declval(), std::declval>())), CapPtr< T, @@ -63,7 +63,7 @@ namespace snmalloc * backend. Returns false in case where above template does not match. */ template< - SNMALLOC_CONCEPT(ConceptBackendGlobals) Backend, + SNMALLOC_CONCEPT(IsConfig) Config, typename T, SNMALLOC_CONCEPT(capptr::ConceptBound) B> constexpr SNMALLOC_FAST_PATH_INLINE bool has_domesticate(long) @@ -73,29 +73,29 @@ namespace snmalloc } // namespace detail /** - * Wrapper that calls `Backend::capptr_domesticate` if and only if - * Backend::Options.HasDomesticate is true. If it is not implemented then + * Wrapper that calls `Config::capptr_domesticate` if and only if + * Config::Options.HasDomesticate is true. If it is not implemented then * this assumes that any wild pointer can be domesticated. */ template< - SNMALLOC_CONCEPT(ConceptBackendGlobals) Backend, + SNMALLOC_CONCEPT(IsConfig) Config, typename T, SNMALLOC_CONCEPT(capptr::ConceptBound) B> SNMALLOC_FAST_PATH_INLINE auto - capptr_domesticate(typename Backend::LocalState* ls, CapPtr p) + capptr_domesticate(typename Config::LocalState* ls, CapPtr p) { static_assert( - !detail::has_domesticate(0) || - Backend::Options.HasDomesticate, + !detail::has_domesticate(0) || + Config::Options.HasDomesticate, "Back end provides domesticate function but opts out of using it "); static_assert( - detail::has_domesticate(0) || - !Backend::Options.HasDomesticate, + detail::has_domesticate(0) || + !Config::Options.HasDomesticate, "Back end does not provide capptr_domesticate and requests its use"); - if constexpr (Backend::Options.HasDomesticate) + if constexpr (Config::Options.HasDomesticate) { - return Backend::capptr_domesticate(ls, p); + return Config::capptr_domesticate(ls, p); } else { diff --git a/src/snmalloc/mem/corealloc.h b/src/snmalloc/mem/corealloc.h index 3b9628b5..98cf0b79 100644 --- a/src/snmalloc/mem/corealloc.h +++ b/src/snmalloc/mem/corealloc.h @@ -32,13 +32,13 @@ namespace snmalloc * provided externally, then it must be set explicitly with * `init_message_queue`. */ - template + template class CoreAllocator : public std::conditional_t< - Backend::Options.CoreAllocIsPoolAllocated, - Pooled>, + Config::Options.CoreAllocIsPoolAllocated, + Pooled>, Empty> { - template + template friend class LocalAllocator; /** @@ -46,8 +46,8 @@ namespace snmalloc * specialised for the back-end that we are using. 
* @{ */ - using BackendSlabMetadata = typename Backend::SlabMetadata; - using PagemapEntry = typename Backend::Pagemap::Entry; + using BackendSlabMetadata = typename Config::Backend::SlabMetadata; + using PagemapEntry = typename Config::PagemapEntry; /// }@ /** @@ -77,7 +77,7 @@ namespace snmalloc * allocator */ std::conditional_t< - Backend::Options.IsQueueInline, + Config::Options.IsQueueInline, RemoteAllocator, RemoteAllocator*> remote_alloc; @@ -85,7 +85,7 @@ namespace snmalloc /** * The type used local state. This is defined by the back end. */ - using LocalState = typename Backend::LocalState; + using LocalState = typename Config::LocalState; /** * A local area of address space managed by this allocator. @@ -94,7 +94,7 @@ namespace snmalloc * externally. */ std::conditional_t< - Backend::Options.CoreAllocOwnsLocalState, + Config::Options.CoreAllocOwnsLocalState, LocalState, LocalState*> backend_state; @@ -108,7 +108,7 @@ namespace snmalloc /** * Ticker to query the clock regularly at a lower cost. */ - Ticker ticker; + Ticker ticker; /** * The message queue needs to be accessible from other threads @@ -118,7 +118,7 @@ namespace snmalloc */ auto* public_state() { - if constexpr (Backend::Options.IsQueueInline) + if constexpr (Config::Options.IsQueueInline) { return &remote_alloc; } @@ -133,7 +133,7 @@ namespace snmalloc */ LocalState* backend_state_ptr() { - if constexpr (Backend::Options.CoreAllocOwnsLocalState) + if constexpr (Config::Options.CoreAllocOwnsLocalState) { return &backend_state; } @@ -195,10 +195,10 @@ namespace snmalloc SNMALLOC_ASSERT(attached_cache != nullptr); auto domesticate = [this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(backend_state_ptr(), p); + return capptr_domesticate(backend_state_ptr(), p); }; // Use attached cache, and fill it if it is empty. - return attached_cache->template alloc( + return attached_cache->template alloc( domesticate, size, [&](smallsizeclass_t sizeclass, freelist::Iter<>* fl) { @@ -300,7 +300,7 @@ namespace snmalloc auto local_state = backend_state_ptr(); auto domesticate = [local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(local_state, p); + return capptr_domesticate(local_state, p); }; capptr::Alloc p = finish_alloc_no_zero(fl.take(key, domesticate), sizeclass); @@ -363,7 +363,7 @@ namespace snmalloc alloc_classes[sizeclass].available.filter([this, sizeclass](auto* meta) { auto domesticate = [this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - auto res = capptr_domesticate(backend_state_ptr(), p); + auto res = capptr_domesticate(backend_state_ptr(), p); #ifdef SNMALLOC_TRACING if (res.unsafe_ptr() != p.unsafe_ptr()) printf( @@ -388,7 +388,7 @@ namespace snmalloc // don't touch the cache lines at this point in snmalloc_check_client. 
auto start = clear_slab(meta, sizeclass); - Backend::dealloc_chunk( + Config::Backend::dealloc_chunk( get_backend_local_state(), *meta, start, @@ -423,7 +423,8 @@ namespace snmalloc UNUSED(size); #endif - Backend::dealloc_chunk(get_backend_local_state(), *meta, p, size); + Config::Backend::dealloc_chunk( + get_backend_local_state(), *meta, p, size); return; } @@ -483,7 +484,7 @@ namespace snmalloc auto local_state = backend_state_ptr(); auto domesticate = [local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(local_state, p); + return capptr_domesticate(local_state, p); }; auto cb = [this, &need_post](freelist::HeadPtr msg) SNMALLOC_FAST_PATH_LAMBDA { @@ -492,14 +493,14 @@ namespace snmalloc #endif auto& entry = - Backend::Pagemap::template get_metaentry(snmalloc::address_cast(msg)); + Config::Backend::template get_metaentry(snmalloc::address_cast(msg)); handle_dealloc_remote(entry, msg.as_void(), need_post); return true; }; - if constexpr (Backend::Options.QueueHeadsAreTame) + if constexpr (Config::Options.QueueHeadsAreTame) { /* * The front of the queue has already been validated; just change the @@ -571,12 +572,12 @@ namespace snmalloc // Entropy must be first, so that all data-structures can use the key // it generates. // This must occur before any freelists are constructed. - entropy.init(); + entropy.init(); // Ignoring stats for now. // stats().start(); - if constexpr (Backend::Options.IsQueueInline) + if constexpr (Config::Options.IsQueueInline) { init_message_queue(); message_queue().invariant(); @@ -606,8 +607,8 @@ namespace snmalloc * SFINAE disabled if the allocator does not own the local state. */ template< - typename Config = Backend, - typename = std::enable_if_t> + typename Config_ = Config, + typename = std::enable_if_t> CoreAllocator(LocalCache* cache) : attached_cache(cache) { init(); @@ -618,8 +619,8 @@ namespace snmalloc * state. SFINAE disabled if the allocator does own the local state. */ template< - typename Config = Backend, - typename = std::enable_if_t> + typename Config_ = Config, + typename = std::enable_if_t> CoreAllocator(LocalCache* cache, LocalState* backend = nullptr) : backend_state(backend), attached_cache(cache) { @@ -630,7 +631,7 @@ namespace snmalloc * If the message queue is not inline, provide it. This will then * configure the message queue for use. */ - template + template std::enable_if_t init_message_queue(RemoteAllocator* q) { remote_alloc = q; @@ -649,7 +650,7 @@ namespace snmalloc // stats().remote_post(); // TODO queue not in line! 
bool sent_something = attached_cache->remote_dealloc_cache - .post( + .post( backend_state_ptr(), public_state()->trunc_id(), key_global); return sent_something; @@ -674,7 +675,7 @@ namespace snmalloc // PagemapEntry-s seen here are expected to have meaningful Remote // pointers auto& entry = - Backend::Pagemap::template get_metaentry(snmalloc::address_cast(p)); + Config::Backend::template get_metaentry(snmalloc::address_cast(p)); if (SNMALLOC_LIKELY(dealloc_local_object_fast(entry, p, entropy))) return; @@ -735,7 +736,7 @@ namespace snmalloc auto domesticate = [this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(backend_state_ptr(), p); + return capptr_domesticate(backend_state_ptr(), p); }; auto [p, still_active] = BackendSlabMetadata::alloc_free_list( domesticate, meta, fast_free_list, entropy, sizeclass); @@ -746,7 +747,7 @@ namespace snmalloc sl.insert(meta); } - auto r = finish_alloc(p, sizeclass); + auto r = finish_alloc(p, sizeclass); return ticker.check_tick(r); } return small_alloc_slow(sizeclass, fast_free_list); @@ -759,7 +760,7 @@ namespace snmalloc SNMALLOC_FAST_PATH LocalState& get_backend_local_state() { - if constexpr (Backend::Options.CoreAllocOwnsLocalState) + if constexpr (Config::Options.CoreAllocOwnsLocalState) { return backend_state; } @@ -783,7 +784,7 @@ namespace snmalloc message<1024>("small_alloc_slow rsize={} slab size={}", rsize, slab_size); #endif - auto [slab, meta] = Backend::alloc_chunk( + auto [slab, meta] = Config::Backend::alloc_chunk( get_backend_local_state(), slab_size, PagemapEntry::encode( @@ -802,7 +803,7 @@ namespace snmalloc auto domesticate = [this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(backend_state_ptr(), p); + return capptr_domesticate(backend_state_ptr(), p); }; auto [p, still_active] = BackendSlabMetadata::alloc_free_list( domesticate, meta, fast_free_list, entropy, sizeclass); @@ -813,7 +814,7 @@ namespace snmalloc alloc_classes[sizeclass].available.insert(meta); } - auto r = finish_alloc(p, sizeclass); + auto r = finish_alloc(p, sizeclass); return ticker.check_tick(r); } @@ -828,7 +829,7 @@ namespace snmalloc auto local_state = backend_state_ptr(); auto domesticate = [local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(local_state, p); + return capptr_domesticate(local_state, p); }; if (destroy_queue) @@ -841,7 +842,7 @@ namespace snmalloc bool need_post = true; // Always going to post, so ignore. auto n_tame = p_tame->atomic_read_next(key_global, domesticate); const PagemapEntry& entry = - Backend::Pagemap::get_metaentry(snmalloc::address_cast(p_tame)); + Config::Backend::get_metaentry(snmalloc::address_cast(p_tame)); handle_dealloc_remote(entry, p_tame.as_void(), need_post); p_tame = n_tame; } @@ -854,7 +855,7 @@ namespace snmalloc handle_message_queue([]() {}); } - auto posted = attached_cache->flush( + auto posted = attached_cache->flush( backend_state_ptr(), [&](capptr::Alloc p) { dealloc_local_object(p); }); @@ -966,6 +967,6 @@ namespace snmalloc /** * Use this alias to access the pool of allocators throughout snmalloc. 
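   *
   * For example, LocalAllocator (localalloc.h) obtains and returns core
   * allocators via AllocPool<Config>::acquire and
   * AllocPool<Config>::release, and the debug/cleanup helpers in
   * globalalloc.h walk the pool with AllocPool<Config>::iterate.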
*/ - template - using AllocPool = Pool, Backend, Backend::pool>; + template + using AllocPool = Pool, Config, Config::pool>; } // namespace snmalloc diff --git a/src/snmalloc/mem/globalalloc.h b/src/snmalloc/mem/globalalloc.h index b898eed9..dc9528f6 100644 --- a/src/snmalloc/mem/globalalloc.h +++ b/src/snmalloc/mem/globalalloc.h @@ -5,18 +5,18 @@ namespace snmalloc { - template + template inline static void cleanup_unused() { #ifndef SNMALLOC_PASS_THROUGH static_assert( - SharedStateHandle::Options.CoreAllocIsPoolAllocated, + Config::Options.CoreAllocIsPoolAllocated, "Global cleanup is available only for pool-allocated configurations"); // Call this periodically to free and coalesce memory allocated by // allocators that are not currently in use by any thread. // One atomic operation to extract the stack, another to restore it. // Handling the message queue for each stack is non-atomic. - auto* first = AllocPool::extract(); + auto* first = AllocPool::extract(); auto* alloc = first; decltype(alloc) last; @@ -26,10 +26,10 @@ namespace snmalloc { alloc->flush(); last = alloc; - alloc = AllocPool::extract(alloc); + alloc = AllocPool::extract(alloc); } - AllocPool::restore(first, last); + AllocPool::restore(first, last); } #endif } @@ -39,16 +39,16 @@ namespace snmalloc allocators are empty. If you don't pass a pointer to a bool, then will raise an error all the allocators are not empty. */ - template + template inline static void debug_check_empty(bool* result = nullptr) { #ifndef SNMALLOC_PASS_THROUGH static_assert( - SharedStateHandle::Options.CoreAllocIsPoolAllocated, + Config::Options.CoreAllocIsPoolAllocated, "Global status is available only for pool-allocated configurations"); // This is a debugging function. It checks that all memory from all // allocators has been freed. - auto* alloc = AllocPool::iterate(); + auto* alloc = AllocPool::iterate(); # ifdef SNMALLOC_TRACING message<1024>("debug check empty: first {}", alloc); @@ -62,7 +62,7 @@ namespace snmalloc message<1024>("debug_check_empty: Check all allocators!"); # endif done = true; - alloc = AllocPool::iterate(); + alloc = AllocPool::iterate(); okay = true; while (alloc != nullptr) @@ -83,7 +83,7 @@ namespace snmalloc # ifdef SNMALLOC_TRACING message<1024>("debug check empty: okay = {}", okay); # endif - alloc = AllocPool::iterate(alloc); + alloc = AllocPool::iterate(alloc); } } @@ -96,11 +96,11 @@ namespace snmalloc // Redo check so abort is on allocator with allocation left. 
if (!okay) { - alloc = AllocPool::iterate(); + alloc = AllocPool::iterate(); while (alloc != nullptr) { alloc->debug_is_empty(nullptr); - alloc = AllocPool::iterate(alloc); + alloc = AllocPool::iterate(alloc); } } #else @@ -108,13 +108,13 @@ namespace snmalloc #endif } - template + template inline static void debug_in_use(size_t count) { static_assert( - SharedStateHandle::Options.CoreAllocIsPoolAllocated, + Config::Options.CoreAllocIsPoolAllocated, "Global status is available only for pool-allocated configurations"); - auto alloc = AllocPool::iterate(); + auto alloc = AllocPool::iterate(); while (alloc != nullptr) { if (alloc->debug_is_in_use()) @@ -125,7 +125,7 @@ namespace snmalloc } count--; } - alloc = AllocPool::iterate(alloc); + alloc = AllocPool::iterate(alloc); if (count != 0) { diff --git a/src/snmalloc/mem/localalloc.h b/src/snmalloc/mem/localalloc.h index cd1b6e5f..9ef63b2b 100644 --- a/src/snmalloc/mem/localalloc.h +++ b/src/snmalloc/mem/localalloc.h @@ -56,11 +56,11 @@ namespace snmalloc * core allocator must be provided externally by invoking the `init` method * on this class *before* any allocation-related methods are called. */ - template + template class LocalAllocator { public: - using StateHandle = Backend; + using Config = Config_; private: /** @@ -68,15 +68,15 @@ namespace snmalloc * specialised for the back-end that we are using. * @{ */ - using CoreAlloc = CoreAllocator; - using PagemapEntry = typename Backend::Pagemap::Entry; + using CoreAlloc = CoreAllocator; + using PagemapEntry = typename Config::PagemapEntry; /// }@ // Free list per small size class. These are used for // allocation on the fast path. This part of the code is inspired by // mimalloc. // Also contains remote deallocation cache. - LocalCache local_cache{&Backend::unused_remote}; + LocalCache local_cache{&Config::unused_remote}; // Underlying allocator for most non-fast path operations. CoreAlloc* core_alloc{nullptr}; @@ -120,7 +120,7 @@ namespace snmalloc SNMALLOC_SLOW_PATH decltype(auto) lazy_init(Action action, Args... args) { SNMALLOC_ASSERT(core_alloc == nullptr); - if constexpr (!Backend::Options.LocalAllocSupportsLazyInit) + if constexpr (!Config::Options.LocalAllocSupportsLazyInit) { SNMALLOC_CHECK( false && @@ -133,7 +133,7 @@ namespace snmalloc else { // Initialise the thread local allocator - if constexpr (Backend::Options.CoreAllocOwnsLocalState) + if constexpr (Config::Options.CoreAllocOwnsLocalState) { init(); } @@ -145,7 +145,7 @@ namespace snmalloc // Must be called at least once per thread. // A pthread implementation only calls the thread destruction handle // if the key has been set. - Backend::register_clean_up(); + Config::register_clean_up(); // Perform underlying operation auto r = action(core_alloc, args...); @@ -184,7 +184,7 @@ namespace snmalloc return check_init([&](CoreAlloc* core_alloc) { // Grab slab of correct size // Set remote as large allocator remote. 
- auto [chunk, meta] = Backend::alloc_chunk( + auto [chunk, meta] = Config::Backend::alloc_chunk( core_alloc->get_backend_local_state(), large_size_to_chunk_size(size), PagemapEntry::encode( @@ -201,7 +201,7 @@ namespace snmalloc if (zero_mem == YesZero && chunk.unsafe_ptr() != nullptr) { - Backend::Pal::template zero( + Config::Pal::template zero( chunk.unsafe_ptr(), bits::next_pow2(size)); } @@ -212,10 +212,10 @@ namespace snmalloc template SNMALLOC_FAST_PATH capptr::Alloc small_alloc(size_t size) { - auto domesticate = [this]( - freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(core_alloc->backend_state_ptr(), p); - }; + auto domesticate = + [this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { + return capptr_domesticate(core_alloc->backend_state_ptr(), p); + }; auto slowpath = [&]( smallsizeclass_t sizeclass, freelist::Iter<>* fl) SNMALLOC_FAST_PATH_LAMBDA { @@ -239,7 +239,7 @@ namespace snmalloc sizeclass); }; - return local_cache.template alloc( + return local_cache.template alloc( domesticate, size, slowpath); } @@ -271,7 +271,7 @@ namespace snmalloc alloc_size(p.unsafe_ptr())); #endif const PagemapEntry& entry = - Backend::Pagemap::get_metaentry(address_cast(p)); + Config::Backend::template get_metaentry(address_cast(p)); local_cache.remote_dealloc_cache.template dealloc( entry.get_remote()->trunc_id(), p, key_global); post_remote_cache(); @@ -300,13 +300,13 @@ namespace snmalloc } /** - * Call `Backend::is_initialised()` if it is implemented, + * Call `Config::is_initialised()` if it is implemented, * unconditionally returns true otherwise. */ SNMALLOC_FAST_PATH bool is_initialised() { - return call_is_initialised(nullptr, 0); + return call_is_initialised(nullptr, 0); } /** @@ -329,13 +329,13 @@ namespace snmalloc {} /** - * Call `Backend::ensure_init()` if it is implemented, do + * Call `Config::ensure_init()` if it is implemented, do * nothing otherwise. */ SNMALLOC_FAST_PATH void ensure_init() { - call_ensure_init(nullptr, 0); + call_ensure_init(nullptr, 0); } public: @@ -380,7 +380,7 @@ namespace snmalloc // Initialise the global allocator structures ensure_init(); // Grab an allocator for this thread. - init(AllocPool::acquire(&(this->local_cache))); + init(AllocPool::acquire(&(this->local_cache))); } // Return all state in the fast allocator and release the underlying @@ -400,9 +400,9 @@ namespace snmalloc // Detach underlying allocator core_alloc->attached_cache = nullptr; // Return underlying allocator to the system. - if constexpr (Backend::Options.CoreAllocOwnsLocalState) + if constexpr (Config::Options.CoreAllocOwnsLocalState) { - AllocPool::release(core_alloc); + AllocPool::release(core_alloc); } // Set up thread local allocator to look like @@ -411,7 +411,7 @@ namespace snmalloc #ifdef SNMALLOC_TRACING message<1024>("flush(): core_alloc={}", core_alloc); #endif - local_cache.remote_allocator = &Backend::unused_remote; + local_cache.remote_allocator = &Config::unused_remote; local_cache.remote_dealloc_cache.capacity = 0; } } @@ -625,10 +625,10 @@ namespace snmalloc * deal with the object's extent. 
*/ capptr::Alloc p_tame = - capptr_domesticate(core_alloc->backend_state_ptr(), p_wild); + capptr_domesticate(core_alloc->backend_state_ptr(), p_wild); const PagemapEntry& entry = - Backend::Pagemap::get_metaentry(address_cast(p_tame)); + Config::Backend::get_metaentry(address_cast(p_tame)); if (SNMALLOC_LIKELY(local_cache.remote_allocator == entry.get_remote())) { # if defined(__CHERI_PURE_CAPABILITY__) && defined(SNMALLOC_CHECK_CLIENT) @@ -681,7 +681,7 @@ namespace snmalloc size = size == 0 ? 1 : size; auto sc = size_to_sizeclass_full(size); auto pm_sc = - Backend::Pagemap::get_metaentry(address_cast(p)).get_sizeclass(); + Config::Backend::get_metaentry(address_cast(p)).get_sizeclass(); auto rsize = sizeclass_full_to_size(sc); auto pm_size = sizeclass_full_to_size(pm_sc); snmalloc_check_client( @@ -723,7 +723,7 @@ namespace snmalloc #else // TODO What's the domestication policy here? At the moment we just // probe the pagemap with the raw address, without checks. There could - // be implicit domestication through the `Backend::Pagemap` or + // be implicit domestication through the `Config::Pagemap` or // we could just leave well enough alone. // Note that alloc_size should return 0 for nullptr. @@ -734,7 +734,7 @@ namespace snmalloc // entry for the first chunk of memory, that states it represents a // large object, so we can pull the check for null off the fast path. const PagemapEntry& entry = - Backend::Pagemap::get_metaentry(address_cast(p_raw)); + Config::Backend::template get_metaentry(address_cast(p_raw)); return sizeclass_full_to_size(entry.get_sizeclass()); #endif @@ -779,7 +779,7 @@ namespace snmalloc { #ifndef SNMALLOC_PASS_THROUGH const PagemapEntry& entry = - Backend::Pagemap::template get_metaentry(address_cast(p)); + Config::Backend::template get_metaentry(address_cast(p)); auto sizeclass = entry.get_sizeclass(); return snmalloc::remaining_bytes(sizeclass, address_cast(p)); @@ -790,7 +790,7 @@ namespace snmalloc bool check_bounds(const void* p, size_t s) { - if (SNMALLOC_LIKELY(Backend::Pagemap::is_initialised())) + if (SNMALLOC_LIKELY(Config::is_initialised())) { return remaining_bytes(p) >= s; } @@ -807,7 +807,7 @@ namespace snmalloc { #ifndef SNMALLOC_PASS_THROUGH const PagemapEntry& entry = - Backend::Pagemap::template get_metaentry(address_cast(p)); + Config::Backend::template get_metaentry(address_cast(p)); auto sizeclass = entry.get_sizeclass(); return snmalloc::index_in_object(sizeclass, address_cast(p)); diff --git a/src/snmalloc/mem/localcache.h b/src/snmalloc/mem/localcache.h index 2ae8ffd6..68b232e4 100644 --- a/src/snmalloc/mem/localcache.h +++ b/src/snmalloc/mem/localcache.h @@ -19,15 +19,14 @@ namespace snmalloc return p.as_void(); } - template + template inline static SNMALLOC_FAST_PATH capptr::Alloc finish_alloc(freelist::HeadPtr p, smallsizeclass_t sizeclass) { auto r = finish_alloc_no_zero(p, sizeclass); if constexpr (zero_mem == YesZero) - SharedStateHandle::Pal::zero( - r.unsafe_ptr(), sizeclass_to_size(sizeclass)); + Config::Pal::zero(r.unsafe_ptr(), sizeclass_to_size(sizeclass)); // TODO: Should this be zeroing the free Object state, in the non-zeroing // case? @@ -64,18 +63,14 @@ namespace snmalloc /** * Return all the free lists to the allocator. Used during thread teardown. 
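   *
   * Returns true if any remote deallocations were posted to other
   * allocators' message queues as part of the flush.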
*/ - template< - size_t allocator_size, - typename SharedStateHandle, - typename DeallocFun> - bool flush( - typename SharedStateHandle::LocalState* local_state, DeallocFun dealloc) + template + bool flush(typename Config::LocalState* local_state, DeallocFun dealloc) { auto& key = entropy.get_free_list_key(); - auto domesticate = - [local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(local_state, p); - }; + auto domesticate = [local_state](freelist::QueuePtr p) + SNMALLOC_FAST_PATH_LAMBDA { + return capptr_domesticate(local_state, p); + }; for (size_t i = 0; i < NUM_SMALL_SIZECLASSES; i++) { @@ -90,13 +85,13 @@ namespace snmalloc } } - return remote_dealloc_cache.post( + return remote_dealloc_cache.post( local_state, remote_allocator->trunc_id(), key_global); } template< ZeroMem zero_mem, - typename SharedStateHandle, + typename Config, typename Slowpath, typename Domesticator> SNMALLOC_FAST_PATH capptr::Alloc @@ -108,7 +103,7 @@ namespace snmalloc if (SNMALLOC_LIKELY(!fl.empty())) { auto p = fl.take(key, domesticate); - return finish_alloc(p, sizeclass); + return finish_alloc(p, sizeclass); } return slowpath(sizeclass, &fl); } diff --git a/src/snmalloc/mem/metadata.h b/src/snmalloc/mem/metadata.h index ca90ec1a..391650e6 100644 --- a/src/snmalloc/mem/metadata.h +++ b/src/snmalloc/mem/metadata.h @@ -581,6 +581,8 @@ namespace snmalloc "compatible with the front-end's structure"); public: + using SlabMetadata = BackendSlabMetadata; + constexpr FrontendMetaEntry() = default; /** diff --git a/src/snmalloc/mem/pool.h b/src/snmalloc/mem/pool.h index 119777a7..8bb716a1 100644 --- a/src/snmalloc/mem/pool.h +++ b/src/snmalloc/mem/pool.h @@ -22,7 +22,7 @@ namespace snmalloc { template< typename TT, - SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle, + SNMALLOC_CONCEPT(IsConfig) Config, PoolState& get_state()> friend class Pool; @@ -41,9 +41,7 @@ namespace snmalloc * SingletonPoolState::pool is the default provider for the PoolState within * the Pool class. */ - template< - typename T, - SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle> + template class SingletonPoolState { /** @@ -55,8 +53,8 @@ namespace snmalloc -> decltype(SharedStateHandle_::ensure_init()) { static_assert( - std::is_same::value, - "SFINAE parameter, should only be used with SharedStateHandle"); + std::is_same::value, + "SFINAE parameter, should only be used with Config"); SharedStateHandle_::ensure_init(); } @@ -68,17 +66,17 @@ namespace snmalloc SNMALLOC_FAST_PATH static auto call_ensure_init(SharedStateHandle_*, long) { static_assert( - std::is_same::value, - "SFINAE parameter, should only be used with SharedStateHandle"); + std::is_same::value, + "SFINAE parameter, should only be used with Config"); } /** - * Call `SharedStateHandle::ensure_init()` if it is implemented, do nothing + * Call `Config::ensure_init()` if it is implemented, do nothing * otherwise. 
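     *
     * Dispatch relies on the call_ensure_init overloads above: overload
     * resolution prefers the int parameter when Config::ensure_init() is
     * well-formed, and SFINAE falls back to the no-op long overload
     * otherwise.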
*/ SNMALLOC_FAST_PATH static void ensure_init() { - call_ensure_init(nullptr, 0); + call_ensure_init(nullptr, 0); } static void make_pool(PoolState*) noexcept @@ -114,8 +112,8 @@ namespace snmalloc */ template< typename T, - SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle, - PoolState& get_state() = SingletonPoolState::pool> + SNMALLOC_CONCEPT(IsConfig) Config, + PoolState& get_state() = SingletonPoolState::pool> class Pool { public: @@ -132,12 +130,11 @@ namespace snmalloc } auto raw = - SharedStateHandle::template alloc_meta_data(nullptr, sizeof(T)); + Config::Backend::template alloc_meta_data(nullptr, sizeof(T)); if (raw == nullptr) { - SharedStateHandle::Pal::error( - "Failed to initialise thread local allocator."); + Config::Pal::error("Failed to initialise thread local allocator."); } p = new (raw.unsafe_ptr()) T(std::forward(args)...); diff --git a/src/snmalloc/mem/pooled.h b/src/snmalloc/mem/pooled.h index ac1af5d1..06f96489 100644 --- a/src/snmalloc/mem/pooled.h +++ b/src/snmalloc/mem/pooled.h @@ -14,7 +14,7 @@ namespace snmalloc public: template< typename TT, - SNMALLOC_CONCEPT(ConceptBackendGlobals) SharedStateHandle, + SNMALLOC_CONCEPT(IsConfig) Config, PoolState& get_state()> friend class Pool; template diff --git a/src/snmalloc/mem/remotecache.h b/src/snmalloc/mem/remotecache.h index 01a27525..50e4c9bb 100644 --- a/src/snmalloc/mem/remotecache.h +++ b/src/snmalloc/mem/remotecache.h @@ -77,9 +77,9 @@ namespace snmalloc list[get_slot(target_id, 0)].add(r, key); } - template + template bool post( - typename Backend::LocalState* local_state, + typename Config::LocalState* local_state, RemoteAllocator::alloc_id_t id, const FreeListKey& key) { @@ -88,7 +88,7 @@ namespace snmalloc bool sent_something = false; auto domesticate = [local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA { - return capptr_domesticate(local_state, p); + return capptr_domesticate(local_state, p); }; while (true) @@ -104,7 +104,7 @@ namespace snmalloc { auto [first, last] = list[i].extract_segment(key); const auto& entry = - Backend::Pagemap::get_metaentry(address_cast(first)); + Config::Backend::get_metaentry(address_cast(first)); auto remote = entry.get_remote(); // If the allocator is not correctly aligned, then the bit that is // set implies this is used by the backend, and we should not be @@ -112,7 +112,7 @@ namespace snmalloc snmalloc_check_client( !entry.is_backend_owned(), "Delayed detection of attempt to free internal structure."); - if constexpr (Backend::Options.QueueHeadsAreTame) + if constexpr (Config::Options.QueueHeadsAreTame) { auto domesticate_nop = [](freelist::QueuePtr p) { return freelist::HeadPtr(p.unsafe_ptr()); @@ -143,7 +143,7 @@ namespace snmalloc // Use the next N bits to spread out remote deallocs in our own // slot. auto r = resend.take(key, domesticate); - const auto& entry = Backend::Pagemap::get_metaentry(address_cast(r)); + const auto& entry = Config::Backend::get_metaentry(address_cast(r)); auto i = entry.get_remote()->trunc_id(); size_t slot = get_slot(i, post_round); list[slot].add(r, key); diff --git a/src/snmalloc/mem/sizeclasstable.h b/src/snmalloc/mem/sizeclasstable.h index fa85d944..4590ebb5 100644 --- a/src/snmalloc/mem/sizeclasstable.h +++ b/src/snmalloc/mem/sizeclasstable.h @@ -36,7 +36,7 @@ namespace snmalloc // Large classes range from [MAX_SMALL_SIZECLASS_SIZE, ADDRESS_SPACE). 
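The `pool.h` hunk above keeps the overload-resolution trick its comment describes: `ensure_init` forwards to `Config::ensure_init()` only when the configuration actually defines one, and compiles to a no-op otherwise. The self-contained sketch below illustrates that detection idiom; the names and signatures are invented for illustration and are not snmalloc's exact code.

```cpp
#include <cstdio>

// Detection idiom sketch: call Config::ensure_init() if it exists,
// otherwise do nothing. Illustrative names only, not snmalloc code.
template<typename Config>
struct ToyInit
{
  // Preferred overload: viable only when C::ensure_init() exists.
  template<typename C = Config>
  static auto call_ensure_init(int) -> decltype(C::ensure_init(), void())
  {
    C::ensure_init();
  }

  // Fallback overload: chosen when the one above drops out via SFINAE.
  template<typename C = Config>
  static void call_ensure_init(long)
  {}

  static void ensure_init()
  {
    // 0 is an int, so the first overload wins whenever it is viable.
    call_ensure_init(0);
  }
};

struct WithInit
{
  static void ensure_init() { std::puts("initialising"); }
};

struct WithoutInit
{};

int main()
{
  ToyInit<WithInit>::ensure_init(); // prints "initialising"
  ToyInit<WithoutInit>::ensure_init(); // silently does nothing
}
```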
static constexpr size_t NUM_LARGE_CLASSES = - Pal::address_bits - MAX_SMALL_SIZECLASS_BITS; + DefaultPal::address_bits - MAX_SMALL_SIZECLASS_BITS; // How many bits are required to represent either a large or a small // sizeclass. diff --git a/src/snmalloc/override/malloc-extensions.cc b/src/snmalloc/override/malloc-extensions.cc index 1e0810b4..d84210ef 100644 --- a/src/snmalloc/override/malloc-extensions.cc +++ b/src/snmalloc/override/malloc-extensions.cc @@ -6,8 +6,8 @@ using namespace snmalloc; void get_malloc_info_v1(malloc_info_v1* stats) { - auto curr = Globals::get_current_usage(); - auto peak = Globals::get_peak_usage(); + auto curr = StandardConfig::Backend::get_current_usage(); + auto peak = StandardConfig::Backend::get_peak_usage(); stats->current_memory_usage = curr; stats->peak_memory_usage = peak; } diff --git a/src/snmalloc/override/rust.cc b/src/snmalloc/override/rust.cc index 88923232..64da984c 100644 --- a/src/snmalloc/override/rust.cc +++ b/src/snmalloc/override/rust.cc @@ -48,6 +48,6 @@ extern "C" SNMALLOC_EXPORT void* SNMALLOC_NAME_MANGLE(rust_realloc)( extern "C" SNMALLOC_EXPORT void SNMALLOC_NAME_MANGLE(rust_statistics)( size_t* current_memory_usage, size_t* peak_memory_usage) { - *current_memory_usage = Globals::get_current_usage(); - *peak_memory_usage = Globals::get_peak_usage(); + *current_memory_usage = StandardConfig::Backend::get_current_usage(); + *peak_memory_usage = StandardConfig::Backend::get_peak_usage(); } \ No newline at end of file diff --git a/src/snmalloc/pal/pal.h b/src/snmalloc/pal/pal.h index 697b22cb..a8ac8a78 100644 --- a/src/snmalloc/pal/pal.h +++ b/src/snmalloc/pal/pal.h @@ -37,49 +37,42 @@ namespace snmalloc { -#if !defined(OPEN_ENCLAVE) || defined(OPEN_ENCLAVE_SIMULATION) using DefaultPal = -# if defined(_WIN32) - PALWindows; -# elif defined(__APPLE__) - PALApple<>; -# elif defined(__linux__) - PALLinux; -# elif defined(FreeBSD_KERNEL) - PALFreeBSDKernel; -# elif defined(__FreeBSD__) - PALFreeBSD; -# elif defined(__HAIKU__) - PALHaiku; -# elif defined(__NetBSD__) - PALNetBSD; -# elif defined(__OpenBSD__) - PALOpenBSD; -# elif defined(__sun) - PALSolaris; -# elif defined(__DragonFly__) - PALDragonfly; -# else -# error Unsupported platform -# endif -#endif - - using Pal = #if defined(SNMALLOC_MEMORY_PROVIDER) - PALPlainMixin; + SNMALLOC_MEMORY_PROVIDER; #elif defined(OPEN_ENCLAVE) PALOpenEnclave; +#elif defined(_WIN32) + PALWindows; +#elif defined(__APPLE__) + PALApple<>; +#elif defined(__linux__) + PALLinux; +#elif defined(FreeBSD_KERNEL) + PALFreeBSDKernel; +#elif defined(__FreeBSD__) + PALFreeBSD; +#elif defined(__HAIKU__) + PALHaiku; +#elif defined(__NetBSD__) + PALNetBSD; +#elif defined(__OpenBSD__) + PALOpenBSD; +#elif defined(__sun) + PALSolaris; +#elif defined(__DragonFly__) + PALDragonfly; #else - DefaultPal; +# error Unsupported platform #endif [[noreturn]] SNMALLOC_SLOW_PATH inline void error(const char* const str) { - Pal::error(str); + DefaultPal::error(str); } // Used to keep Superslab metadata committed. - static constexpr size_t OS_PAGE_SIZE = Pal::page_size; + static constexpr size_t OS_PAGE_SIZE = DefaultPal::page_size; /** * Perform platform-specific adjustment of return pointers. @@ -88,7 +81,7 @@ namespace snmalloc * disruption to PALs for platforms that do not support StrictProvenance AALs. 
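In the `pal.h` changes here, the global `Pal` alias goes away: platform selection now yields `DefaultPal` directly, and free-standing helpers (`error`, `message`, `OS_PAGE_SIZE`, the tests) use it by name, while code that is parameterised by a configuration keeps reaching the platform layer as `Config::Pal` (as in the earlier `localcache.h` and `pool.h` hunks). The self-contained sketch below illustrates that split; the `Toy*` names are invented for illustration and are not snmalloc identifiers.

```cpp
#include <cstdio>

// Invented stand-in for a platform abstraction layer (PAL).
struct ToyLinuxPal
{
  static constexpr int page_size = 4096;
  static void message(const char* s) { std::puts(s); }
};

// The platform-selected fallback, analogous to DefaultPal.
using ToyDefaultPal = ToyLinuxPal;

// Analogue of OS_PAGE_SIZE being derived from the default platform layer.
constexpr int TOY_OS_PAGE_SIZE = ToyDefaultPal::page_size;

// A configuration carries its own PAL choice, which need not be the
// platform default.
struct ToyConfig
{
  using Pal = ToyLinuxPal;
};

// Configured code reaches the PAL through the configuration...
template<typename Config>
void configured_message(const char* s)
{
  Config::Pal::message(s);
}

// ...while free-standing helpers (error reporting, tests) use the default.
void default_message(const char* s)
{
  ToyDefaultPal::message(s);
}

int main()
{
  configured_message<ToyConfig>("via Config::Pal");
  default_message("via the default PAL");
  std::printf("page size: %d\n", TOY_OS_PAGE_SIZE);
}
```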
*/ template< - typename PAL = Pal, + typename PAL = DefaultPal, typename AAL = Aal, typename T, SNMALLOC_CONCEPT(capptr::ConceptBound) B> @@ -101,7 +94,7 @@ namespace snmalloc } template< - typename PAL = Pal, + typename PAL = DefaultPal, typename AAL = Aal, typename T, SNMALLOC_CONCEPT(capptr::ConceptBound) B> @@ -177,7 +170,7 @@ namespace snmalloc [[noreturn]] inline void report_fatal_error(Args... args) { MessageBuilder msg{std::forward(args)...}; - Pal::error(msg.get_message()); + DefaultPal::error(msg.get_message()); } static inline size_t get_tid() @@ -197,6 +190,6 @@ namespace snmalloc { MessageBuilder msg{std::forward(args)...}; MessageBuilder msg_tid{"{}: {}", get_tid(), msg.get_message()}; - Pal::message(msg_tid.get_message()); + DefaultPal::message(msg_tid.get_message()); } } // namespace snmalloc diff --git a/src/test/func/domestication/domestication.cc b/src/test/func/domestication/domestication.cc index 726a2f39..123a4413 100644 --- a/src/test/func/domestication/domestication.cc +++ b/src/test/func/domestication/domestication.cc @@ -11,20 +11,38 @@ int main() // # define SNMALLOC_TRACING # include +# include +# include # include // Specify type of allocator # define SNMALLOC_PROVIDE_OWN_CONFIG namespace snmalloc { - class CustomGlobals : public BackendAllocator + class CustomConfig : public CommonConfig { public: - using GlobalPoolState = PoolState>; + using Pal = DefaultPal; + using PagemapEntry = DefaultPagemapEntry; private: - using Backend = BackendAllocator; + using ConcretePagemap = + FlatPagemap; + public: + using Pagemap = BasicPagemap; + + public: + using LocalState = StandardLocalState< + Pal, + Pagemap, + Pipe, PagemapRegisterRange>>; + + using GlobalPoolState = PoolState>; + + using Backend = BackendAllocator; + + private: SNMALLOC_REQUIRE_CONSTINIT inline static GlobalPoolState alloc_pool; @@ -65,7 +83,7 @@ namespace snmalloc static CapPtr< T, typename B::template with_wildness> - capptr_domesticate(typename Backend::LocalState*, CapPtr p) + capptr_domesticate(LocalState*, CapPtr p) { domesticate_count++; @@ -85,7 +103,7 @@ namespace snmalloc { std::cout << "Patching over corruption" << std::endl; *domesticate_patch_location = domesticate_patch_value; - snmalloc::CustomGlobals::domesticate_patch_location = nullptr; + snmalloc::CustomConfig::domesticate_patch_location = nullptr; } return CapPtr< @@ -95,7 +113,7 @@ namespace snmalloc } }; - using Alloc = LocalAllocator; + using Alloc = LocalAllocator; } # define SNMALLOC_NAME_MANGLE(a) test_##a @@ -103,11 +121,11 @@ namespace snmalloc int main() { - snmalloc::CustomGlobals::init(); // init pagemap - snmalloc::CustomGlobals::domesticate_count = 0; + snmalloc::CustomConfig::Pagemap::concretePagemap.init(); // init pagemap + snmalloc::CustomConfig::domesticate_count = 0; LocalEntropy entropy; - entropy.init(); + entropy.init(); key_global = FreeListKey(entropy.get_free_list_key()); auto alloc1 = new Alloc(); @@ -123,21 +141,20 @@ int main() alloc2->flush(); // Clobber the linkage but not the back pointer - snmalloc::CustomGlobals::domesticate_patch_location = + snmalloc::CustomConfig::domesticate_patch_location = static_cast(p); - snmalloc::CustomGlobals::domesticate_patch_value = - *static_cast(p); + snmalloc::CustomConfig::domesticate_patch_value = *static_cast(p); memset(p, 0xA5, sizeof(void*)); - snmalloc::CustomGlobals::domesticate_trace = true; - snmalloc::CustomGlobals::domesticate_count = 0; + snmalloc::CustomConfig::domesticate_trace = true; + snmalloc::CustomConfig::domesticate_count = 0; // Open a new slab, 
so that slow path will pick up the message queue. That // means this should be a sizeclass we've not used before, even internally. auto q = alloc1->alloc(512); std::cout << "Allocated q " << q << std::endl; - snmalloc::CustomGlobals::domesticate_trace = false; + snmalloc::CustomConfig::domesticate_trace = false; /* * Expected domestication calls in the above message passing: @@ -152,8 +169,8 @@ int main() * after q). */ static constexpr size_t expected_count = - snmalloc::CustomGlobals::Options.QueueHeadsAreTame ? 2 : 3; - SNMALLOC_CHECK(snmalloc::CustomGlobals::domesticate_count == expected_count); + snmalloc::CustomConfig::Options.QueueHeadsAreTame ? 2 : 3; + SNMALLOC_CHECK(snmalloc::CustomConfig::domesticate_count == expected_count); // Prevent the allocators from going out of scope during the above test alloc1->flush(); diff --git a/src/test/func/fixed_region/fixed_region.cc b/src/test/func/fixed_region/fixed_region.cc index 0a996273..2c00c7b8 100644 --- a/src/test/func/fixed_region/fixed_region.cc +++ b/src/test/func/fixed_region/fixed_region.cc @@ -11,7 +11,7 @@ using namespace snmalloc; -using CustomGlobals = FixedGlobals>; +using CustomGlobals = FixedRangeConfig>; using FixedAlloc = LocalAllocator; int main() @@ -23,8 +23,8 @@ int main() // It is also large enough for the example to run in. // For 1MiB superslabs, SUPERSLAB_BITS + 4 is not big enough for the example. auto size = bits::one_at_bit(28); - auto oe_base = Pal::reserve(size); - Pal::notify_using(oe_base, size); + auto oe_base = DefaultPal::reserve(size); + DefaultPal::notify_using(oe_base, size); auto oe_end = pointer_offset(oe_base, size); std::cout << "Allocated region " << oe_base << " - " << pointer_offset(oe_base, size) << std::endl; diff --git a/src/test/func/malloc/malloc.cc b/src/test/func/malloc/malloc.cc index 04b88397..e94b6a99 100644 --- a/src/test/func/malloc/malloc.cc +++ b/src/test/func/malloc/malloc.cc @@ -367,6 +367,6 @@ int main(int argc, char** argv) our_malloc_usable_size(nullptr) == 0, "malloc_usable_size(nullptr) should be zero"); - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); return 0; } diff --git a/src/test/func/memory/memory.cc b/src/test/func/memory/memory.cc index 57cafc23..292b35f0 100644 --- a/src/test/func/memory/memory.cc +++ b/src/test/func/memory/memory.cc @@ -184,7 +184,7 @@ void test_calloc() alloc.dealloc(p, size); } - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); } void test_double_alloc() @@ -229,7 +229,7 @@ void test_double_alloc() } } } - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); } void test_external_pointer() @@ -273,7 +273,7 @@ void test_external_pointer() alloc.dealloc(p1, size); } - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); }; void check_offset(void* base, void* interior) @@ -305,7 +305,7 @@ void test_external_pointer_large() auto& alloc = ThreadAlloc::get(); - constexpr size_t count_log = Pal::address_bits > 32 ? 5 : 3; + constexpr size_t count_log = DefaultPal::address_bits > 32 ? 
5 : 3; constexpr size_t count = 1 << count_log; // Pre allocate all the objects size_t* objects[count]; diff --git a/src/test/func/pagemap/pagemap.cc b/src/test/func/pagemap/pagemap.cc index 1f00ff8a..e6318dfb 100644 --- a/src/test/func/pagemap/pagemap.cc +++ b/src/test/func/pagemap/pagemap.cc @@ -65,8 +65,8 @@ void test_pagemap(bool bounded) if (bounded) { auto size = bits::one_at_bit(30); - auto base = Pal::reserve(size); - Pal::notify_using(base, size); + auto base = DefaultPal::reserve(size); + DefaultPal::notify_using(base, size); std::cout << "Fixed base: " << base << " (" << size << ") " << " end: " << pointer_offset(base, size) << std::endl; auto [heap_base, heap_size] = pagemap_test_bound.init(base, size); diff --git a/src/test/func/pool/pool.cc b/src/test/func/pool/pool.cc index 215c3935..7eaca6bc 100644 --- a/src/test/func/pool/pool.cc +++ b/src/test/func/pool/pool.cc @@ -12,7 +12,7 @@ struct PoolAEntry : Pooled PoolAEntry() : field(1){}; }; -using PoolA = Pool; +using PoolA = Pool; struct PoolBEntry : Pooled { @@ -22,14 +22,14 @@ struct PoolBEntry : Pooled PoolBEntry(int f) : field(f){}; }; -using PoolB = Pool; +using PoolB = Pool; void test_alloc() { auto ptr = PoolA::acquire(); SNMALLOC_CHECK(ptr != nullptr); // Pool allocations should not be visible to debug_check_empty. - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); PoolA::release(ptr); } diff --git a/src/test/func/statistics/stats.cc b/src/test/func/statistics/stats.cc index d83dd330..2de3e2d9 100644 --- a/src/test/func/statistics/stats.cc +++ b/src/test/func/statistics/stats.cc @@ -8,7 +8,7 @@ int main() auto r = a.alloc(16); - snmalloc::debug_check_empty(&result); + snmalloc::debug_check_empty(&result); if (result != false) { abort(); @@ -16,7 +16,7 @@ int main() a.dealloc(r); - snmalloc::debug_check_empty(&result); + snmalloc::debug_check_empty(&result); if (result != true) { abort(); @@ -24,7 +24,7 @@ int main() r = a.alloc(16); - snmalloc::debug_check_empty(&result); + snmalloc::debug_check_empty(&result); if (result != false) { abort(); @@ -32,7 +32,7 @@ int main() a.dealloc(r); - snmalloc::debug_check_empty(&result); + snmalloc::debug_check_empty(&result); if (result != true) { abort(); diff --git a/src/test/func/thread_alloc_external/thread_alloc_external.cc b/src/test/func/thread_alloc_external/thread_alloc_external.cc index b8b1b231..2b10ed8c 100644 --- a/src/test/func/thread_alloc_external/thread_alloc_external.cc +++ b/src/test/func/thread_alloc_external/thread_alloc_external.cc @@ -12,7 +12,7 @@ namespace snmalloc { - using Alloc = snmalloc::LocalAllocator; + using Alloc = snmalloc::LocalAllocator; } using namespace snmalloc; diff --git a/src/test/func/two_alloc_types/alloc1.cc b/src/test/func/two_alloc_types/alloc1.cc index 8bfe4134..74996b51 100644 --- a/src/test/func/two_alloc_types/alloc1.cc +++ b/src/test/func/two_alloc_types/alloc1.cc @@ -12,7 +12,7 @@ #define SNMALLOC_PROVIDE_OWN_CONFIG namespace snmalloc { - using CustomGlobals = FixedGlobals>; + using CustomGlobals = FixedRangeConfig>; using Alloc = LocalAllocator; } diff --git a/src/test/helpers.h b/src/test/helpers.h index 7e11ba2a..30f6e465 100644 --- a/src/test/helpers.h +++ b/src/test/helpers.h @@ -19,7 +19,7 @@ namespace snmalloc { \ current_test = __PRETTY_FUNCTION__; \ MessageBuilder<1024> mb{"Starting test: " msg "\n", ##__VA_ARGS__}; \ - Pal::message(mb.get_message()); \ + DefaultPal::message(mb.get_message()); \ } while (0) /** @@ -33,7 +33,7 @@ namespace snmalloc do \ { \ MessageBuilder<1024> mb{msg "\n", ##__VA_ARGS__}; 
\ - Pal::message(mb.get_message()); \ + DefaultPal::message(mb.get_message()); \ } while (0) } diff --git a/src/test/perf/contention/contention.cc b/src/test/perf/contention/contention.cc index bcea629d..e266f049 100644 --- a/src/test/perf/contention/contention.cc +++ b/src/test/perf/contention/contention.cc @@ -154,7 +154,7 @@ void test_tasks(size_t num_tasks, size_t count, size_t size) } #ifndef NDEBUG - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); #endif }; diff --git a/src/test/perf/external_pointer/externalpointer.cc b/src/test/perf/external_pointer/externalpointer.cc index b2509a65..be3306cb 100644 --- a/src/test/perf/external_pointer/externalpointer.cc +++ b/src/test/perf/external_pointer/externalpointer.cc @@ -19,7 +19,7 @@ namespace test { size_t rand = (size_t)r.next(); size_t offset = bits::clz(rand); - if constexpr (Pal::address_bits > 32) + if constexpr (DefaultPal::address_bits > 32) { if (offset > 30) offset = 30; @@ -47,7 +47,7 @@ namespace test alloc.dealloc(objects[i]); } - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); } void test_external_pointer(xoroshiro::p128r64& r) diff --git a/src/test/perf/singlethread/singlethread.cc b/src/test/perf/singlethread/singlethread.cc index 29595230..b93dcd42 100644 --- a/src/test/perf/singlethread/singlethread.cc +++ b/src/test/perf/singlethread/singlethread.cc @@ -60,7 +60,7 @@ void test_alloc_dealloc(size_t count, size_t size, bool write) } } - snmalloc::debug_check_empty(); + snmalloc::debug_check_empty(); } int main(int, char**) diff --git a/src/test/setup.h b/src/test/setup.h index 642720dd..61f9a991 100644 --- a/src/test/setup.h +++ b/src/test/setup.h @@ -64,7 +64,7 @@ void print_stack_trace() void _cdecl error(int signal) { snmalloc::UNUSED(signal); - snmalloc::Pal::message("*****ABORT******"); + snmalloc::DefaultPal::message("*****ABORT******"); print_stack_trace(); @@ -75,7 +75,7 @@ LONG WINAPI VectoredHandler(struct _EXCEPTION_POINTERS* ExceptionInfo) { snmalloc::UNUSED(ExceptionInfo); - snmalloc::Pal::message("*****UNHANDLED EXCEPTION******"); + snmalloc::DefaultPal::message("*****UNHANDLED EXCEPTION******"); print_stack_trace();
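Taken together, these hunks converge on a single `Config` template parameter: pools, caches, tests, and the overrides now reach the platform layer as `Config::Pal`, the pagemap and statistics as `Config::Backend`, and behavioural switches as `Config::Options`, instead of threading separate `SharedStateHandle`/`Backend`/`Globals` parameters around. The sketch below shows the shape of that pattern in isolation; it is not snmalloc code, and every name in it is invented.

```cpp
#include <cstddef>
#include <cstdio>

// Invented stand-ins for the pieces a configuration bundles together.
struct ToyPal
{
  static void message(const char* s) { std::puts(s); }
};

struct ToyBackend
{
  static std::size_t get_current_usage() { return 0; }
  static std::size_t get_peak_usage() { return 0; }
};

struct ToyFlags
{
  bool QueueHeadsAreTame = true;
};

// One aggregate type replaces several independent template parameters.
struct ToyConfig
{
  using Pal = ToyPal;
  using Backend = ToyBackend;
  static constexpr ToyFlags Options{};
};

// A component that previously took a separate shared-state handle now
// takes Config and resolves everything it needs through it.
template<typename T, typename Config>
struct ToyPool
{
  static T* acquire()
  {
    Config::Pal::message("acquiring from pool");
    return new T();
  }
};

template<typename Config>
void report()
{
  std::printf(
    "current %zu, peak %zu\n",
    Config::Backend::get_current_usage(),
    Config::Backend::get_peak_usage());

  // Compile-time behavioural switches also hang off the configuration.
  if constexpr (Config::Options.QueueHeadsAreTame)
    Config::Pal::message("queue heads are tame");
}

int main()
{
  int* p = ToyPool<int, ToyConfig>::acquire();
  report<ToyConfig>();
  delete p;
}
```

The pay-off visible throughout the diff is that adding a new dependency to a component only touches the configuration class, not every template parameter list that mentions it.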