Mirror of https://github.com/microsoft/snmalloc.git
Made a sizeclass_t to wrap the sizeclass
This is useful because codegen is better if we use size_t, but the semantics are those of uint8_t, and the value is stored as uint8_t in many places in the metadata. Ultimately we should introduce a wrapper type to check this invariant.
This commit is contained in:
Parent: 830b06a616
Commit: 7a8eaec2cc
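The commit itself only introduces the "using sizeclass_t = size_t;" alias and narrows with static_cast<uint8_t> at the metadata storage sites; the checking wrapper is left as future work. Below is a minimal sketch of what such a wrapper could look like, assuming illustrative names (the sketch namespace, as_index, and the NUM_SIZECLASSES_SKETCH bound) that are not snmalloc code.

// Hypothetical sketch, not part of this commit: the diff keeps
// "using sizeclass_t = size_t;" and narrows with static_cast<uint8_t> where
// the value is stored in metadata. NUM_SIZECLASSES_SKETCH is a placeholder
// bound standing in for snmalloc's real NUM_SIZECLASSES.
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace sketch
{
  constexpr std::size_t NUM_SIZECLASSES_SKETCH = 75;

  class sizeclass_t
  {
    // Stored narrow, matching how the metadata stores the value today.
    std::uint8_t value;

  public:
    explicit sizeclass_t(std::size_t v) : value(static_cast<std::uint8_t>(v))
    {
      // The invariant the commit message wants checked on every narrowing.
      assert(v < NUM_SIZECLASSES_SKETCH);
    }

    // Widen back to size_t at use sites so indexing and arithmetic stay on
    // the native word size, which is what makes the codegen nicer.
    std::size_t as_index() const
    {
      return static_cast<std::size_t>(value);
    }
  };
} // namespace sketch

int main()
{
  static std::size_t sizes[sketch::NUM_SIZECLASSES_SKETCH] = {};
  sketch::sizeclass_t sc{12};
  return static_cast<int>(sizes[sc.as_index()]);
}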
@@ -277,7 +277,7 @@ namespace snmalloc
       else
         return calloc(1, size);
 #else
-      constexpr uint8_t sizeclass = size_to_sizeclass_const(size);
+      constexpr sizeclass_t sizeclass = size_to_sizeclass_const(size);
 
       stats().alloc_request(size);
 
@@ -317,7 +317,7 @@ namespace snmalloc
 
       handle_message_queue();
 
-      uint8_t sizeclass = size_to_sizeclass(size);
+      sizeclass_t sizeclass = size_to_sizeclass(size);
 
       // Allocate memory of a dynamically known size.
       if (sizeclass < NUM_SMALL_CLASSES)
@@ -346,7 +346,7 @@ namespace snmalloc
         return free(p);
 #else
 
-      constexpr uint8_t sizeclass = size_to_sizeclass_const(size);
+      constexpr sizeclass_t sizeclass = size_to_sizeclass_const(size);
 
       handle_message_queue();
 
@@ -389,7 +389,7 @@ namespace snmalloc
 
       // Free memory of a dynamically known size. Must be called with an
       // external pointer.
-      uint8_t sizeclass = size_to_sizeclass(size);
+      sizeclass_t sizeclass = size_to_sizeclass(size);
 
       if (sizeclass < NUM_SMALL_CLASSES)
       {
@@ -445,7 +445,7 @@ namespace snmalloc
         // Reading a remote sizeclass won't fail, since the other allocator
         // can't reuse the slab, as we have not yet deallocated this
         // pointer.
-        uint8_t sizeclass = meta.sizeclass;
+        sizeclass_t sizeclass = meta.sizeclass;
 
         if (super->get_allocator() == public_state())
           small_dealloc(super, p, sizeclass);
@@ -460,7 +460,7 @@ namespace snmalloc
 
         // Reading a remote sizeclass won't fail, since the other allocator
         // can't reuse the slab, as we have no yet deallocated this pointer.
-        uint8_t sizeclass = slab->get_sizeclass();
+        sizeclass_t sizeclass = slab->get_sizeclass();
 
         if (target == public_state())
           medium_dealloc(slab, p, sizeclass);
@@ -494,7 +494,7 @@ namespace snmalloc
         Slab* slab = Slab::get(p);
         Metaslab& meta = super->get_meta(slab);
 
-        uint8_t sc = meta.sizeclass;
+        sizeclass_t sc = meta.sizeclass;
         size_t slab_end = static_cast<size_t>(address_cast(slab) + SLAB_SIZE);
 
         return external_pointer<location>(p, sc, slab_end);
@@ -503,7 +503,7 @@ namespace snmalloc
       {
         Mediumslab* slab = Mediumslab::get(p);
 
-        uint8_t sc = slab->get_sizeclass();
+        sizeclass_t sc = slab->get_sizeclass();
         size_t slab_end =
           static_cast<size_t>(address_cast(slab) + SUPERSLAB_SIZE);
 
@@ -622,7 +622,7 @@ namespace snmalloc
         return (id >> (initial_shift + (r * REMOTE_SLOT_BITS))) & REMOTE_MASK;
       }
 
-      void dealloc(alloc_id_t target_id, void* p, uint8_t sizeclass)
+      void dealloc(alloc_id_t target_id, void* p, sizeclass_t sizeclass)
       {
         this->size += sizeclass_to_size(sizeclass);
 
@@ -706,7 +706,7 @@ namespace snmalloc
 #ifdef CACHE_FRIENDLY_OFFSET
     size_t remote_offset = 0;
 
-    void* apply_cache_friendly_offset(void* p, uint8_t sizeclass)
+    void* apply_cache_friendly_offset(void* p, sizeclass_t sizeclass)
     {
       size_t mask = sizeclass_to_cache_friendly_mask(sizeclass);
 
@@ -716,7 +716,7 @@ namespace snmalloc
       return (void*)((uintptr_t)p + offset);
     }
 #else
-    void* apply_cache_friendly_offset(void* p, uint8_t sizeclass)
+    void* apply_cache_friendly_offset(void* p, sizeclass_t sizeclass)
     {
       UNUSED(sizeclass);
       return p;
@@ -770,11 +770,11 @@ namespace snmalloc
       message_queue().invariant();
 
 #ifndef NDEBUG
-      for (uint8_t i = 0; i < NUM_SIZECLASSES; i++)
+      for (sizeclass_t i = 0; i < NUM_SIZECLASSES; i++)
       {
         size_t size = sizeclass_to_size(i);
-        uint8_t sc1 = size_to_sizeclass(size);
-        uint8_t sc2 = size_to_sizeclass_const(size);
+        sizeclass_t sc1 = size_to_sizeclass(size);
+        sizeclass_t sc2 = size_to_sizeclass_const(size);
         size_t size1 = sizeclass_to_size(sc1);
         size_t size2 = sizeclass_to_size(sc2);
 
@@ -794,7 +794,7 @@ namespace snmalloc
 
     template<Boundary location>
     static uintptr_t
-    external_pointer(void* p, uint8_t sizeclass, size_t end_point)
+    external_pointer(void* p, sizeclass_t sizeclass, size_t end_point)
     {
       size_t rsize = sizeclass_to_size(sizeclass);
       size_t end_point_correction = location == End ?
@@ -840,7 +840,7 @@ namespace snmalloc
         Mediumslab* slab = Mediumslab::get(p);
         if (p->target_id() == id())
         {
-          uint8_t sizeclass = slab->get_sizeclass();
+          sizeclass_t sizeclass = slab->get_sizeclass();
           void* start = remove_cache_friendly_offset(p, sizeclass);
           medium_dealloc(slab, start, sizeclass);
         }
@@ -938,7 +938,7 @@ namespace snmalloc
     }
 
     template<AllowReserve allow_reserve>
-    Slab* alloc_slab(uint8_t sizeclass)
+    Slab* alloc_slab(sizeclass_t sizeclass)
     {
       stats().sizeclass_alloc_slab(sizeclass);
       if (Superslab::is_short_sizeclass(sizeclass))
@@ -978,7 +978,7 @@ namespace snmalloc
     }
 
     template<ZeroMem zero_mem, AllowReserve allow_reserve>
-    void* small_alloc(uint8_t sizeclass, size_t rsize)
+    void* small_alloc(sizeclass_t sizeclass, size_t rsize)
     {
       MEASURE_TIME_MARKERS(
         small_alloc,
@@ -1011,7 +1011,7 @@ namespace snmalloc
       return slab->alloc<zero_mem>(sc, rsize, large_allocator.memory_provider);
     }
 
-    void small_dealloc(Superslab* super, void* p, uint8_t sizeclass)
+    void small_dealloc(Superslab* super, void* p, sizeclass_t sizeclass)
     {
 #ifdef CHECK_CLIENT
       Slab* slab = Slab::get(p);
@@ -1025,7 +1025,8 @@ namespace snmalloc
       small_dealloc_offseted(super, offseted, sizeclass);
     }
 
-    void small_dealloc_offseted(Superslab* super, void* p, uint8_t sizeclass)
+    void
+    small_dealloc_offseted(Superslab* super, void* p, sizeclass_t sizeclass)
     {
       MEASURE_TIME(small_dealloc, 4, 16);
       stats().sizeclass_dealloc(sizeclass);
@@ -1100,7 +1101,7 @@ namespace snmalloc
     }
 
     template<ZeroMem zero_mem, AllowReserve allow_reserve>
-    void* medium_alloc(uint8_t sizeclass, size_t rsize, size_t size)
+    void* medium_alloc(sizeclass_t sizeclass, size_t rsize, size_t size)
     {
       MEASURE_TIME_MARKERS(
         medium_alloc,
@@ -1110,7 +1111,7 @@ namespace snmalloc
         zero_mem == YesZero ? "zeromem" : "nozeromem",
         allow_reserve == NoReserve ? "noreserve" : "reserve"));
 
-      uint8_t medium_class = sizeclass - NUM_SMALL_CLASSES;
+      sizeclass_t medium_class = sizeclass - NUM_SMALL_CLASSES;
 
       DLList<Mediumslab>* sc = &medium_classes[medium_class];
       Mediumslab* slab = sc->get_head();
@@ -1144,7 +1145,7 @@ namespace snmalloc
       return p;
     }
 
-    void medium_dealloc(Mediumslab* slab, void* p, uint8_t sizeclass)
+    void medium_dealloc(Mediumslab* slab, void* p, sizeclass_t sizeclass)
     {
       MEASURE_TIME(medium_dealloc, 4, 16);
       stats().sizeclass_dealloc(sizeclass);
@@ -1163,7 +1164,7 @@ namespace snmalloc
       {
         if (!was_full)
         {
-          uint8_t medium_class = sizeclass - NUM_SMALL_CLASSES;
+          sizeclass_t medium_class = sizeclass - NUM_SMALL_CLASSES;
           DLList<Mediumslab>* sc = &medium_classes[medium_class];
           sc->remove(slab);
         }
@@ -1180,7 +1181,7 @@ namespace snmalloc
       }
       else if (was_full)
       {
-        uint8_t medium_class = sizeclass - NUM_SMALL_CLASSES;
+        sizeclass_t medium_class = sizeclass - NUM_SMALL_CLASSES;
         DLList<Mediumslab>* sc = &medium_classes[medium_class];
         sc->insert(slab);
       }
@@ -1233,7 +1234,7 @@ namespace snmalloc
       large_allocator.dealloc(slab, large_class);
     }
 
-    void remote_dealloc(RemoteAllocator* target, void* p, uint8_t sizeclass)
+    void remote_dealloc(RemoteAllocator* target, void* p, sizeclass_t sizeclass)
     {
       MEASURE_TIME(remote_dealloc, 4, 16);
 

@@ -1,6 +1,7 @@
 #pragma once
 
 #include "../ds/bits.h"
+#include "../mem/sizeclass.h"
 
 #include <cstdint>
 
@@ -165,7 +166,7 @@ namespace snmalloc
 #endif
     }
 
-    void sizeclass_alloc(uint8_t sc)
+    void sizeclass_alloc(sizeclass_t sc)
     {
       UNUSED(sc);
 
@@ -175,7 +176,7 @@ namespace snmalloc
 #endif
     }
 
-    void sizeclass_dealloc(uint8_t sc)
+    void sizeclass_dealloc(sizeclass_t sc)
     {
       UNUSED(sc);
 
@@ -194,7 +195,7 @@ namespace snmalloc
 #endif
     }
 
-    void sizeclass_alloc_slab(uint8_t sc)
+    void sizeclass_alloc_slab(sizeclass_t sc)
     {
       UNUSED(sc);
 
@@ -204,7 +205,7 @@ namespace snmalloc
 #endif
     }
 
-    void sizeclass_dealloc_slab(uint8_t sc)
+    void sizeclass_dealloc_slab(sizeclass_t sc)
     {
       UNUSED(sc);
 
@@ -251,7 +252,7 @@ namespace snmalloc
 #endif
     }
 
-    void remote_free(uint8_t sc)
+    void remote_free(sizeclass_t sc)
     {
       UNUSED(sc);
 
@@ -267,7 +268,7 @@ namespace snmalloc
 #endif
     }
 
-    void remote_receive(uint8_t sc)
+    void remote_receive(sizeclass_t sc)
     {
       UNUSED(sc);
 
@@ -348,7 +349,7 @@ namespace snmalloc
             << "Count" << csv.endl;
       }
 
-      for (uint8_t i = 0; i < N; i++)
+      for (sizeclass_t i = 0; i < N; i++)
       {
         if (sizeclass[i].count.is_unused())
           continue;

@@ -44,7 +44,7 @@ namespace snmalloc
       return pointer_cast<Mediumslab>(address_cast(p) & SUPERSLAB_MASK);
     }
 
-    void init(RemoteAllocator* alloc, uint8_t sc, size_t rsize)
+    void init(RemoteAllocator* alloc, sizeclass_t sc, size_t rsize)
     {
       assert(sc >= NUM_SMALL_CLASSES);
       assert((sc - NUM_SMALL_CLASSES) < NUM_MEDIUM_CLASSES);
@@ -56,7 +56,7 @@ namespace snmalloc
       // initialise the allocation stack.
       if ((kind != Medium) || (sizeclass != sc))
       {
-        sizeclass = sc;
+        sizeclass = static_cast<uint8_t>(sc);
         uint16_t ssize = static_cast<uint16_t>(rsize >> 8);
         kind = Medium;
         free = medium_slab_free(sc);

@@ -4,28 +4,34 @@
 
 namespace snmalloc
 {
-  constexpr static uint16_t get_initial_bumpptr(uint8_t sc, bool is_short);
-  constexpr static uint16_t get_initial_link(uint8_t sc, bool is_short);
-  constexpr static size_t sizeclass_to_size(uint8_t sizeclass);
-  constexpr static size_t sizeclass_to_cache_friendly_mask(uint8_t sizeclass);
-  constexpr static size_t sizeclass_to_inverse_cache_friendly_mask(uint8_t sc);
-  constexpr static uint16_t medium_slab_free(uint8_t sizeclass);
+  // Both usings should compile
+  using sizeclass_t = size_t;
+  // using sizeclass_t = uint8_t;
 
-  static inline uint8_t size_to_sizeclass(size_t size)
+  constexpr static uint16_t get_initial_bumpptr(sizeclass_t sc, bool is_short);
+  constexpr static uint16_t get_initial_link(sizeclass_t sc, bool is_short);
+  constexpr static size_t sizeclass_to_size(sizeclass_t sizeclass);
+  constexpr static size_t
+  sizeclass_to_cache_friendly_mask(sizeclass_t sizeclass);
+  constexpr static size_t
+  sizeclass_to_inverse_cache_friendly_mask(sizeclass_t sc);
+  constexpr static uint16_t medium_slab_free(sizeclass_t sizeclass);
+
+  static inline sizeclass_t size_to_sizeclass(size_t size)
   {
     // Don't use sizeclasses that are not a multiple of the alignment.
     // For example, 24 byte allocations can be
     // problematic for some data due to alignment issues.
-    return static_cast<uint8_t>(
+    return static_cast<sizeclass_t>(
       bits::to_exp_mant<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(size));
   }
 
-  constexpr static inline uint8_t size_to_sizeclass_const(size_t size)
+  constexpr static inline sizeclass_t size_to_sizeclass_const(size_t size)
   {
     // Don't use sizeclasses that are not a multiple of the alignment.
     // For example, 24 byte allocations can be
     // problematic for some data due to alignment issues.
-    return static_cast<uint8_t>(
+    return static_cast<sizeclass_t>(
       bits::to_exp_mant_const<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(size));
   }
 
@@ -143,26 +149,28 @@ namespace snmalloc
   }
 
 #ifdef CACHE_FRIENDLY_OFFSET
-  inline static void* remove_cache_friendly_offset(void* p, uint8_t sizeclass)
+  inline static void*
+  remove_cache_friendly_offset(void* p, sizeclass_t sizeclass)
   {
     size_t mask = sizeclass_to_inverse_cache_friendly_mask(sizeclass);
     return p = (void*)((uintptr_t)p & mask);
   }
 
   inline static uint16_t
-  remove_cache_friendly_offset(uint16_t relative, uint8_t sizeclass)
+  remove_cache_friendly_offset(uint16_t relative, sizeclass_t sizeclass)
   {
     size_t mask = sizeclass_to_inverse_cache_friendly_mask(sizeclass);
     return relative & mask;
   }
 #else
-  inline static void* remove_cache_friendly_offset(void* p, uint8_t sizeclass)
+  inline static void*
+  remove_cache_friendly_offset(void* p, sizeclass_t sizeclass)
   {
     UNUSED(sizeclass);
     return p;
   }
   inline static uint16_t
-  remove_cache_friendly_offset(uint16_t relative, uint8_t sizeclass)
+  remove_cache_friendly_offset(uint16_t relative, sizeclass_t sizeclass)
   {
     UNUSED(sizeclass);
     return relative;

@@ -26,7 +26,7 @@ namespace snmalloc
       short_initial_link_ptr(),
       medium_slab_slots()
     {
-      for (uint8_t sizeclass = 0; sizeclass < NUM_SIZECLASSES; sizeclass++)
+      for (sizeclass_t sizeclass = 0; sizeclass < NUM_SIZECLASSES; sizeclass++)
      {
         size[sizeclass] =
           bits::from_exp_mant<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(sizeclass);
@@ -40,7 +40,7 @@ namespace snmalloc
       size_t header_size = sizeof(Superslab);
       size_t short_slab_size = SLAB_SIZE - header_size;
 
-      for (uint8_t i = 0; i < NUM_SMALL_CLASSES; i++)
+      for (sizeclass_t i = 0; i < NUM_SMALL_CLASSES; i++)
       {
         // We align to the end of the block to remove special cases for the
         // short block. Calculate remainders
@@ -63,7 +63,7 @@ namespace snmalloc
         bump_ptr_start[i] = static_cast<uint16_t>((after_link + 1) % SLAB_SIZE);
       }
 
-      for (uint8_t i = NUM_SMALL_CLASSES; i < NUM_SIZECLASSES; i++)
+      for (sizeclass_t i = NUM_SMALL_CLASSES; i < NUM_SIZECLASSES; i++)
       {
         medium_slab_slots[i - NUM_SMALL_CLASSES] = static_cast<uint16_t>(
           (SUPERSLAB_SIZE - Mediumslab::header_size()) / size[i]);
@@ -74,7 +74,7 @@ namespace snmalloc
   static constexpr SizeClassTable sizeclass_metadata = SizeClassTable();
 
   static inline constexpr uint16_t
-  get_initial_bumpptr(uint8_t sc, bool is_short)
+  get_initial_bumpptr(sizeclass_t sc, bool is_short)
   {
     if (is_short)
       return sizeclass_metadata.short_bump_ptr_start[sc];
@@ -82,7 +82,8 @@ namespace snmalloc
     return sizeclass_metadata.bump_ptr_start[sc];
   }
 
-  static inline constexpr uint16_t get_initial_link(uint8_t sc, bool is_short)
+  static inline constexpr uint16_t
+  get_initial_link(sizeclass_t sc, bool is_short)
   {
     if (is_short)
       return sizeclass_metadata.short_initial_link_ptr[sc];
@@ -90,24 +91,24 @@ namespace snmalloc
     return sizeclass_metadata.initial_link_ptr[sc];
   }
 
-  constexpr static inline size_t sizeclass_to_size(uint8_t sizeclass)
+  constexpr static inline size_t sizeclass_to_size(sizeclass_t sizeclass)
   {
     return sizeclass_metadata.size[sizeclass];
   }
 
   constexpr static inline size_t
-  sizeclass_to_cache_friendly_mask(uint8_t sizeclass)
+  sizeclass_to_cache_friendly_mask(sizeclass_t sizeclass)
   {
     return sizeclass_metadata.cache_friendly_mask[sizeclass];
   }
 
   constexpr static inline size_t
-  sizeclass_to_inverse_cache_friendly_mask(uint8_t sizeclass)
+  sizeclass_to_inverse_cache_friendly_mask(sizeclass_t sizeclass)
   {
     return sizeclass_metadata.inverse_cache_friendly_mask[sizeclass];
   }
 
-  constexpr static inline uint16_t medium_slab_free(uint8_t sizeclass)
+  constexpr static inline uint16_t medium_slab_free(sizeclass_t sizeclass)
   {
     return sizeclass_metadata
       .medium_slab_slots[(sizeclass - NUM_SMALL_CLASSES)];

@@ -70,9 +70,9 @@ namespace snmalloc
       return pointer_cast<Superslab>(address_cast(p) & SUPERSLAB_MASK);
     }
 
-    static bool is_short_sizeclass(uint8_t sizeclass)
+    static bool is_short_sizeclass(sizeclass_t sizeclass)
     {
-      constexpr uint8_t h = size_to_sizeclass_const(sizeof(Superslab));
+      constexpr sizeclass_t h = size_to_sizeclass_const(sizeof(Superslab));
       return sizeclass <= h;
     }
 
@@ -154,13 +154,14 @@ namespace snmalloc
     }
 
     template<typename MemoryProvider>
-    Slab* alloc_short_slab(uint8_t sizeclass, MemoryProvider& memory_provider)
+    Slab*
+    alloc_short_slab(sizeclass_t sizeclass, MemoryProvider& memory_provider)
     {
       if ((used & 1) == 1)
         return alloc_slab(sizeclass, memory_provider);
 
       meta[0].head = get_initial_bumpptr(sizeclass, true);
-      meta[0].sizeclass = sizeclass;
+      meta[0].sizeclass = static_cast<uint8_t>(sizeclass);
       meta[0].link = get_initial_link(sizeclass, true);
 
       if constexpr (decommit_strategy == DecommitAll)
@@ -174,7 +175,7 @@ namespace snmalloc
     }
 
     template<typename MemoryProvider>
-    Slab* alloc_slab(uint8_t sizeclass, MemoryProvider& memory_provider)
+    Slab* alloc_slab(sizeclass_t sizeclass, MemoryProvider& memory_provider)
     {
       uint8_t h = head;
       Slab* slab = pointer_cast<Slab>(
@@ -183,7 +184,7 @@ namespace snmalloc
       uint8_t n = meta[h].next;
 
       meta[h].head = get_initial_bumpptr(sizeclass, false);
-      meta[h].sizeclass = sizeclass;
+      meta[h].sizeclass = static_cast<uint8_t>(sizeclass);
       meta[h].link = get_initial_link(sizeclass, false);
 
       head = h + n + 1;

@@ -137,7 +137,7 @@ extern "C"
     }
 
     size = bits::max(size, alignment);
-    uint8_t sc = size_to_sizeclass(size);
+    snmalloc::sizeclass_t sc = size_to_sizeclass(size);
    if (sc >= NUM_SIZECLASSES)
    {
      // large allocs are 16M aligned.

@@ -77,7 +77,7 @@ int main(int argc, char** argv)
 
   test_calloc(0, 0, SUCCESS, false);
 
-  for (uint8_t sc = 0; sc < NUM_SIZECLASSES; sc++)
+  for (snmalloc::sizeclass_t sc = 0; sc < NUM_SIZECLASSES; sc++)
   {
     const size_t size = sizeclass_to_size(sc);
 
@@ -103,7 +103,7 @@ int main(int argc, char** argv)
 
   for (size_t align = sizeof(size_t); align <= SUPERSLAB_SIZE; align <<= 1)
   {
-    for (uint8_t sc = 0; sc < NUM_SIZECLASSES; sc++)
+    for (snmalloc::sizeclass_t sc = 0; sc < NUM_SIZECLASSES; sc++)
     {
       const size_t size = sizeclass_to_size(sc);
       test_posix_memalign(size, align, SUCCESS, false);

@@ -2,7 +2,7 @@
 #include <snmalloc.h>
 
 NOINLINE
-uint8_t size_to_sizeclass(size_t size)
+snmalloc::sizeclass_t size_to_sizeclass(size_t size)
 {
   return snmalloc::size_to_sizeclass(size);
 }
@@ -17,7 +17,7 @@ int main(int, char**)
 
   std::cout << "sizeclass |-> [size_low, size_high] " << std::endl;
 
-  for (uint8_t sz = 0; sz < snmalloc::NUM_SIZECLASSES; sz++)
+  for (snmalloc::sizeclass_t sz = 0; sz < snmalloc::NUM_SIZECLASSES; sz++)
   {
     // Separate printing for small and medium sizeclasses
     if (sz == snmalloc::NUM_SMALL_CLASSES)