Randomise slab allocation pattern (#304)

The slab allocation pattern is randomised based on the deallocation
pattern.  This is achieved by using two queues onto which free elements
are enqueued.  We pick "randomly" which queue to add to, and when we
take the free_queue to use, we splice the two queues together.
Matthew Parkinson 2021-03-24 16:12:22 +00:00 committed by GitHub
Parent 6442f4edd8
Commit 578abd8db4
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 317 additions and 199 deletions
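For illustration, here is a minimal standalone sketch of the two-queue splicing idea described in the commit message; the TwoQueueBuilder and Node names, the fixed 0xDEADBEEF seed, and the driver in main are illustrative, not the snmalloc API.

// Freed elements are pushed onto one of two singly linked queues, chosen by
// rotating a seed; splicing the queues back together yields an allocation
// order that is a shuffle of the deallocation order.
#include <cstdint>
#include <iostream>
#include <vector>

struct Node
{
  Node* next = nullptr;
};

class TwoQueueBuilder
{
  Node* head[2] = {nullptr, nullptr};
  Node** end[2] = {&head[0], &head[1]};
  uint32_t interleave = 0xDEADBEEF; // illustrative fixed seed

  // Rotate the seed right by one and return the bit that falls out;
  // this picks which queue receives the next element.
  uint32_t next_interleave()
  {
    uint32_t bottom = interleave & 1;
    interleave = (bottom << 31) | (interleave >> 1);
    return bottom;
  }

public:
  // Enqueue a freed element onto the chosen queue (branch free, since the
  // tail pointer always refers to the slot to fill).
  void add(Node* n)
  {
    uint32_t i = next_interleave();
    *end[i] = n;
    end[i] = &n->next;
  }

  // Terminate both queues and splice queue 1 onto the end of queue 0.
  Node* terminate()
  {
    *end[1] = nullptr;
    *end[0] = head[1];
    return head[0];
  }
};

int main()
{
  std::vector<Node> nodes(8);
  TwoQueueBuilder b;
  for (auto& n : nodes)
    b.add(&n);
  // Prints the element indices in the spliced (shuffled) order.
  for (Node* c = b.terminate(); c != nullptr; c = c->next)
    std::cout << (c - nodes.data()) << " ";
  std::cout << "\n";
}

Because each add picks a queue pseudo-randomly and terminate concatenates the two queues, the list handed back to the allocator is a permutation of the deallocation order, which is the randomisation this commit introduces.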

View file

@ -294,29 +294,32 @@ if(NOT DEFINED SNMALLOC_ONLY_HEADER_LIBRARY)
# Windows does not support aligned allocation well enough
# for pass through.
# NetBSD, OpenBSD and DragonFlyBSD do not support malloc*size calls.
set(FLAVOURS 1;16;oe)
set(FLAVOURS 1;16;oe;check)
else()
set(FLAVOURS 1;16;oe;malloc)
set(FLAVOURS 1;16;oe;malloc;check)
endif()
foreach(SUPER_SLAB_SIZE ${FLAVOURS})
foreach(FLAVOUR ${FLAVOURS})
unset(SRC)
aux_source_directory(${TESTDIR}/${TEST_CATEGORY}/${TEST} SRC)
set(TESTNAME "${TEST_CATEGORY}-${TEST}-${SUPER_SLAB_SIZE}")
set(TESTNAME "${TEST_CATEGORY}-${TEST}-${FLAVOUR}")
add_executable(${TESTNAME} ${SRC})
# For all tests enable commit checking.
target_compile_definitions(${TESTNAME} PRIVATE -DUSE_POSIX_COMMIT_CHECKS)
if (${SUPER_SLAB_SIZE} EQUAL 16)
if (${FLAVOUR} EQUAL 16)
target_compile_definitions(${TESTNAME} PRIVATE SNMALLOC_USE_LARGE_CHUNKS)
endif()
if (${SUPER_SLAB_SIZE} STREQUAL "oe")
if (${FLAVOUR} STREQUAL "oe")
oe_simulate(${TESTNAME})
endif()
if (${SUPER_SLAB_SIZE} STREQUAL "malloc")
if (${FLAVOUR} STREQUAL "malloc")
target_compile_definitions(${TESTNAME} PRIVATE SNMALLOC_PASS_THROUGH)
endif()
if (${FLAVOUR} STREQUAL "check")
target_compile_definitions(${TESTNAME} PRIVATE CHECK_CLIENT)
endif()
if(CONST_QUALIFIED_MALLOC_USABLE_SIZE)
target_compile_definitions(${TESTNAME} PRIVATE -DMALLOC_USABLE_SIZE_QUALIFIER=const)
endif()

View file

@ -306,7 +306,7 @@ namespace snmalloc
*/
Slab* slab = Metaslab::get_slab(p);
Metaslab& meta = super->get_meta(slab);
sizeclass_t sizeclass = meta.sizeclass;
sizeclass_t sizeclass = meta.sizeclass();
small_dealloc_checked_sizeclass(super, slab, p, sizeclass);
return;
@ -361,7 +361,7 @@ namespace snmalloc
Slab* slab = Metaslab::get_slab(p);
Metaslab& meta = super->get_meta(slab);
sizeclass_t sc = meta.sizeclass;
sizeclass_t sc = meta.sizeclass();
void* slab_end = pointer_offset(slab, SLAB_SIZE);
return external_pointer<location>(p, sc, slab_end);
@ -436,7 +436,7 @@ namespace snmalloc
Slab* slab = Metaslab::get_slab(p);
Metaslab& meta = super->get_meta(slab);
return sizeclass_to_size(meta.sizeclass);
return sizeclass_to_size(meta.sizeclass());
}
if (likely(chunkmap_slab_kind == CMMediumslab))
@ -1170,7 +1170,7 @@ namespace snmalloc
{
Slab* slab = Metaslab::get_slab(p);
check_client(
sizeclass == super->get_meta(slab).sizeclass,
sizeclass == super->get_meta(slab).sizeclass(),
"Claimed small deallocation with mismatching size class");
small_dealloc_checked_sizeclass(super, slab, p, sizeclass);

View file

@ -10,27 +10,42 @@
#include "../ds/helpers.h"
#include "allocconfig.h"
#include <iostream>
#include <cstdint>
namespace snmalloc
{
#ifdef CHECK_CLIENT
static constexpr std::size_t PRESERVE_BOTTOM_BITS = 16;
/**
* The key that is used to encode free list pointers.
* This should be randomised at startup in the future.
*/
inline static address_t global_key =
static_cast<size_t>(bits::is64() ? 0x9999'9999'9999'9999 : 0x9999'9999);
inline static address_t global_key = static_cast<std::size_t>(
bits::is64() ? 0x5a59'DEAD'BEEF'5A59 : 0x5A59'BEEF);
#endif
/**
* Used to turn a location into a key. This is currently
* just the value of the previous location + 1.
* just the slab address truncated to 16bits and offset by 1.
*/
inline static uintptr_t initial_key(void* p)
inline static address_t initial_key(void* slab)
{
return address_cast(p) + 1;
}
#ifdef CHECK_CLIENT
/**
* This file assumes that SLAB_BITS is smaller than 16. In multiple
* places it uses uint16_t to represent the offset into a slab.
*/
static_assert(
SLAB_BITS <= 16,
"Encoding requires slab offset representable in 16bits.");
return (address_cast(slab) & SLAB_MASK) + 1;
#else
UNUSED(slab);
return 0;
#endif
}
static inline bool different_slab(uintptr_t p1, uintptr_t p2)
{
@ -47,19 +62,15 @@ namespace snmalloc
return different_slab(address_cast(p1), address_cast(p2));
}
/**
* Free objects within each slab point directly to the next.
* The next_object pointer can be encoded to detect
* corruption caused by writes in a UAF or a double free.
*
* If cache-friendly offsets are used, then the FreeObject is
* potentially offset from the start of the object.
*/
class FreeObject
{
FreeObject* next_object;
class FreeObject;
static FreeObject* encode(uintptr_t local_key, FreeObject* next_object)
class EncodeFreeObjectReference
{
FreeObject* reference;
public:
static inline FreeObject*
encode(uint16_t local_key, FreeObject* next_object)
{
#ifdef CHECK_CLIENT
if constexpr (aal_supports<IntegerPointers>)
@ -70,9 +81,12 @@ namespace snmalloc
// resulting word's top half is XORed into the pointer value before it
// is stored.
auto next = address_cast(next_object);
constexpr uintptr_t MASK = bits::one_at_bit(bits::BITS / 2) - 1;
constexpr uintptr_t MASK = bits::one_at_bit(PRESERVE_BOTTOM_BITS) - 1;
// Mix in local_key
auto key = local_key ^ global_key;
// We shift the local key so that the critical bits have more effect on the
// high bits.
address_t lk = local_key;
auto key = (lk << PRESERVE_BOTTOM_BITS) + global_key;
next ^= (((next & MASK) + 1) * key) & ~MASK;
next_object = reinterpret_cast<FreeObject*>(next);
}
@ -82,7 +96,30 @@ namespace snmalloc
return next_object;
}
void store(FreeObject* value, uint16_t local_key)
{
reference = encode(local_key, value);
}
FreeObject* read(uint16_t local_key)
{
return encode(local_key, reference);
}
};
/**
* Free objects within each slab point directly to the next.
* The next_object pointer can be encoded to detect
* corruption caused by writes in a UAF or a double free.
*
* If cache-friendly offsets are used, then the FreeObject is
* potentially offset from the start of the object.
*/
class FreeObject
{
public:
EncodeFreeObjectReference next_object;
static FreeObject* make(void* p)
{
return static_cast<FreeObject*>(p);
@ -91,37 +128,28 @@ namespace snmalloc
/**
* Read the next pointer handling any required decoding of the pointer
*/
FreeObject* read_next(uintptr_t key)
FreeObject* read_next(uint16_t key)
{
auto next = encode(key, next_object);
return next;
}
/**
* Store the next pointer handling any required encoding of the pointer
*/
void store_next(FreeObject* next, uintptr_t key)
{
next_object = encode(key, next);
SNMALLOC_ASSERT(next == read_next(key));
return next_object.read(key);
}
};
/**
* Wrapper class that allows the keys for pointer encoding to be
* conditionally compiled.
* Used to iterate a free list in object space.
*
* Checks signing of pointers
*/
class FreeObjectCursor
class FreeListIter
{
FreeObject* curr = nullptr;
#ifdef CHECK_CLIENT
uintptr_t prev = 0;
#endif
uintptr_t get_prev()
uint16_t get_prev()
{
#ifdef CHECK_CLIENT
return prev;
return prev & 0xffff;
#else
return 0;
#endif
@ -150,72 +178,24 @@ namespace snmalloc
}
public:
FreeObject* get_curr()
{
return curr;
}
/**
* Advance the cursor through the list
*/
void move_next()
{
FreeListIter(FreeObject* head)
: curr(head)
#ifdef CHECK_CLIENT
check_client(
!different_slab(prev, curr), "Heap corruption - free list corrupted!");
,
prev(initial_key(head))
#endif
update_cursor(curr->read_next(get_prev()));
}
/**
* Update the next pointer at the location in the list pointed to
* by the cursor.
*/
void set_next(FreeObject* next)
{
curr->store_next(next, get_prev());
SNMALLOC_ASSERT(head != nullptr);
}
/**
* Update the next pointer at the location in the list pointed to
* by the cursor, and move the cursor to that new value.
*/
void set_next_and_move(FreeObject* next)
{
set_next(next);
update_cursor(next);
}
FreeListIter() = default;
/**
* Resets the key to an initial value. So the cursor can be used
* on a new sequence.
*/
void reset_cursor(FreeObject* next)
{
#ifdef CHECK_CLIENT
prev = initial_key(next);
#endif
curr = next;
}
};
/**
* Used to iterate a free list in object space.
*
* Checks signing of pointers
*/
class FreeListIter
{
protected:
FreeObjectCursor front;
public:
/**
* Checks if there are any more values to iterate.
*/
bool empty()
{
return front.get_curr() == nullptr;
return curr == nullptr;
}
/**
@ -223,7 +203,7 @@ namespace snmalloc
*/
void* peek()
{
return front.get_curr();
return curr;
}
/**
@ -231,8 +211,12 @@ namespace snmalloc
*/
void* take()
{
auto c = front.get_curr();
front.move_next();
#ifdef CHECK_CLIENT
check_client(
!different_slab(prev, curr), "Heap corruption - free list corrupted!");
#endif
auto c = curr;
update_cursor(curr->read_next(get_prev()));
return c;
}
};
@ -240,48 +224,132 @@ namespace snmalloc
/**
* Used to build a free list in object space.
*
* Checks signing of pointers
* Adds signing of pointers
*
* On 64bit ptr architectures this data structure has
* 44 bytes of data
* and has an alignment of
* 8 bytes
* This unfortunately means its sizeof is 48bytes. We
* use the template parameter, so that an enclosing
* class can make use of the remaining four bytes.
*
* The builder uses two queues, and "randomly" decides to
* add to one of the two queues. This means that we will
* maintain a randomisation of the order between
* allocations.
*
* The fields are paired up to give better codegen as then they are offset
* by a power of 2, and the bit extract from the interleaving seed can
* be shifted to calculate the relevant offset to index the fields.
*/
class FreeListBuilder : FreeListIter
template<typename S = uint32_t>
class FreeListBuilder
{
FreeObjectCursor end;
// Pointer to the first element.
EncodeFreeObjectReference head[2];
// Pointer to the reference to the last element.
// In the empty case end[i] == &head[i]
// This enables branch free enqueuing.
EncodeFreeObjectReference* end[2];
uint32_t interleave;
#ifdef CHECK_CLIENT
// The bottom 16 bits of the previous pointer
uint16_t prev[2];
// The bottom 16 bits of the current pointer
// This needs to be stored for the empty case
// where it is `initial_key()` for the slab.
uint16_t curr[2];
#endif
public:
S s;
uint16_t get_prev(uint32_t index)
{
#ifdef CHECK_CLIENT
return prev[index];
#else
UNUSED(index);
return 0;
#endif
}
uint16_t get_curr(uint32_t index)
{
#ifdef CHECK_CLIENT
return curr[index];
#else
UNUSED(index);
return 0;
#endif
}
static constexpr uint16_t HEAD_KEY = 1;
/**
* Rotate the bits for interleaving.
*
* Returns the bottom bit.
*/
uint32_t next_interleave()
{
uint32_t bottom_bit = interleave & 1;
interleave = (bottom_bit << 31) | (interleave >> 1);
return bottom_bit;
}
public:
FreeListBuilder()
{
init();
}
/**
* Start building a new free list.
* Provide pointer to the slab to initialise the system.
*/
void open(void* n)
void open(void* p)
{
interleave = 0xDEADBEEF;
SNMALLOC_ASSERT(empty());
FreeObject* next = FreeObject::make(n);
end.reset_cursor(next);
front.reset_cursor(next);
#ifdef CHECK_CLIENT
prev[0] = HEAD_KEY;
curr[0] = initial_key(p) & 0xffff;
prev[1] = HEAD_KEY;
curr[1] = initial_key(p) & 0xffff;
#else
UNUSED(p);
#endif
end[0] = &head[0];
end[1] = &head[1];
}
/**
* Returns current head without affecting the builder.
*/
void* peek_head()
{
return peek();
}
/**
* Checks if there are any more values to iterate.
* Checks if the builder contains any elements.
*/
bool empty()
{
return FreeListIter::empty();
return end[0] == &head[0] && end[1] == &head[1];
}
/**
* Adds an element to the free list
* Adds an element to the builder
*/
void add(void* n)
{
SNMALLOC_ASSERT(!different_slab(end.get_curr(), n));
SNMALLOC_ASSERT(
!different_slab(end[0], n) || !different_slab(end[1], n) || empty());
FreeObject* next = FreeObject::make(n);
end.set_next_and_move(next);
uint32_t index = next_interleave();
end[index]->store(next, get_prev(index));
end[index] = &(next->next_object);
#ifdef CHECK_CLIENT
prev[index] = curr[index];
curr[index] = address_cast(next) & 0xffff;
#endif
}
/**
@ -290,14 +358,65 @@ namespace snmalloc
* can still be added. It returns a new iterator to the
* list.
*
* This also collapses the two queues into one, so that it can
* be iterated easily.
*
* This is used to iterate a list that is being constructed.
* It is currently only used to check invariants in Debug builds.
*
* It is used with preserve_queue enabled to check
* invariants in Debug builds.
*
* It is used with preserve_queue disabled by close.
*/
FreeListIter terminate()
FreeListIter terminate(bool preserve_queue = true)
{
if (!empty())
end.set_next(nullptr);
return *this;
SNMALLOC_ASSERT(end[1] != &head[0]);
SNMALLOC_ASSERT(end[0] != &head[1]);
// If second list is empty, then append is trivial.
if (end[1] == &head[1])
{
end[0]->store(nullptr, get_prev(0));
return {head[0].read(HEAD_KEY)};
}
end[1]->store(nullptr, get_prev(1));
// Append 1 to 0
auto mid = head[1].read(HEAD_KEY);
end[0]->store(mid, get_prev(0));
// Re-code first link in second list (if there is one).
// The first link in the second list will be encoded with initial_key,
// But that needs to be changed to the curr of the first list.
if (mid != nullptr)
{
auto mid_next = mid->read_next(initial_key(mid) & 0xffff);
mid->next_object.store(mid_next, get_curr(0));
}
auto h = head[0].read(HEAD_KEY);
// If we need to continue adding to the builder
// Set up the second list as empty,
// and extend the first list to cover all of the second.
if (preserve_queue && h != nullptr)
{
#ifdef CHECK_CLIENT
prev[0] = prev[1];
curr[0] = curr[1];
#endif
end[0] = end[1];
#ifdef CHECK_CLIENT
prev[1] = HEAD_KEY;
curr[1] = initial_key(h) & 0xffff;
#endif
end[1] = &(head[1]);
}
SNMALLOC_ASSERT(end[1] != &head[0]);
SNMALLOC_ASSERT(end[0] != &head[1]);
return {h};
}
/**
@ -306,8 +425,7 @@ namespace snmalloc
*/
void close(FreeListIter& dst)
{
terminate();
dst = *this;
dst = terminate(false);
init();
}
@ -316,7 +434,8 @@ namespace snmalloc
*/
void init()
{
front.reset_cursor(nullptr);
end[0] = &head[0];
end[1] = &head[1];
}
};
} // namespace snmalloc
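As a worked illustration of the encoding in freelist.h above: only the bits above PRESERVE_BOTTOM_BITS are XORed with a keyed multiply of the preserved low bits, so the same function both encodes and decodes (it is an involution). The sketch below assumes a 64-bit address_t and reuses the constants from the diff; the free-standing encode function and the example addresses are illustrative, not the snmalloc implementation.

#include <cassert>
#include <cstddef>
#include <cstdint>

using address_t = uint64_t;

constexpr std::size_t PRESERVE_BOTTOM_BITS = 16;
constexpr address_t MASK = (address_t(1) << PRESERVE_BOTTOM_BITS) - 1;
const address_t global_key = 0x5a59'DEAD'BEEF'5A59; // would be randomised at startup

// Mix a keyed function of the preserved low bits (the slab offset) into the
// high bits.  The low 16 bits pass through unchanged, so applying the same
// function twice returns the original value.
address_t encode(uint16_t local_key, address_t next)
{
  address_t key = (address_t(local_key) << PRESERVE_BOTTOM_BITS) + global_key;
  next ^= (((next & MASK) + 1) * key) & ~MASK;
  return next;
}

int main()
{
  address_t p = 0x0000'7f12'3456'7890;
  uint16_t local_key = 0x0042;

  address_t stored = encode(local_key, p);
  // Decoding with the same keys round-trips.
  assert(encode(local_key, stored) == p);

  // A stray write (e.g. from a use-after-free) that flips a high bit of the
  // stored word decodes to a different pointer, which check_client-style
  // validation can then reject.
  address_t corrupted = stored ^ (address_t(1) << 40);
  assert(encode(local_key, corrupted) != p);
  return 0;
}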

View file

@ -22,18 +22,14 @@ namespace snmalloc
sizeof(SlabLink) <= MIN_ALLOC_SIZE,
"Need to be able to pack a SlabLink into any free small alloc");
// The Metaslab represent the status of a single slab.
// This can be either a short or a standard slab.
class Metaslab : public SlabLink
/**
* This struct is used inside FreeListBuilder to account for the
* alignment space that is wasted in sizeof.
*
* This is part of Metaslab abstraction.
*/
struct MetaslabEnd
{
public:
/**
* Pointer to first free entry in this slab
*
* The list will be (slab_capacity - needed) long.
*/
FreeListBuilder free_queue;
/**
* How many entries are not in the free list of slab, i.e.
* how many entries are needed to fully free this slab.
@ -47,6 +43,34 @@ namespace snmalloc
uint8_t sizeclass;
// Initially zero to encode the superslabs relative list of slabs.
uint8_t next = 0;
};
// The Metaslab represent the status of a single slab.
// This can be either a short or a standard slab.
class Metaslab : public SlabLink
{
public:
/**
* Data-structure for building the free list for this slab.
*
* Spare 32bits are used for the fields in MetaslabEnd.
*/
FreeListBuilder<MetaslabEnd> free_queue;
uint16_t& needed()
{
return free_queue.s.needed;
}
uint8_t& sizeclass()
{
return free_queue.s.sizeclass;
}
uint8_t& next()
{
return free_queue.s.next;
}
/**
* Updates statistics for adding an entry to the free list, if the
@ -57,12 +81,12 @@ namespace snmalloc
*/
bool return_object()
{
return (--needed) == 0;
return (--needed()) == 0;
}
bool is_unused()
{
return needed == 0;
return needed() == 0;
}
bool is_full()
@ -77,20 +101,10 @@ namespace snmalloc
SNMALLOC_ASSERT(free_queue.empty());
// Set needed to 1, so that "return_object" will return true after calling
// set_full
needed = 1;
needed() = 1;
null_prev();
}
bool valid_head()
{
size_t size = sizeclass_to_size(sizeclass);
auto h = address_cast(free_queue.peek_head());
address_t slab_end = (h | ~SLAB_MASK) + 1;
address_t allocation_start = remove_cache_friendly_offset(h, sizeclass);
return (slab_end - allocation_start) % size == 0;
}
static Slab* get_slab(const void* p)
{
return pointer_align_down<SLAB_SIZE, Slab>(const_cast<void*>(p));
@ -104,7 +118,7 @@ namespace snmalloc
SNMALLOC_FAST_PATH static bool is_start_of_object(Metaslab* self, void* p)
{
return is_multiple_of_sizeclass(
self->sizeclass,
self->sizeclass(),
SLAB_SIZE - pointer_diff(pointer_align_down<SLAB_SIZE>(p), p));
}
@ -120,26 +134,22 @@ namespace snmalloc
static SNMALLOC_FAST_PATH void*
alloc(Metaslab* self, FreeListIter& fast_free_list, size_t rsize)
{
SNMALLOC_ASSERT(rsize == sizeclass_to_size(self->sizeclass));
SNMALLOC_ASSERT(rsize == sizeclass_to_size(self->sizeclass()));
SNMALLOC_ASSERT(!self->is_full());
auto slab = get_slab(self->free_queue.peek_head());
self->debug_slab_invariant(slab);
self->free_queue.close(fast_free_list);
void* n = fast_free_list.take();
// Treat stealing the free list as allocating it all.
self->needed =
get_slab_capacity(self->sizeclass, Metaslab::is_short(slab));
self->needed() = get_slab_capacity(
self->sizeclass(), Metaslab::is_short(Metaslab::get_slab(n)));
self->remove();
self->set_full();
void* p = remove_cache_friendly_offset(n, self->sizeclass);
void* p = remove_cache_friendly_offset(n, self->sizeclass());
SNMALLOC_ASSERT(is_start_of_object(self, p));
self->debug_slab_invariant(slab);
self->debug_slab_invariant(Metaslab::get_slab(p));
if constexpr (zero_mem == YesZero)
{
@ -170,9 +180,9 @@ namespace snmalloc
if (is_unused())
return;
size_t size = sizeclass_to_size(sizeclass);
size_t offset = get_initial_offset(sizeclass, is_short);
size_t accounted_for = needed * size + offset;
size_t size = sizeclass_to_size(sizeclass());
size_t offset = get_initial_offset(sizeclass(), is_short);
size_t accounted_for = needed() * size + offset;
// Block is not full
SNMALLOC_ASSERT(SLAB_SIZE > accounted_for);
@ -183,7 +193,7 @@ namespace snmalloc
while (!fl.empty())
{
// Check we are looking at a correctly aligned block
void* start = remove_cache_friendly_offset(fl.take(), sizeclass);
void* start = remove_cache_friendly_offset(fl.take(), sizeclass());
SNMALLOC_ASSERT(((pointer_diff(slab, start) - offset) % size) == 0);
// Account for free elements in free list
@ -191,7 +201,7 @@ namespace snmalloc
SNMALLOC_ASSERT(SLAB_SIZE >= accounted_for);
}
auto bumpptr = (get_slab_capacity(sizeclass, is_short) * size) + offset;
auto bumpptr = (get_slab_capacity(sizeclass(), is_short) * size) + offset;
// Check we haven't allocated more than fits in a slab
SNMALLOC_ASSERT(bumpptr <= SLAB_SIZE);
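The MetaslabEnd / FreeListBuilder<S> pairing in metaslab.h above is a padding-reuse idiom: the builder holds 44 bytes of data with 8-byte alignment, so a 4-byte template member lets the enclosing Metaslab claim the space that would otherwise be tail padding. A rough layout sketch follows; field sizes assume a 64-bit build with CHECK_CLIENT, and BuilderLayout and EndFields are illustrative stand-ins rather than the snmalloc types.

#include <cstdint>
#include <cstdio>

// Stand-in for the builder's data members on a 64-bit CHECK_CLIENT build.
template<typename S = uint32_t>
struct BuilderLayout
{
  void* head[2];       // 16 bytes: the two queue heads
  void** end[2];       // 16 bytes: pointers to the queue tails
  uint32_t interleave; //  4 bytes: interleaving seed
  uint16_t prev[2];    //  4 bytes: CHECK_CLIENT key state
  uint16_t curr[2];    //  4 bytes: CHECK_CLIENT key state
  S s;                 //  4 more bytes fit before padding up to 48
};

// Stand-in for MetaslabEnd: the per-slab fields that ride in the slack.
struct EndFields
{
  uint16_t needed;
  uint8_t sizeclass;
  uint8_t next;
};

int main()
{
  // Both instantiations are 48 bytes, so EndFields costs no extra space.
  std::printf("%zu %zu\n",
              sizeof(BuilderLayout<uint32_t>),
              sizeof(BuilderLayout<EndFields>));
}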

View file

@ -37,19 +37,7 @@ namespace snmalloc
FreeListBuilder b;
SNMALLOC_ASSERT(b.empty());
// Builder does not check for setup on add as used on fast path
// deallocation This lambda wraps checking for initialisation.
auto push = [&](void* next) {
SNMALLOC_ASSERT(!different_slab(bumpptr, next));
if (b.empty())
{
b.open(next);
}
else
{
b.add(next);
}
};
b.open(bumpptr);
// This code needs generalising, but currently applies
// various offsets with a stride of seven to increase chance of catching
@ -60,7 +48,7 @@ namespace snmalloc
void* newbumpptr = pointer_offset(bumpptr, rsize * offset);
while (newbumpptr < slab_end)
{
push(newbumpptr);
b.add(newbumpptr);
newbumpptr = pointer_offset(newbumpptr, rsize * start_index.size());
}
}
@ -88,8 +76,6 @@ namespace snmalloc
// Update the head and the next pointer in the free list.
meta.free_queue.add(p);
SNMALLOC_ASSERT(meta.valid_head());
return true;
}
@ -109,7 +95,7 @@ namespace snmalloc
if (meta.is_full())
{
auto allocated = get_slab_capacity(
meta.sizeclass, Metaslab::is_short(Metaslab::get_slab(p)));
meta.sizeclass(), Metaslab::is_short(Metaslab::get_slab(p)));
// We are not on the sizeclass list.
if (allocated == 1)
{
@ -121,7 +107,8 @@ namespace snmalloc
}
SNMALLOC_ASSERT(meta.free_queue.empty());
meta.free_queue.open(p);
meta.needed = allocated - 1;
meta.free_queue.add(p);
meta.needed() = allocated - 1;
// Push on the list of slabs for this sizeclass.
sl->insert_prev(&meta);

View file

@ -4,7 +4,6 @@
#include "allocslab.h"
#include "metaslab.h"
#include <iostream>
#include <new>
namespace snmalloc
@ -134,7 +133,7 @@ namespace snmalloc
auto curr = head;
for (size_t i = 0; i < SLAB_COUNT - used - 1; i++)
{
curr = (curr + meta[curr].next + 1) & (SLAB_COUNT - 1);
curr = (curr + meta[curr].next() + 1) & (SLAB_COUNT - 1);
}
if (curr != 0)
abort();
@ -202,7 +201,7 @@ namespace snmalloc
// allocated from. Hence, the bump allocator slab will never be returned
// for use in another size class.
metaz.set_full();
metaz.sizeclass = static_cast<uint8_t>(sizeclass);
metaz.sizeclass() = static_cast<uint8_t>(sizeclass);
self->used++;
return reinterpret_cast<Slab*>(self);
@ -217,7 +216,7 @@ namespace snmalloc
pointer_offset(self, (static_cast<size_t>(h) << SLAB_BITS)));
auto& metah = self->meta[h];
uint8_t n = metah.next;
uint8_t n = metah.next();
metah.free_queue.init();
// Set up meta data as if the entire slab has been turned into a free
@ -226,7 +225,7 @@ namespace snmalloc
// allocated from. Hence, the bump allocator slab will never be returned
// for use in another size class.
metah.set_full();
metah.sizeclass = static_cast<uint8_t>(sizeclass);
metah.sizeclass() = static_cast<uint8_t>(sizeclass);
self->head = h + n + 1;
self->used += 2;
@ -241,8 +240,8 @@ namespace snmalloc
uint8_t index = static_cast<uint8_t>(slab_to_index(slab));
uint8_t n = head - index - 1;
meta[index].sizeclass = 0;
meta[index].next = n;
meta[index].sizeclass() = 0;
meta[index].next() = n;
head = index;
bool was_almost_full = is_almost_full();
used -= 2;