replace assert with SNMALLOC_ASSERT

Amaury Chamayou 2020-03-04 16:57:44 +00:00
Parent ef77bccfc2
Commit acbcbce597
30 changed files: 128 additions and 106 deletions
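The change, in effect: in debug builds an assertion failure now routes through snmalloc::error rather than the C runtime's assert machinery, and under NDEBUG the check compiles away to ((void)0). Below is a minimal standalone sketch of the new behaviour; the error definition here is a stand-in for illustration, as the real one is supplied by the platform layer.

#include <cstdio>
#include <cstdlib>

namespace snmalloc
{
  // Stand-in for the error() this commit declares in defines.h;
  // the real definition lives in the platform abstraction layer.
  void error(const char* const str)
  {
    fputs(str, stderr);
    abort();
  }
}

#ifdef NDEBUG
#  define SNMALLOC_ASSERT(expr) ((void)0)
#else
#  define SNMALLOC_ASSERT(expr) \
    { \
      if (!(expr)) \
      { \
        snmalloc::error("assert fail"); \
      } \
    }
#endif

int main()
{
  SNMALLOC_ASSERT(2 + 2 == 4); // holds: no output, no side effects
  SNMALLOC_ASSERT(false); // debug build: prints "assert fail" and aborts
}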

View file

@@ -1,4 +1,5 @@
#pragma once
#include "../pal/pal_consts.h"
#include "bits.h"
#include <cassert>
@@ -95,8 +96,8 @@ namespace snmalloc
template<typename T = void>
inline T* pointer_align_up(void* p, size_t alignment)
{
assert(alignment > 0);
assert(bits::next_pow2(alignment) == alignment);
SNMALLOC_ASSERT(alignment > 0);
SNMALLOC_ASSERT(bits::next_pow2(alignment) == alignment);
#if __has_builtin(__builtin_align_up)
return static_cast<T*>(__builtin_align_up(p, alignment));
#else
@@ -111,7 +112,7 @@ namespace snmalloc
*/
inline size_t pointer_diff(void* base, void* cursor)
{
assert(cursor >= base);
SNMALLOC_ASSERT(cursor >= base);
return static_cast<size_t>(
static_cast<char*>(cursor) - static_cast<char*>(base));
}
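For reference, the two helpers touched above behave as follows. This is a free-standing sketch of their semantics, not the library's actual templates:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative equivalents of pointer_align_up and pointer_diff.
static void* pointer_align_up(void* p, size_t alignment)
{
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<void*>((v + alignment - 1) & ~(alignment - 1));
}

static size_t pointer_diff(void* base, void* cursor)
{
  return static_cast<size_t>(
    static_cast<char*>(cursor) - static_cast<char*>(base));
}

int main()
{
  alignas(64) char buf[128];
  // buf is 64-byte aligned, so buf + 3 rounds up to the next boundary.
  void* q = pointer_align_up(buf + 3, 64);
  assert(pointer_diff(buf, q) == 64);
}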

View file

@@ -6,6 +6,7 @@
// #define USE_LZCNT
#include "../aal/aal.h"
#include "../pal/pal_consts.h"
#include "defines.h"
#include <atomic>
@@ -225,7 +226,7 @@ namespace snmalloc
static SNMALLOC_FAST_PATH size_t align_down(size_t value, size_t alignment)
{
assert(next_pow2(alignment) == alignment);
SNMALLOC_ASSERT(next_pow2(alignment) == alignment);
size_t align_1 = alignment - 1;
value &= ~align_1;
@@ -234,7 +235,7 @@ namespace snmalloc
static inline size_t align_up(size_t value, size_t alignment)
{
assert(next_pow2(alignment) == alignment);
SNMALLOC_ASSERT(next_pow2(alignment) == alignment);
size_t align_1 = alignment - 1;
value += align_1;
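Both helpers depend on the power-of-two precondition that the new asserts enforce: with a power-of-two alignment, align_down masks the low bits off and align_up rounds by adding alignment - 1 first. A worked standalone example (the bodies restate the complete functions, including the masking step the hunk cuts off):

#include <cassert>
#include <cstddef>

static size_t align_down(size_t value, size_t alignment)
{
  size_t align_1 = alignment - 1;
  return value & ~align_1;
}

static size_t align_up(size_t value, size_t alignment)
{
  size_t align_1 = alignment - 1;
  return (value + align_1) & ~align_1;
}

int main()
{
  assert(align_down(37, 16) == 32); // clears the low four bits
  assert(align_up(37, 16) == 48); // rounds up to the next multiple of 16
  assert(align_up(48, 16) == 48); // already-aligned values are unchanged
}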

View file

@@ -30,8 +30,25 @@
#define UNUSED(x) ((void)(x))
namespace snmalloc
{
void error(const char* const str);
} // namespace snmalloc
#ifdef NDEBUG
# define SNMALLOC_ASSERT(expr) ((void)0)
#else
# define SNMALLOC_ASSERT(expr) \
{ \
if (!(expr)) \
{ \
snmalloc::error("assert fail"); \
} \
}
#endif
#ifndef NDEBUG
# define SNMALLOC_ASSUME(x) assert(x)
# define SNMALLOC_ASSUME(x) SNMALLOC_ASSERT(x)
#else
# if __has_builtin(__builtin_assume)
# define SNMALLOC_ASSUME(x) __builtin_assume((x))
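SNMALLOC_ASSUME keeps its split behaviour: debug builds check the condition (now via SNMALLOC_ASSERT), while release builds hand it to the optimizer where __builtin_assume is available. A sketch of the pattern; MY_ASSUME is an illustrative name, not part of snmalloc:

#include <cassert>
#include <cstddef>

#ifndef NDEBUG
#  define MY_ASSUME(x) assert(x)
#elif defined(__clang__)
// Clang's __builtin_assume feeds the condition to the optimizer.
#  define MY_ASSUME(x) __builtin_assume((x))
#else
#  define MY_ASSUME(x) ((void)0)
#endif

size_t div_by_16(size_t n)
{
  // In release builds the optimizer may rely on this divisibility fact.
  MY_ASSUME((n % 16) == 0);
  return n / 16;
}

int main()
{
  assert(div_by_16(64) == 4);
}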

View file

@@ -155,7 +155,7 @@ namespace snmalloc
while (curr != item)
{
assert(curr != Terminator());
SNMALLOC_ASSERT(curr != Terminator());
curr = curr->next;
}
#else
@@ -171,7 +171,7 @@ namespace snmalloc
while (curr != Terminator())
{
assert(curr != item);
SNMALLOC_ASSERT(curr != item);
curr = curr->next;
}
#else
@@ -187,7 +187,7 @@ namespace snmalloc
while (item != Terminator())
{
assert(item->prev == prev);
SNMALLOC_ASSERT(item->prev == prev);
prev = item;
item = item->next;
}

View file

@@ -26,7 +26,7 @@ namespace snmalloc
static Object obj;
// If defined, should be initially false.
assert(first == nullptr || *first == false);
SNMALLOC_ASSERT(first == nullptr || *first == false);
if (unlikely(!initialised.load(std::memory_order_acquire)))
{

View file

@@ -20,10 +20,8 @@ namespace snmalloc
public:
void invariant()
{
#ifndef NDEBUG
assert(back != nullptr);
assert(front != nullptr);
#endif
SNMALLOC_ASSERT(back != nullptr);
SNMALLOC_ASSERT(front != nullptr);
}
void init(T* stub)
@@ -71,7 +69,7 @@ namespace snmalloc
{
front = next;
Aal::prefetch(&(next->next));
assert(front);
SNMALLOC_ASSERT(front);
std::atomic_thread_fence(std::memory_order_acquire);
invariant();
return std::pair(first, true);

View file

@@ -6,6 +6,7 @@
# define ALLOCATOR
#endif
#include "../pal/pal_consts.h"
#include "../test/histogram.h"
#include "allocstats.h"
#include "chunkmap.h"
@@ -507,7 +508,7 @@ namespace snmalloc
Allocator<MemoryProvider, ChunkMap, IsQueueInline, Replacement>);
constexpr size_t initial_shift =
bits::next_pow2_bits_const(allocator_size);
assert((initial_shift + (r * REMOTE_SLOT_BITS)) < 64);
SNMALLOC_ASSERT((initial_shift + (r * REMOTE_SLOT_BITS)) < 64);
return (id >> (initial_shift + (r * REMOTE_SLOT_BITS))) & REMOTE_MASK;
}
@@ -518,7 +519,7 @@ namespace snmalloc
Remote* r = static_cast<Remote*>(p);
r->set_target_id(target_id);
assert(r->target_id() == target_id);
SNMALLOC_ASSERT(r->target_id() == target_id);
RemoteList* l = &list[get_slot(target_id, 0)];
l->last->non_atomic_next = r;
@@ -653,7 +654,7 @@ namespace snmalloc
{
if constexpr (IsQueueInline)
{
assert(r == nullptr);
SNMALLOC_ASSERT(r == nullptr);
(void)r;
}
else
@@ -684,13 +685,13 @@ namespace snmalloc
// All medium size classes are page aligned.
if (i > NUM_SMALL_CLASSES)
{
assert(is_aligned_block<OS_PAGE_SIZE>(nullptr, size1));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(nullptr, size1));
}
assert(sc1 == i);
assert(sc1 == sc2);
assert(size1 == size);
assert(size1 == size2);
SNMALLOC_ASSERT(sc1 == i);
SNMALLOC_ASSERT(sc1 == sc2);
SNMALLOC_ASSERT(size1 == size);
SNMALLOC_ASSERT(size1 == size2);
}
#endif
}
@@ -826,7 +827,7 @@ namespace snmalloc
}
else
{
assert(likely(p->target_id() != id()));
SNMALLOC_ASSERT(likely(p->target_id() != id()));
Slab* slab = Metaslab::get_slab(p);
Metaslab& meta = super->get_meta(slab);
// Queue for remote dealloc elsewhere.
@@ -931,7 +932,7 @@ namespace snmalloc
if (super != nullptr)
{
Slab* slab = super->alloc_short_slab(sizeclass);
assert(super->is_full());
SNMALLOC_ASSERT(super->is_full());
return slab;
}
@@ -1224,7 +1225,7 @@ namespace snmalloc
size_t size_bits = bits::next_pow2_bits(size);
size_t large_class = size_bits - SUPERSLAB_BITS;
assert(large_class < NUM_LARGE_CLASSES);
SNMALLOC_ASSERT(large_class < NUM_LARGE_CLASSES);
void* p = large_allocator.template alloc<zero_mem, allow_reserve>(
large_class, size);
@@ -1240,7 +1241,7 @@ namespace snmalloc
MEASURE_TIME(large_dealloc, 4, 16);
size_t size_bits = bits::next_pow2_bits(size);
assert(bits::one_at_bit(size_bits) >= SUPERSLAB_SIZE);
SNMALLOC_ASSERT(bits::one_at_bit(size_bits) >= SUPERSLAB_SIZE);
size_t large_class = size_bits - SUPERSLAB_BITS;
chunkmap().clear_large_size(p, size);
@@ -1261,7 +1262,7 @@ namespace snmalloc
void remote_dealloc(RemoteAllocator* target, void* p, sizeclass_t sizeclass)
{
MEASURE_TIME(remote_dealloc, 4, 16);
assert(target->id() != id());
SNMALLOC_ASSERT(target->id() != id());
handle_message_queue();

View file

@@ -34,7 +34,7 @@ namespace snmalloc
void dec()
{
assert(current > 0);
SNMALLOC_ASSERT(current > 0);
current--;
}
@@ -143,7 +143,7 @@ namespace snmalloc
#ifdef USE_SNMALLOC_STATS
auto index = (size == 0) ? 0 : bits::to_exp_mant<BUCKETS_BITS>(size);
assert(index < TOTAL_BUCKETS);
SNMALLOC_ASSERT(index < TOTAL_BUCKETS);
bucketed_requests[index]++;
#endif
}

View file

@@ -183,7 +183,7 @@ namespace snmalloc
*/
static void clear_slab(Superslab* slab)
{
assert(get(slab) == CMSuperslab);
SNMALLOC_ASSERT(get(slab) == CMSuperslab);
set(slab, static_cast<size_t>(CMNotOurs));
}
/**
@@ -191,7 +191,7 @@ namespace snmalloc
*/
static void clear_slab(Mediumslab* slab)
{
assert(get(slab) == CMMediumslab);
SNMALLOC_ASSERT(get(slab) == CMMediumslab);
set(slab, static_cast<size_t>(CMNotOurs));
}
/**
@@ -220,7 +220,7 @@ namespace snmalloc
{
auto p = address_cast(vp);
size_t rounded_size = bits::next_pow2(size);
assert(get(p) == bits::next_pow2_bits(size));
SNMALLOC_ASSERT(get(p) == bits::next_pow2_bits(size));
auto count = rounded_size >> SUPERSLAB_BITS;
PagemapProvider::pagemap().set_range(p, CMNotOurs, count);
}

View file

@@ -389,7 +389,7 @@ namespace snmalloc
}
}
assert(p == pointer_align_up(p, rsize));
SNMALLOC_ASSERT(p == pointer_align_up(p, rsize));
return p;
}

View file

@@ -46,8 +46,8 @@ namespace snmalloc
void init(RemoteAllocator* alloc, sizeclass_t sc, size_t rsize)
{
assert(sc >= NUM_SMALL_CLASSES);
assert((sc - NUM_SMALL_CLASSES) < NUM_MEDIUM_CLASSES);
SNMALLOC_ASSERT(sc >= NUM_SMALL_CLASSES);
SNMALLOC_ASSERT((sc - NUM_SMALL_CLASSES) < NUM_MEDIUM_CLASSES);
allocator = alloc;
head = 0;
@@ -66,7 +66,7 @@ namespace snmalloc
}
else
{
assert(free == medium_slab_free(sc));
SNMALLOC_ASSERT(free == medium_slab_free(sc));
}
}
@@ -78,13 +78,13 @@ namespace snmalloc
template<ZeroMem zero_mem, typename MemoryProvider>
void* alloc(size_t size, MemoryProvider& memory_provider)
{
assert(!full());
SNMALLOC_ASSERT(!full());
uint16_t index = stack[head++];
void* p = pointer_offset(this, (static_cast<size_t>(index) << 8));
free--;
assert(is_aligned_block<OS_PAGE_SIZE>(p, OS_PAGE_SIZE));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, OS_PAGE_SIZE));
size = bits::align_up(size, OS_PAGE_SIZE);
if constexpr (zero_mem == YesZero)
@@ -95,7 +95,7 @@ namespace snmalloc
bool dealloc(void* p)
{
assert(head > 0);
SNMALLOC_ASSERT(head > 0);
// Returns true if the Mediumslab was full before this deallocation.
bool was_full = full();

View file

@@ -82,14 +82,14 @@ namespace snmalloc
bool is_full()
{
auto result = link == 1;
assert(!result || head == nullptr);
SNMALLOC_ASSERT(!result || head == nullptr);
return result;
}
void set_full()
{
assert(head == nullptr);
assert(link != 1);
SNMALLOC_ASSERT(head == nullptr);
SNMALLOC_ASSERT(link != 1);
link = 1;
// Set needed to 1, so that "return_object" will return true after calling
// set_full
@@ -216,7 +216,7 @@ namespace snmalloc
size_t accounted_for = needed * size + offset;
// Block is not full
assert(SLAB_SIZE > accounted_for);
SNMALLOC_ASSERT(SLAB_SIZE > accounted_for);
// Keep variable so it appears in debugger.
size_t length = debug_slab_acyclic_free_list(slab);
@@ -228,13 +228,13 @@ namespace snmalloc
{
// Check we are looking at a correctly aligned block
void* start = remove_cache_friendly_offset(curr, sizeclass);
assert(((pointer_diff(slab, start) - offset) % size) == 0);
SNMALLOC_ASSERT(((pointer_diff(slab, start) - offset) % size) == 0);
// Account for free elements in free list
accounted_for += size;
assert(SLAB_SIZE >= accounted_for);
SNMALLOC_ASSERT(SLAB_SIZE >= accounted_for);
// We should never reach the link node in the free list.
assert(curr != pointer_offset(slab, link));
SNMALLOC_ASSERT(curr != pointer_offset(slab, link));
// Iterate bump/free list segment
curr = follow_next(curr);
@@ -242,7 +242,7 @@ namespace snmalloc
auto bumpptr = (allocated * size) + offset;
// Check we haven't allocated more than fits in a slab
assert(bumpptr <= SLAB_SIZE);
SNMALLOC_ASSERT(bumpptr <= SLAB_SIZE);
// Account for the space yet to be bump allocated
accounted_for += SLAB_SIZE - bumpptr;
@@ -251,15 +251,15 @@ namespace snmalloc
{
// The link should be the first allocation as we
// haven't completely filled this block at any point.
assert(link == get_initial_offset(sizeclass, is_short));
SNMALLOC_ASSERT(link == get_initial_offset(sizeclass, is_short));
}
assert(!is_full());
SNMALLOC_ASSERT(!is_full());
// Add the link node.
accounted_for += size;
// All space accounted for
assert(SLAB_SIZE == accounted_for);
SNMALLOC_ASSERT(SLAB_SIZE == accounted_for);
#else
UNUSED(slab);
#endif

View file

@@ -395,7 +395,8 @@ namespace snmalloc
*/
void* page_for_address(uintptr_t p)
{
assert((reinterpret_cast<uintptr_t>(&top) & (OS_PAGE_SIZE - 1)) == 0);
SNMALLOC_ASSERT(
(reinterpret_cast<uintptr_t>(&top) & (OS_PAGE_SIZE - 1)) == 0);
return reinterpret_cast<void*>(
reinterpret_cast<uintptr_t>(&top[p >> SHIFT]) & ~(OS_PAGE_SIZE - 1));
}

View file

@@ -1,5 +1,6 @@
#pragma once
#include "../pal/pal_consts.h"
#include "allocconfig.h"
namespace snmalloc
@@ -27,7 +28,7 @@ namespace snmalloc
auto sc = static_cast<sizeclass_t>(
bits::to_exp_mant_const<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(size));
assert(sc == static_cast<uint8_t>(sc));
SNMALLOC_ASSERT(sc == static_cast<uint8_t>(sc));
return sc;
}
@@ -56,17 +57,17 @@ namespace snmalloc
{
// check_same<NUM_LARGE_CLASSES, Globals::num_large_classes>();
// Must be called with a rounded size.
assert(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
SNMALLOC_ASSERT(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
// Only works up to certain offsets, exhaustively tested up to
// SUPERSLAB_SIZE.
assert(offset <= SUPERSLAB_SIZE);
SNMALLOC_ASSERT(offset <= SUPERSLAB_SIZE);
size_t align = bits::ctz(rsize);
size_t divider = rsize >> align;
// Maximum of 24 bits for 16MiB super/medium slab
if (INTERMEDIATE_BITS == 0 || divider == 1)
{
assert(divider == 1);
SNMALLOC_ASSERT(divider == 1);
return offset & ~(rsize - 1);
}
@@ -100,17 +101,17 @@ namespace snmalloc
inline static bool is_multiple_of_sizeclass(size_t rsize, size_t offset)
{
// Must be called with a rounded size.
assert(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
SNMALLOC_ASSERT(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
// Only works up to certain offsets, exhaustively tested up to
// SUPERSLAB_SIZE.
assert(offset <= SUPERSLAB_SIZE);
SNMALLOC_ASSERT(offset <= SUPERSLAB_SIZE);
size_t align = bits::ctz(rsize);
size_t divider = rsize >> align;
// Maximum of 24 bits for 16MiB super/medium slab
if (INTERMEDIATE_BITS == 0 || divider == 1)
{
assert(divider == 1);
SNMALLOC_ASSERT(divider == 1);
return (offset & (rsize - 1)) == 0;
}
@@ -178,9 +179,9 @@ namespace snmalloc
SNMALLOC_FAST_PATH static size_t aligned_size(size_t alignment, size_t size)
{
// Client responsible for checking alignment is not zero
assert(alignment != 0);
SNMALLOC_ASSERT(alignment != 0);
// Client responsible for checking alignment is a power of two
assert(bits::next_pow2(alignment) == alignment);
SNMALLOC_ASSERT(bits::next_pow2(alignment) == alignment);
return ((alignment - 1) | (size - 1)) + 1;
}
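The return expression is a branch-free rounding identity: for a power-of-two alignment and nonzero size, ((alignment - 1) | (size - 1)) + 1 equals size rounded up to the next multiple of alignment, because the OR saturates the low bits and the + 1 carries into the aligned part. A quick check, including the case the new test in this commit asserts:

#include <cassert>
#include <cstddef>

static size_t aligned_size(size_t alignment, size_t size)
{
  return ((alignment - 1) | (size - 1)) + 1;
}

int main()
{
  assert(aligned_size(128, 160) == 256); // the case the commit's test checks
  assert(aligned_size(16, 20) == 32);
  assert(aligned_size(16, 48) == 48); // exact multiples are unchanged
}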

View file

@@ -42,9 +42,10 @@ namespace snmalloc
Metaslab& meta = get_meta();
void* head = meta.head;
assert(rsize == sizeclass_to_size(meta.sizeclass));
assert(sl.get_head() == (SlabLink*)pointer_offset(this, meta.link));
assert(!meta.is_full());
SNMALLOC_ASSERT(rsize == sizeclass_to_size(meta.sizeclass));
SNMALLOC_ASSERT(
sl.get_head() == (SlabLink*)pointer_offset(this, meta.link));
SNMALLOC_ASSERT(!meta.is_full());
meta.debug_slab_invariant(this);
void* p = nullptr;
@@ -103,7 +104,7 @@ namespace snmalloc
meta.allocated = meta.allocated + 1;
}
assert(curr != nullptr);
SNMALLOC_ASSERT(curr != nullptr);
Metaslab::store_next(curr, nullptr);
}
}
@@ -124,7 +125,7 @@ namespace snmalloc
p = remove_cache_friendly_offset(p, meta.sizeclass);
}
assert(is_start_of_object(Superslab::get(p), p));
SNMALLOC_ASSERT(is_start_of_object(Superslab::get(p), p));
meta.debug_slab_invariant(this);
@@ -166,7 +167,7 @@ namespace snmalloc
// Set the head to the memory being deallocated.
meta.head = p;
assert(meta.valid_head());
SNMALLOC_ASSERT(meta.valid_head());
// Set the next pointer to the previous head.
Metaslab::store_next(p, head);
@@ -197,8 +198,8 @@ namespace snmalloc
}
// Update the head and the sizeclass link.
uint16_t index = pointer_to_index(p);
assert(meta.head == nullptr);
// assert(meta.fully_allocated(is_short()));
SNMALLOC_ASSERT(meta.head == nullptr);
// SNMALLOC_ASSERT(meta.fully_allocated(is_short()));
meta.link = index;
meta.needed = meta.allocated - 1;

View file

@@ -45,7 +45,7 @@ namespace snmalloc
size_t slab_to_index(Slab* slab)
{
auto res = (pointer_diff(this, slab) >> SLAB_BITS);
assert(res == static_cast<uint8_t>(res));
SNMALLOC_ASSERT(res == static_cast<uint8_t>(res));
return static_cast<uint8_t>(res);
}
@@ -110,7 +110,7 @@ namespace snmalloc
for (size_t i = 0; i < SLAB_COUNT; i++)
{
assert(meta[i].is_unused());
SNMALLOC_ASSERT(meta[i].is_unused());
}
#endif
}
@@ -201,7 +201,7 @@ namespace snmalloc
bool was_almost_full = is_almost_full();
used -= 2;
assert(meta[index].is_unused());
SNMALLOC_ASSERT(meta[index].is_unused());
if (was_almost_full || is_empty())
return StatusChange;
@@ -214,7 +214,7 @@ namespace snmalloc
bool was_full = is_full();
used--;
assert(meta[0].is_unused());
SNMALLOC_ASSERT(meta[0].is_unused());
if (was_full || is_empty())
return StatusChange;

View file

@@ -222,9 +222,9 @@ namespace snmalloc
SNMALLOC_SLOW_PATH inline void* lazy_replacement_slow()
{
auto*& local_alloc = ThreadAlloc::get_reference();
assert(local_alloc == &GlobalPlaceHolder);
SNMALLOC_ASSERT(local_alloc == &GlobalPlaceHolder);
local_alloc = current_alloc_pool()->acquire();
assert(local_alloc != &GlobalPlaceHolder);
SNMALLOC_ASSERT(local_alloc != &GlobalPlaceHolder);
ThreadAlloc::register_cleanup();
return local_alloc;
}

View file

@@ -81,7 +81,7 @@ extern "C"
void* p = SNMALLOC_NAME_MANGLE(malloc)(size);
if (p != nullptr)
{
assert(p == Alloc::external_pointer<Start>(p));
SNMALLOC_ASSERT(p == Alloc::external_pointer<Start>(p));
sz = bits::min(size, sz);
memcpy(p, ptr, sz);
SNMALLOC_NAME_MANGLE(free)(ptr);
@@ -126,7 +126,7 @@ extern "C"
SNMALLOC_EXPORT void*
SNMALLOC_NAME_MANGLE(aligned_alloc)(size_t alignment, size_t size)
{
assert((size % alignment) == 0);
SNMALLOC_ASSERT((size % alignment) == 0);
return SNMALLOC_NAME_MANGLE(memalign)(alignment, size);
}
@@ -198,7 +198,7 @@ extern "C"
if (config)
{
*config = &ChunkmapPagemap::config;
assert(ChunkmapPagemap::cast_to_pagemap(&pm, *config) == &pm);
SNMALLOC_ASSERT(ChunkmapPagemap::cast_to_pagemap(&pm, *config) == &pm);
}
return &pm;
}

View file

@@ -2,11 +2,6 @@
#include "pal_consts.h"
namespace snmalloc
{
void error(const char* const str);
} // namespace snmalloc
// If simulating OE, then we need the underlying platform
#if !defined(OPEN_ENCLAVE) || defined(OPEN_ENCLAVE_SIMULATION)
# include "pal_apple.h"

View file

@@ -34,7 +34,7 @@ namespace snmalloc
{
if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
void* r = mmap(
p,
size,

View file

@@ -33,7 +33,7 @@ namespace snmalloc
*/
void notify_not_using(void* p, size_t size) noexcept
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
madvise(p, size, MADV_FREE);
}
};

View file

@@ -30,7 +30,7 @@ namespace snmalloc
void* reserve(size_t size, size_t align) noexcept
{
// Alignment must be a power of 2.
assert(align == bits::next_pow2(align));
SNMALLOC_ASSERT(align == bits::next_pow2(align));
align = bits::max<size_t>(4096, align);

View file

@@ -1,5 +1,9 @@
#pragma once
#include "../ds/defines.h"
#include <atomic>
namespace snmalloc
{
/**
@@ -112,4 +116,4 @@ namespace snmalloc
}
}
};
} // namespace snmalloc
} // namespace snmalloc

View file

@@ -39,7 +39,7 @@ namespace snmalloc
{
if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
madvise(p, size, MADV_DONTNEED);
}
else

View file

@@ -53,7 +53,7 @@ namespace snmalloc
*/
void notify_not_using(void* p, size_t size) noexcept
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
#ifdef USE_POSIX_COMMIT_CHECKS
mprotect(p, size, PROT_NONE);
#else
@@ -72,7 +72,8 @@ namespace snmalloc
template<ZeroMem zero_mem>
void notify_using(void* p, size_t size) noexcept
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));
SNMALLOC_ASSERT(
is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));
if constexpr (zero_mem == YesZero)
static_cast<OS*>(this)->template zero<true>(p, size);
@@ -101,7 +102,7 @@ namespace snmalloc
{
if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
void* r = mmap(
p,
size,

View file

@@ -110,7 +110,7 @@ namespace snmalloc
/// Notify platform that we will not be using these pages
void notify_not_using(void* p, size_t size) noexcept
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
BOOL ok = VirtualFree(p, size, MEM_DECOMMIT);
@@ -122,7 +122,8 @@ namespace snmalloc
template<ZeroMem zero_mem>
void notify_using(void* p, size_t size) noexcept
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));
SNMALLOC_ASSERT(
is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));
void* r = VirtualAlloc(p, size, MEM_COMMIT, PAGE_READWRITE);
@@ -136,7 +137,7 @@ namespace snmalloc
{
if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
{
assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
notify_not_using(p, size);
notify_using<YesZero>(p, size);
}

View file

@@ -73,7 +73,7 @@ void test_random_allocation()
cell = alloc->alloc(16);
auto pair = allocated.insert(cell);
// Check not already allocated
assert(pair.second);
SNMALLOC_ASSERT(pair.second);
UNUSED(pair);
alloc_count++;
}
@@ -130,14 +130,14 @@ void test_double_alloc()
for (size_t i = 0; i < (n * 2); i++)
{
void* p = a1->alloc(20);
assert(set1.find(p) == set1.end());
SNMALLOC_ASSERT(set1.find(p) == set1.end());
set1.insert(p);
}
for (size_t i = 0; i < (n * 2); i++)
{
void* p = a2->alloc(20);
assert(set2.find(p) == set2.end());
SNMALLOC_ASSERT(set2.find(p) == set2.end());
set2.insert(p);
}
@@ -178,8 +178,8 @@ void test_external_pointer()
void* p4 = Alloc::external_pointer<End>(p2);
UNUSED(p3);
UNUSED(p4);
assert(p1 == p3);
assert((size_t)p4 == (size_t)p1 + size - 1);
SNMALLOC_ASSERT(p1 == p3);
SNMALLOC_ASSERT((size_t)p4 == (size_t)p1 + size - 1);
}
alloc->dealloc(p1, size);
@@ -281,7 +281,7 @@ void test_alloc_16M()
const size_t size = 16'000'000;
void* p1 = alloc->alloc(size);
assert(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
SNMALLOC_ASSERT(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
alloc->dealloc(p1);
}
@@ -292,7 +292,7 @@ void test_calloc_16M()
const size_t size = 16'000'000;
void* p1 = alloc->alloc<YesZero>(size);
assert(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
SNMALLOC_ASSERT(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
alloc->dealloc(p1);
}

View file

@@ -12,7 +12,7 @@ void test_align_size()
{
bool failed = false;
assert(snmalloc::aligned_size(128, 160) == 256);
SNMALLOC_ASSERT(snmalloc::aligned_size(128, 160) == 256);
for (size_t size = 1;
size < snmalloc::sizeclass_to_size(snmalloc::NUM_SIZECLASSES - 1);

View file

@@ -77,7 +77,7 @@ namespace histogram
else
{
auto i = get_index(value);
assert(i < BUCKETS);
SNMALLOC_ASSERT(i < BUCKETS);
count[i]++;
}
}

View file

@@ -20,7 +20,7 @@ void test_alloc_dealloc(size_t count, size_t size, bool write)
for (size_t i = 0; i < ((count * 3) / 2); i++)
{
void* p = alloc->alloc<zero_mem>(size);
assert(set.find(p) == set.end());
SNMALLOC_ASSERT(set.find(p) == set.end());
if (write)
*(int*)p = 4;
@@ -35,14 +35,14 @@ void test_alloc_dealloc(size_t count, size_t size, bool write)
void* p = *it;
alloc->dealloc(p, size);
set.erase(it);
assert(set.find(p) == set.end());
SNMALLOC_ASSERT(set.find(p) == set.end());
}
// alloc 1x objects
for (size_t i = 0; i < count; i++)
{
void* p = alloc->alloc<zero_mem>(size);
assert(set.find(p) == set.end());
SNMALLOC_ASSERT(set.find(p) == set.end());
if (write)
*(int*)p = 4;