diff --git a/src/ds/address.h b/src/ds/address.h
index ae98de37..1c09cd88 100644
--- a/src/ds/address.h
+++ b/src/ds/address.h
@@ -1,4 +1,5 @@
 #pragma once
+#include "../pal/pal_consts.h"
 #include "bits.h"
 #include <cstdint>
@@ -95,8 +96,8 @@ namespace snmalloc
   template<typename T = void>
   inline T* pointer_align_up(void* p, size_t alignment)
   {
-    assert(alignment > 0);
-    assert(bits::next_pow2(alignment) == alignment);
+    SNMALLOC_ASSERT(alignment > 0);
+    SNMALLOC_ASSERT(bits::next_pow2(alignment) == alignment);
 #if __has_builtin(__builtin_align_up)
     return static_cast<T*>(__builtin_align_up(p, alignment));
 #else
@@ -111,7 +112,7 @@ namespace snmalloc
    */
   inline size_t pointer_diff(void* base, void* cursor)
   {
-    assert(cursor >= base);
+    SNMALLOC_ASSERT(cursor >= base);
     return static_cast<size_t>(
       static_cast<char*>(cursor) - static_cast<char*>(base));
   }
diff --git a/src/ds/bits.h b/src/ds/bits.h
index 374dccd0..f519071d 100644
--- a/src/ds/bits.h
+++ b/src/ds/bits.h
@@ -6,6 +6,7 @@
 // #define USE_LZCNT

 #include "../aal/aal.h"
+#include "../pal/pal_consts.h"
 #include "defines.h"

 #include <atomic>
@@ -225,7 +226,7 @@ namespace snmalloc
     static SNMALLOC_FAST_PATH size_t align_down(size_t value, size_t alignment)
     {
-      assert(next_pow2(alignment) == alignment);
+      SNMALLOC_ASSERT(next_pow2(alignment) == alignment);

       size_t align_1 = alignment - 1;
       value &= ~align_1;
@@ -234,7 +235,7 @@ namespace snmalloc
     static inline size_t align_up(size_t value, size_t alignment)
     {
-      assert(next_pow2(alignment) == alignment);
+      SNMALLOC_ASSERT(next_pow2(alignment) == alignment);

       size_t align_1 = alignment - 1;
       value += align_1;
diff --git a/src/ds/defines.h b/src/ds/defines.h
index 9b80bc05..0237657e 100644
--- a/src/ds/defines.h
+++ b/src/ds/defines.h
@@ -30,8 +30,25 @@
 #define UNUSED(x) ((void)(x))

+namespace snmalloc
+{
+  void error(const char* const str);
+} // namespace snmalloc
+
+#ifdef NDEBUG
+# define SNMALLOC_ASSERT(expr) ((void)0)
+#else
+# define SNMALLOC_ASSERT(expr) \
+  { \
+    if (!(expr)) \
+    { \
+      snmalloc::error("assert fail"); \
+    } \
+  }
+#endif
+
 #ifndef NDEBUG
-# define SNMALLOC_ASSUME(x) assert(x)
+# define SNMALLOC_ASSUME(x) SNMALLOC_ASSERT(x)
 #else
 # if __has_builtin(__builtin_assume)
 #  define SNMALLOC_ASSUME(x) __builtin_assume((x))
diff --git a/src/ds/dllist.h b/src/ds/dllist.h
index 1b37a16b..bf23819e 100644
--- a/src/ds/dllist.h
+++ b/src/ds/dllist.h
@@ -155,7 +155,7 @@ namespace snmalloc
       while (curr != item)
       {
-        assert(curr != Terminator());
+        SNMALLOC_ASSERT(curr != Terminator());
         curr = curr->next;
       }
 #else
@@ -171,7 +171,7 @@ namespace snmalloc
       while (curr != Terminator())
       {
-        assert(curr != item);
+        SNMALLOC_ASSERT(curr != item);
         curr = curr->next;
       }
 #else
@@ -187,7 +187,7 @@ namespace snmalloc
       while (item != Terminator())
       {
-        assert(item->prev == prev);
+        SNMALLOC_ASSERT(item->prev == prev);
         prev = item;
         item = item->next;
       }
diff --git a/src/ds/helpers.h b/src/ds/helpers.h
index 4e690544..5434c334 100644
--- a/src/ds/helpers.h
+++ b/src/ds/helpers.h
@@ -26,7 +26,7 @@ namespace snmalloc
       static Object obj;

       // If defined should be initially false;
-      assert(first == nullptr || *first == false);
+      SNMALLOC_ASSERT(first == nullptr || *first == false);

       if (unlikely(!initialised.load(std::memory_order_acquire)))
       {
diff --git a/src/ds/mpscq.h b/src/ds/mpscq.h
index 8fd7b79e..d5d51617 100644
--- a/src/ds/mpscq.h
+++ b/src/ds/mpscq.h
@@ -20,10 +20,8 @@ namespace snmalloc
   public:
     void invariant()
     {
-#ifndef NDEBUG
-      assert(back != nullptr);
-      assert(front != nullptr);
-#endif
+      SNMALLOC_ASSERT(back != nullptr);
+      SNMALLOC_ASSERT(front != nullptr);
     }

     void init(T* stub)
@@ -71,7 +69,7 @@ namespace snmalloc
       {
         front = next;
         Aal::prefetch(&(next->next));
-        assert(front);
+        SNMALLOC_ASSERT(front);
         std::atomic_thread_fence(std::memory_order_acquire);
         invariant();
         return std::pair<T*, bool>(first, true);
diff --git a/src/mem/alloc.h b/src/mem/alloc.h
index 76434e6c..a706fc9e 100644
--- a/src/mem/alloc.h
+++ b/src/mem/alloc.h
@@ -6,6 +6,7 @@
 # define ALLOCATOR
 #endif

+#include "../pal/pal_consts.h"
 #include "../test/histogram.h"
 #include "allocstats.h"
 #include "chunkmap.h"
@@ -507,7 +508,7 @@ namespace snmalloc
         Allocator);
       constexpr size_t initial_shift = bits::next_pow2_bits_const(allocator_size);
-      assert((initial_shift + (r * REMOTE_SLOT_BITS)) < 64);
+      SNMALLOC_ASSERT((initial_shift + (r * REMOTE_SLOT_BITS)) < 64);
       return (id >> (initial_shift + (r * REMOTE_SLOT_BITS))) & REMOTE_MASK;
     }
@@ -518,7 +519,7 @@
       Remote* r = static_cast<Remote*>(p);
       r->set_target_id(target_id);
-      assert(r->target_id() == target_id);
+      SNMALLOC_ASSERT(r->target_id() == target_id);

       RemoteList* l = &list[get_slot(target_id, 0)];
       l->last->non_atomic_next = r;
@@ -653,7 +654,7 @@
     {
       if constexpr (IsQueueInline)
       {
-        assert(r == nullptr);
+        SNMALLOC_ASSERT(r == nullptr);
         (void)r;
       }
       else
@@ -684,13 +685,13 @@
         // All medium size classes are page aligned.
         if (i > NUM_SMALL_CLASSES)
         {
-          assert(is_aligned_block<OS_PAGE_SIZE>(nullptr, size1));
+          SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(nullptr, size1));
         }

-        assert(sc1 == i);
-        assert(sc1 == sc2);
-        assert(size1 == size);
-        assert(size1 == size2);
+        SNMALLOC_ASSERT(sc1 == i);
+        SNMALLOC_ASSERT(sc1 == sc2);
+        SNMALLOC_ASSERT(size1 == size);
+        SNMALLOC_ASSERT(size1 == size2);
       }
 #endif
     }
@@ -826,7 +827,7 @@
       }
       else
       {
-        assert(likely(p->target_id() != id()));
+        SNMALLOC_ASSERT(likely(p->target_id() != id()));
         Slab* slab = Metaslab::get_slab(p);
         Metaslab& meta = super->get_meta(slab);
         // Queue for remote dealloc elsewhere.
@@ -931,7 +932,7 @@
       if (super != nullptr)
       {
         Slab* slab = super->alloc_short_slab(sizeclass);
-        assert(super->is_full());
+        SNMALLOC_ASSERT(super->is_full());
         return slab;
       }
@@ -1224,7 +1225,7 @@
       size_t size_bits = bits::next_pow2_bits(size);
       size_t large_class = size_bits - SUPERSLAB_BITS;
-      assert(large_class < NUM_LARGE_CLASSES);
+      SNMALLOC_ASSERT(large_class < NUM_LARGE_CLASSES);
       void* p = large_allocator.template alloc<zero_mem, allow_reserve>(
         large_class, size);
@@ -1240,7 +1241,7 @@
       MEASURE_TIME(large_dealloc, 4, 16);

       size_t size_bits = bits::next_pow2_bits(size);
-      assert(bits::one_at_bit(size_bits) >= SUPERSLAB_SIZE);
+      SNMALLOC_ASSERT(bits::one_at_bit(size_bits) >= SUPERSLAB_SIZE);
       size_t large_class = size_bits - SUPERSLAB_BITS;

       chunkmap().clear_large_size(p, size);
@@ -1261,7 +1262,7 @@
     void remote_dealloc(RemoteAllocator* target, void* p, sizeclass_t sizeclass)
     {
       MEASURE_TIME(remote_dealloc, 4, 16);
-      assert(target->id() != id());
+      SNMALLOC_ASSERT(target->id() != id());

       handle_message_queue();
diff --git a/src/mem/allocstats.h b/src/mem/allocstats.h
index d7d038f1..444fc1e7 100644
--- a/src/mem/allocstats.h
+++ b/src/mem/allocstats.h
@@ -34,7 +34,7 @@ namespace snmalloc
       void dec()
       {
-        assert(current > 0);
+        SNMALLOC_ASSERT(current > 0);
         current--;
       }
@@ -143,7 +143,7 @@ namespace snmalloc
 #ifdef USE_SNMALLOC_STATS
       auto index = (size == 0) ? 0 : bits::to_exp_mant(size);
-      assert(index < TOTAL_BUCKETS);
+      SNMALLOC_ASSERT(index < TOTAL_BUCKETS);
       bucketed_requests[index]++;
 #endif
     }
diff --git a/src/mem/chunkmap.h b/src/mem/chunkmap.h
index 1cb2edfd..286d89b6 100644
--- a/src/mem/chunkmap.h
+++ b/src/mem/chunkmap.h
@@ -183,7 +183,7 @@
    */
   static void clear_slab(Superslab* slab)
   {
-    assert(get(slab) == CMSuperslab);
+    SNMALLOC_ASSERT(get(slab) == CMSuperslab);
     set(slab, static_cast<uint8_t>(CMNotOurs));
   }
   /**
@@ -191,7 +191,7 @@
    */
   static void clear_slab(Mediumslab* slab)
   {
-    assert(get(slab) == CMMediumslab);
+    SNMALLOC_ASSERT(get(slab) == CMMediumslab);
     set(slab, static_cast<uint8_t>(CMNotOurs));
   }
   /**
@@ -220,7 +220,7 @@
   {
     auto p = address_cast(vp);
     size_t rounded_size = bits::next_pow2(size);
-    assert(get(p) == bits::next_pow2_bits(size));
+    SNMALLOC_ASSERT(get(p) == bits::next_pow2_bits(size));
     auto count = rounded_size >> SUPERSLAB_BITS;
     PagemapProvider::pagemap().set_range(p, CMNotOurs, count);
   }
diff --git a/src/mem/largealloc.h b/src/mem/largealloc.h
index e674a24f..ac8b6a2f 100644
--- a/src/mem/largealloc.h
+++ b/src/mem/largealloc.h
@@ -389,7 +389,7 @@ namespace snmalloc
         }
       }

-      assert(p == pointer_align_up(p, rsize));
+      SNMALLOC_ASSERT(p == pointer_align_up(p, rsize));
       return p;
     }
diff --git a/src/mem/mediumslab.h b/src/mem/mediumslab.h
index 0ce171e8..dcabf8e1 100644
--- a/src/mem/mediumslab.h
+++ b/src/mem/mediumslab.h
@@ -46,8 +46,8 @@ namespace snmalloc
     void init(RemoteAllocator* alloc, sizeclass_t sc, size_t rsize)
     {
-      assert(sc >= NUM_SMALL_CLASSES);
-      assert((sc - NUM_SMALL_CLASSES) < NUM_MEDIUM_CLASSES);
+      SNMALLOC_ASSERT(sc >= NUM_SMALL_CLASSES);
+      SNMALLOC_ASSERT((sc - NUM_SMALL_CLASSES) < NUM_MEDIUM_CLASSES);

       allocator = alloc;
       head = 0;
@@ -66,7 +66,7 @@ namespace snmalloc
       }
       else
       {
-        assert(free == medium_slab_free(sc));
+        SNMALLOC_ASSERT(free == medium_slab_free(sc));
       }
     }
@@ -78,13 +78,13 @@ namespace snmalloc
     template<ZeroMem zero_mem, typename MemoryProvider>
     void* alloc(size_t size, MemoryProvider& memory_provider)
     {
-      assert(!full());
+      SNMALLOC_ASSERT(!full());

       uint16_t index = stack[head++];
       void* p = pointer_offset(this, (static_cast<size_t>(index) << 8));
       free--;

-      assert(is_aligned_block<OS_PAGE_SIZE>(p, OS_PAGE_SIZE));
+      SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, OS_PAGE_SIZE));
       size = bits::align_up(size, OS_PAGE_SIZE);
       if constexpr (zero_mem == YesZero)
@@ -95,7 +95,7 @@ namespace snmalloc
     bool dealloc(void* p)
     {
-      assert(head > 0);
+      SNMALLOC_ASSERT(head > 0);

       // Returns true if the Mediumslab was full before this deallocation.
       bool was_full = full();
diff --git a/src/mem/metaslab.h b/src/mem/metaslab.h
index fbfcc9a9..837c265d 100644
--- a/src/mem/metaslab.h
+++ b/src/mem/metaslab.h
@@ -82,14 +82,14 @@ namespace snmalloc
     bool is_full()
     {
       auto result = link == 1;
-      assert(!result || head == nullptr);
+      SNMALLOC_ASSERT(!result || head == nullptr);
       return result;
     }

     void set_full()
     {
-      assert(head == nullptr);
-      assert(link != 1);
+      SNMALLOC_ASSERT(head == nullptr);
+      SNMALLOC_ASSERT(link != 1);
       link = 1;
       // Set needed to 1, so that "return_object" will return true after calling
       // set_full
@@ -216,7 +216,7 @@ namespace snmalloc
       size_t accounted_for = needed * size + offset;

       // Block is not full
-      assert(SLAB_SIZE > accounted_for);
+      SNMALLOC_ASSERT(SLAB_SIZE > accounted_for);

       // Keep variable so it appears in debugger.
       size_t length = debug_slab_acyclic_free_list(slab);
@@ -228,13 +228,13 @@
       {
         // Check we are looking at a correctly aligned block
         void* start = remove_cache_friendly_offset(curr, sizeclass);
-        assert(((pointer_diff(slab, start) - offset) % size) == 0);
+        SNMALLOC_ASSERT(((pointer_diff(slab, start) - offset) % size) == 0);

         // Account for free elements in free list
         accounted_for += size;
-        assert(SLAB_SIZE >= accounted_for);
+        SNMALLOC_ASSERT(SLAB_SIZE >= accounted_for);
         // We should never reach the link node in the free list.
-        assert(curr != pointer_offset(slab, link));
+        SNMALLOC_ASSERT(curr != pointer_offset(slab, link));

         // Iterate bump/free list segment
         curr = follow_next(curr);
@@ -242,7 +242,7 @@
       auto bumpptr = (allocated * size) + offset;
       // Check we haven't allocated more than fits in a slab
-      assert(bumpptr <= SLAB_SIZE);
+      SNMALLOC_ASSERT(bumpptr <= SLAB_SIZE);

       // Account for the space yet to be bump allocated
       accounted_for += SLAB_SIZE - bumpptr;
@@ -251,15 +251,15 @@
       {
         // The link should be the first allocation as we
         // haven't completely filled this block at any point.
-        assert(link == get_initial_offset(sizeclass, is_short));
+        SNMALLOC_ASSERT(link == get_initial_offset(sizeclass, is_short));
       }

-      assert(!is_full());
+      SNMALLOC_ASSERT(!is_full());
       // Add the link node.
       accounted_for += size;

       // All space accounted for
-      assert(SLAB_SIZE == accounted_for);
+      SNMALLOC_ASSERT(SLAB_SIZE == accounted_for);
 #else
       UNUSED(slab);
 #endif
diff --git a/src/mem/pagemap.h b/src/mem/pagemap.h
index aa77eb59..6d2c9444 100644
--- a/src/mem/pagemap.h
+++ b/src/mem/pagemap.h
@@ -395,7 +395,8 @@
      */
     void* page_for_address(uintptr_t p)
     {
-      assert((reinterpret_cast<uintptr_t>(&top) & (OS_PAGE_SIZE - 1)) == 0);
+      SNMALLOC_ASSERT(
+        (reinterpret_cast<uintptr_t>(&top) & (OS_PAGE_SIZE - 1)) == 0);
       return reinterpret_cast<void*>(
         reinterpret_cast<uintptr_t>(&top[p >> SHIFT]) & ~(OS_PAGE_SIZE - 1));
     }
diff --git a/src/mem/sizeclass.h b/src/mem/sizeclass.h
index a8b821bc..76e774e3 100644
--- a/src/mem/sizeclass.h
+++ b/src/mem/sizeclass.h
@@ -1,5 +1,6 @@
 #pragma once

+#include "../pal/pal_consts.h"
 #include "allocconfig.h"

 namespace snmalloc
@@ -27,7 +28,7 @@ namespace snmalloc
     auto sc = static_cast<size_t>(
       bits::to_exp_mant_const(size));
-    assert(sc == static_cast<sizeclass_t>(sc));
+    SNMALLOC_ASSERT(sc == static_cast<sizeclass_t>(sc));
     return sc;
   }
@@ -56,17 +57,17 @@ namespace snmalloc
   {
     // check_same();
     // Must be called with a rounded size.
-    assert(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
+    SNMALLOC_ASSERT(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
     // Only works up to certain offsets, exhaustively tested up to
     // SUPERSLAB_SIZE.
-    assert(offset <= SUPERSLAB_SIZE);
+    SNMALLOC_ASSERT(offset <= SUPERSLAB_SIZE);

     size_t align = bits::ctz(rsize);
     size_t divider = rsize >> align;
     // Maximum of 24 bits for 16MiB super/medium slab
     if (INTERMEDIATE_BITS == 0 || divider == 1)
     {
-      assert(divider == 1);
+      SNMALLOC_ASSERT(divider == 1);
       return offset & ~(rsize - 1);
     }
@@ -100,17 +101,17 @@ namespace snmalloc
   inline static bool is_multiple_of_sizeclass(size_t rsize, size_t offset)
   {
     // Must be called with a rounded size.
-    assert(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
+    SNMALLOC_ASSERT(sizeclass_to_size(size_to_sizeclass(rsize)) == rsize);
     // Only works up to certain offsets, exhaustively tested up to
     // SUPERSLAB_SIZE.
-    assert(offset <= SUPERSLAB_SIZE);
+    SNMALLOC_ASSERT(offset <= SUPERSLAB_SIZE);

     size_t align = bits::ctz(rsize);
     size_t divider = rsize >> align;
     // Maximum of 24 bits for 16MiB super/medium slab
     if (INTERMEDIATE_BITS == 0 || divider == 1)
     {
-      assert(divider == 1);
+      SNMALLOC_ASSERT(divider == 1);
       return (offset & (rsize - 1)) == 0;
     }
@@ -178,9 +179,9 @@ namespace snmalloc
   SNMALLOC_FAST_PATH static size_t aligned_size(size_t alignment, size_t size)
   {
     // Client responsible for checking alignment is not zero
-    assert(alignment != 0);
+    SNMALLOC_ASSERT(alignment != 0);
     // Client responsible for checking alignment is a power of two
-    assert(bits::next_pow2(alignment) == alignment);
+    SNMALLOC_ASSERT(bits::next_pow2(alignment) == alignment);

     return ((alignment - 1) | (size - 1)) + 1;
   }
diff --git a/src/mem/slab.h b/src/mem/slab.h
index 9fa2137c..3274b056 100644
--- a/src/mem/slab.h
+++ b/src/mem/slab.h
@@ -42,9 +42,10 @@ namespace snmalloc
       Metaslab& meta = get_meta();
       void* head = meta.head;

-      assert(rsize == sizeclass_to_size(meta.sizeclass));
-      assert(sl.get_head() == (SlabLink*)pointer_offset(this, meta.link));
-      assert(!meta.is_full());
+      SNMALLOC_ASSERT(rsize == sizeclass_to_size(meta.sizeclass));
+      SNMALLOC_ASSERT(
+        sl.get_head() == (SlabLink*)pointer_offset(this, meta.link));
+      SNMALLOC_ASSERT(!meta.is_full());
       meta.debug_slab_invariant(this);

       void* p = nullptr;
@@ -103,7 +104,7 @@
           meta.allocated = meta.allocated + 1;
         }

-        assert(curr != nullptr);
+        SNMALLOC_ASSERT(curr != nullptr);
         Metaslab::store_next(curr, nullptr);
       }
     }
@@ -124,7 +125,7 @@
         p = remove_cache_friendly_offset(p, meta.sizeclass);
       }

-      assert(is_start_of_object(Superslab::get(p), p));
+      SNMALLOC_ASSERT(is_start_of_object(Superslab::get(p), p));

       meta.debug_slab_invariant(this);
@@ -166,7 +167,7 @@
       // Set the head to the memory being deallocated.
       meta.head = p;
-      assert(meta.valid_head());
+      SNMALLOC_ASSERT(meta.valid_head());

       // Set the next pointer to the previous head.
       Metaslab::store_next(p, head);
@@ -197,8 +198,8 @@
       }

       // Update the head and the sizeclass link.
       uint16_t index = pointer_to_index(p);
-      assert(meta.head == nullptr);
-      // assert(meta.fully_allocated(is_short()));
+      SNMALLOC_ASSERT(meta.head == nullptr);
+      // SNMALLOC_ASSERT(meta.fully_allocated(is_short()));
       meta.link = index;
       meta.needed = meta.allocated - 1;
diff --git a/src/mem/superslab.h b/src/mem/superslab.h
index 8e3a7fc5..e236534d 100644
--- a/src/mem/superslab.h
+++ b/src/mem/superslab.h
@@ -45,7 +45,7 @@ namespace snmalloc
     size_t slab_to_index(Slab* slab)
     {
       auto res = (pointer_diff(this, slab) >> SLAB_BITS);
-      assert(res == static_cast<uint8_t>(res));
+      SNMALLOC_ASSERT(res == static_cast<uint8_t>(res));
       return static_cast<uint8_t>(res);
     }
@@ -110,7 +110,7 @@

       for (size_t i = 0; i < SLAB_COUNT; i++)
       {
-        assert(meta[i].is_unused());
+        SNMALLOC_ASSERT(meta[i].is_unused());
       }
 #endif
     }
@@ -201,7 +201,7 @@
       bool was_almost_full = is_almost_full();
       used -= 2;

-      assert(meta[index].is_unused());
+      SNMALLOC_ASSERT(meta[index].is_unused());
       if (was_almost_full || is_empty())
         return StatusChange;
@@ -214,7 +214,7 @@
       bool was_full = is_full();
       used--;

-      assert(meta[0].is_unused());
+      SNMALLOC_ASSERT(meta[0].is_unused());
       if (was_full || is_empty())
         return StatusChange;
diff --git a/src/mem/threadalloc.h b/src/mem/threadalloc.h
index d71bcd25..79f8e4db 100644
--- a/src/mem/threadalloc.h
+++ b/src/mem/threadalloc.h
@@ -222,9 +222,9 @@ namespace snmalloc
   SNMALLOC_SLOW_PATH inline void* lazy_replacement_slow()
   {
     auto*& local_alloc = ThreadAlloc::get_reference();
-    assert(local_alloc == &GlobalPlaceHolder);
+    SNMALLOC_ASSERT(local_alloc == &GlobalPlaceHolder);
     local_alloc = current_alloc_pool()->acquire();
-    assert(local_alloc != &GlobalPlaceHolder);
+    SNMALLOC_ASSERT(local_alloc != &GlobalPlaceHolder);
     ThreadAlloc::register_cleanup();
     return local_alloc;
   }
diff --git a/src/override/malloc.cc b/src/override/malloc.cc
index a3f18024..975bd410 100644
--- a/src/override/malloc.cc
+++ b/src/override/malloc.cc
@@ -81,7 +81,7 @@ extern "C"
     void* p = SNMALLOC_NAME_MANGLE(malloc)(size);
     if (p != nullptr)
     {
-      assert(p == Alloc::external_pointer(p));
+      SNMALLOC_ASSERT(p == Alloc::external_pointer(p));
       sz = bits::min(size, sz);
       memcpy(p, ptr, sz);
       SNMALLOC_NAME_MANGLE(free)(ptr);
@@ -126,7 +126,7 @@ extern "C"
   SNMALLOC_EXPORT void*
   SNMALLOC_NAME_MANGLE(aligned_alloc)(size_t alignment, size_t size)
   {
-    assert((size % alignment) == 0);
+    SNMALLOC_ASSERT((size % alignment) == 0);
     return SNMALLOC_NAME_MANGLE(memalign)(alignment, size);
   }
@@ -198,7 +198,7 @@ extern "C"
     if (config)
     {
       *config = &ChunkmapPagemap::config;
-      assert(ChunkmapPagemap::cast_to_pagemap(&pm, *config) == &pm);
+      SNMALLOC_ASSERT(ChunkmapPagemap::cast_to_pagemap(&pm, *config) == &pm);
     }
     return &pm;
   }
diff --git a/src/pal/pal.h b/src/pal/pal.h
index 11ae0173..dfadcadf 100644
--- a/src/pal/pal.h
+++ b/src/pal/pal.h
@@ -2,11 +2,6 @@

 #include "pal_consts.h"

-namespace snmalloc
-{
-  void error(const char* const str);
-} // namespace snmalloc
-
 // If simulating OE, then we need the underlying platform
 #if !defined(OPEN_ENCLAVE) || defined(OPEN_ENCLAVE_SIMULATION)
 # include "pal_apple.h"
diff --git a/src/pal/pal_apple.h b/src/pal/pal_apple.h
index 2e7554f6..b27dd43c 100644
--- a/src/pal/pal_apple.h
+++ b/src/pal/pal_apple.h
@@ -34,7 +34,7 @@ namespace snmalloc
     {
       if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
       {
-        assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
+        SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
         void* r = mmap(
           p,
           size,
diff --git a/src/pal/pal_bsd.h b/src/pal/pal_bsd.h
index 7252c864..2641a037 100644
--- a/src/pal/pal_bsd.h
+++ b/src/pal/pal_bsd.h
@@ -33,7 +33,7 @@ namespace snmalloc
    */
   void notify_not_using(void* p, size_t size) noexcept
   {
-    assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
+    SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
     madvise(p, size, MADV_FREE);
   }
 };
diff --git a/src/pal/pal_bsd_aligned.h b/src/pal/pal_bsd_aligned.h
index 17e97ab4..7ca1277b 100644
--- a/src/pal/pal_bsd_aligned.h
+++ b/src/pal/pal_bsd_aligned.h
@@ -30,7 +30,7 @@ namespace snmalloc
     void* reserve(size_t size, size_t align) noexcept
     {
       // Alignment must be a power of 2.
-      assert(align == bits::next_pow2(align));
+      SNMALLOC_ASSERT(align == bits::next_pow2(align));

       align = bits::max(4096, align);
diff --git a/src/pal/pal_consts.h b/src/pal/pal_consts.h
index 0c047930..3d2bb5fa 100644
--- a/src/pal/pal_consts.h
+++ b/src/pal/pal_consts.h
@@ -1,5 +1,9 @@
 #pragma once

+#include "../ds/defines.h"
+
+#include <atomic>
+
 namespace snmalloc
 {
   /**
@@ -112,4 +116,4 @@ namespace snmalloc
       }
     }
   };
-} // namespace snmalloc
+} // namespace snmalloc
\ No newline at end of file
diff --git a/src/pal/pal_linux.h b/src/pal/pal_linux.h
index f979943d..b70add14 100644
--- a/src/pal/pal_linux.h
+++ b/src/pal/pal_linux.h
@@ -39,7 +39,7 @@ namespace snmalloc
     {
       if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
       {
-        assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
+        SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
         madvise(p, size, MADV_DONTNEED);
       }
       else
diff --git a/src/pal/pal_posix.h b/src/pal/pal_posix.h
index 6ff5dc54..57d76acd 100644
--- a/src/pal/pal_posix.h
+++ b/src/pal/pal_posix.h
@@ -53,7 +53,7 @@ namespace snmalloc
      */
     void notify_not_using(void* p, size_t size) noexcept
     {
-      assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
+      SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
 #ifdef USE_POSIX_COMMIT_CHECKS
       mprotect(p, size, PROT_NONE);
 #else
@@ -72,7 +72,8 @@ namespace snmalloc
     template<ZeroMem zero_mem>
     void notify_using(void* p, size_t size) noexcept
     {
-      assert(is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));
+      SNMALLOC_ASSERT(
+        is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));

       if constexpr (zero_mem == YesZero)
         static_cast<OS*>(this)->template zero<true>(p, size);
@@ -101,7 +102,7 @@ namespace snmalloc
     {
       if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
       {
-        assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
+        SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
         void* r = mmap(
           p,
           size,
diff --git a/src/pal/pal_windows.h b/src/pal/pal_windows.h
index b96deb2e..0f3b608d 100644
--- a/src/pal/pal_windows.h
+++ b/src/pal/pal_windows.h
@@ -110,7 +110,7 @@ namespace snmalloc
     /// Notify platform that we will not be using these pages
     void notify_not_using(void* p, size_t size) noexcept
     {
-      assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
+      SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));

       BOOL ok = VirtualFree(p, size, MEM_DECOMMIT);
@@ -122,7 +122,8 @@ namespace snmalloc
     template<ZeroMem zero_mem>
     void notify_using(void* p, size_t size) noexcept
     {
-      assert(is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));
+      SNMALLOC_ASSERT(
+        is_aligned_block<OS_PAGE_SIZE>(p, size) || (zero_mem == NoZero));

       void* r = VirtualAlloc(p, size, MEM_COMMIT, PAGE_READWRITE);
@@ -136,7 +137,7 @@ namespace snmalloc
     {
       if (page_aligned || is_aligned_block<OS_PAGE_SIZE>(p, size))
       {
-        assert(is_aligned_block<OS_PAGE_SIZE>(p, size));
+        SNMALLOC_ASSERT(is_aligned_block<OS_PAGE_SIZE>(p, size));
         notify_not_using(p, size);
         notify_using<NoZero>(p, size);
       }
diff --git a/src/test/func/memory/memory.cc b/src/test/func/memory/memory.cc
index bc8198e4..329c447c 100644
--- a/src/test/func/memory/memory.cc
+++ b/src/test/func/memory/memory.cc
@@ -73,7 +73,7 @@ void test_random_allocation()
       cell = alloc->alloc(16);
       auto pair = allocated.insert(cell);
       // Check not already allocated
-      assert(pair.second);
+      SNMALLOC_ASSERT(pair.second);
       UNUSED(pair);
       alloc_count++;
     }
@@ -130,14 +130,14 @@ void test_double_alloc()
     for (size_t i = 0; i < (n * 2); i++)
     {
       void* p = a1->alloc(20);
-      assert(set1.find(p) == set1.end());
+      SNMALLOC_ASSERT(set1.find(p) == set1.end());
       set1.insert(p);
     }

     for (size_t i = 0; i < (n * 2); i++)
     {
       void* p = a2->alloc(20);
-      assert(set2.find(p) == set2.end());
+      SNMALLOC_ASSERT(set2.find(p) == set2.end());
       set2.insert(p);
     }
@@ -178,8 +178,8 @@ void test_external_pointer()
       void* p4 = Alloc::external_pointer<End>(p2);
       UNUSED(p3);
       UNUSED(p4);
-      assert(p1 == p3);
-      assert((size_t)p4 == (size_t)p1 + size - 1);
+      SNMALLOC_ASSERT(p1 == p3);
+      SNMALLOC_ASSERT((size_t)p4 == (size_t)p1 + size - 1);
     }

     alloc->dealloc(p1, size);
@@ -281,7 +281,7 @@ void test_alloc_16M()
   const size_t size = 16'000'000;

   void* p1 = alloc->alloc(size);
-  assert(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
+  SNMALLOC_ASSERT(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
   alloc->dealloc(p1);
 }
@@ -292,7 +292,7 @@ void test_calloc_16M()
   const size_t size = 16'000'000;

   void* p1 = alloc->alloc(size);
-  assert(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
+  SNMALLOC_ASSERT(Alloc::alloc_size(Alloc::external_pointer(p1)) >= size);
   alloc->dealloc(p1);
 }
diff --git a/src/test/func/sizeclass/sizeclass.cc b/src/test/func/sizeclass/sizeclass.cc
index 0a89b2cd..757d3a6e 100644
--- a/src/test/func/sizeclass/sizeclass.cc
+++ b/src/test/func/sizeclass/sizeclass.cc
@@ -12,7 +12,7 @@ void test_align_size()
 {
   bool failed = false;

-  assert(snmalloc::aligned_size(128, 160) == 256);
+  SNMALLOC_ASSERT(snmalloc::aligned_size(128, 160) == 256);

   for (size_t size = 1;
        size < snmalloc::sizeclass_to_size(snmalloc::NUM_SIZECLASSES - 1);
diff --git a/src/test/histogram.h b/src/test/histogram.h
index 8203c79f..e5a56599 100644
--- a/src/test/histogram.h
+++ b/src/test/histogram.h
@@ -77,7 +77,7 @@ namespace histogram
       else
       {
         auto i = get_index(value);
-        assert(i < BUCKETS);
+        SNMALLOC_ASSERT(i < BUCKETS);
         count[i]++;
       }
     }
diff --git a/src/test/perf/singlethread/singlethread.cc b/src/test/perf/singlethread/singlethread.cc
index 3909521e..97053ce6 100644
--- a/src/test/perf/singlethread/singlethread.cc
+++ b/src/test/perf/singlethread/singlethread.cc
@@ -20,7 +20,7 @@ void test_alloc_dealloc(size_t count, size_t size, bool write)
     for (size_t i = 0; i < ((count * 3) / 2); i++)
    {
       void* p = alloc->alloc(size);
-      assert(set.find(p) == set.end());
+      SNMALLOC_ASSERT(set.find(p) == set.end());

       if (write)
         *(int*)p = 4;
@@ -35,14 +35,14 @@ void test_alloc_dealloc(size_t count, size_t size, bool write)
       void* p = *it;
       alloc->dealloc(p, size);
       set.erase(it);
-      assert(set.find(p) == set.end());
+      SNMALLOC_ASSERT(set.find(p) == set.end());
     }

     // alloc 1x objects
     for (size_t i = 0; i < count; i++)
     {
       void* p = alloc->alloc(size);
-      assert(set.find(p) == set.end());
+      SNMALLOC_ASSERT(set.find(p) == set.end());

       if (write)
         *(int*)p = 4;
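
Note for reviewers: the following is a minimal standalone sketch, not part of the patch, showing how the SNMALLOC_ASSERT macro introduced in src/ds/defines.h behaves. The snmalloc::error stub below is an assumption for illustration only; in snmalloc the function is merely declared in defines.h and the real implementation, which terminates the process, is supplied elsewhere by the platform layer.

// sketch.cc -- illustrative only; mirrors the macro added in src/ds/defines.h.
// The snmalloc::error body here is a hypothetical stand-in for the real one.
#include <cstdio>
#include <cstdlib>

namespace snmalloc
{
  void error(const char* const str)
  {
    std::fprintf(stderr, "%s\n", str); // report the failure
    std::abort();                      // then terminate, as the real PAL would
  }
} // namespace snmalloc

#ifdef NDEBUG
# define SNMALLOC_ASSERT(expr) ((void)0)
#else
# define SNMALLOC_ASSERT(expr) \
  { \
    if (!(expr)) \
    { \
      snmalloc::error("assert fail"); \
    } \
  }
#endif

int main()
{
  SNMALLOC_ASSERT(2 + 2 == 4); // passes: no effect in any build
  SNMALLOC_ASSERT(2 + 2 == 5); // debug build: prints "assert fail" and aborts;
                               // with -DNDEBUG the check compiles away entirely
  return 0;
}

Unlike a raw <cassert> call, this routes assertion failures through the allocator's own error path, which is why the patch can also drop the #ifndef NDEBUG guards that previously wrapped the asserts (see the src/ds/mpscq.h hunk above).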