From 2f8f376db85965ed5ee36cf4cc3768da7683feeb Mon Sep 17 00:00:00 2001 From: Matthew Parkinson Date: Sat, 17 Sep 2022 14:30:13 +0100 Subject: [PATCH] Pagemap Rounding (#558) * Extend pagemap test Check for possible overlap between heap and pagemap, by writing and reading the heap. * Return unaligned memory from the pagemap This commit allows the pagemap to return an unaligned range of memory. This means that bump allocation of multiple pagemaps doesn't waste as much space. --- src/snmalloc/backend_helpers/pagemap.h | 57 +++++++++++++++++--------- src/test/func/pagemap/pagemap.cc | 34 +++++++++++++-- 2 files changed, 68 insertions(+), 23 deletions(-) diff --git a/src/snmalloc/backend_helpers/pagemap.h b/src/snmalloc/backend_helpers/pagemap.h index 55535a64..dd0b3131 100644 --- a/src/snmalloc/backend_helpers/pagemap.h +++ b/src/snmalloc/backend_helpers/pagemap.h @@ -17,6 +17,7 @@ namespace snmalloc { private: static constexpr size_t SHIFT = GRANULARITY_BITS; + static constexpr size_t GRANULARITY = bits::one_at_bit(GRANULARITY_BITS); /** * Before init is called will contain a single entry @@ -117,31 +118,47 @@ namespace snmalloc #endif SNMALLOC_ASSERT(s != 0); // TODO take account of pagemap size in the calculation of how big it - // needs to be. + // needs to be. The following code creates a pagemap that covers the + // pagemap as well as the left over. This is not ideal, and we should + // really calculate the division with + // + // GRANULARITY + sizeof(T) + // + // There are awkward corner cases for the alignment of the start and + // the end that are hard to calculate. So this is not currently done. - // Align the start and end. We won't store for the very ends as they - // are not aligned to a chunk boundary. 
- auto heap_base = pointer_align_up(b, bits::one_at_bit(GRANULARITY_BITS)); - auto end = pointer_align_down( - pointer_offset(b, s), bits::one_at_bit(GRANULARITY_BITS)); - size = pointer_diff(heap_base, end); + // Calculate range in pagemap that is associated to this space. + // Over calculate to cover any unaligned parts at either end. + base = bits::align_down(address_cast(b), GRANULARITY); + auto end = bits::align_up(address_cast(b) + s, GRANULARITY); + size = end - base; - // Put pagemap at start of range. - // TODO CHERI capability bound here! + // Setup the pagemap. body = static_cast(b); body_opt = body; - // Advance by size of pagemap. - // Note that base needs to be aligned to GRANULARITY for the rest of the - // code to work - // TODO CHERI capability bound here! - heap_base = pointer_align_up( - pointer_offset(b, (size >> SHIFT) * sizeof(T)), - bits::one_at_bit(GRANULARITY_BITS)); - base = address_cast(heap_base); - SNMALLOC_ASSERT( - base == bits::align_up(base, bits::one_at_bit(GRANULARITY_BITS))); - return {heap_base, pointer_diff(heap_base, end)}; + // Calculate size of pagemap. + auto pagemap_size = (size >> SHIFT) * sizeof(T); + + // Advance by size of pagemap. + // TODO CHERI capability bound here! + auto heap_base = pointer_offset(b, pagemap_size); + + // The following assert prevents the corner case where the pagemap + // occupies the entire address space, and this + // s - pagemap_size + // can underflow. + static_assert( + sizeof(T) < (1 << SHIFT), + "Pagemap entry too large relative to granularity"); + + if (pagemap_size > s) + { + // The pagemap is larger than the available space. 
error("Pagemap is larger than the available space."); + } + + return {heap_base, s - pagemap_size}; } /** diff --git a/src/test/func/pagemap/pagemap.cc b/src/test/func/pagemap/pagemap.cc index e6318dfb..24218b55 100644 --- a/src/test/func/pagemap/pagemap.cc +++ b/src/test/func/pagemap/pagemap.cc @@ -56,7 +56,8 @@ void set(bool bounded, address_t address, T new_value) void test_pagemap(bool bounded) { address_t low = bits::one_at_bit(23); - address_t high = bits::one_at_bit(30); + address_t high = bits::one_at_bit(29); + void* base = nullptr; // Nullptr needs to work before initialisation CHECK_GET(bounded, 0, T()); @@ -64,8 +65,8 @@ void test_pagemap(bool bounded) // Initialise the pagemap if (bounded) { - auto size = bits::one_at_bit(30); - auto base = DefaultPal::reserve(size); + auto size = bits::one_at_bit(29); + base = DefaultPal::reserve(size); DefaultPal::notify_using(base, size); std::cout << "Fixed base: " << base << " (" << size << ") " << " end: " << pointer_offset(base, size) << std::endl; @@ -73,7 +74,10 @@ void test_pagemap(bool bounded) std::cout << "Heap base: " << heap_base << " (" << heap_size << ") " << " end: " << pointer_offset(heap_base, heap_size) << std::endl; low = address_cast(heap_base); + base = heap_base; high = low + heap_size; + // Store a pattern in heap. + memset(base, 0x23, high - low); } else { @@ -99,6 +103,30 @@ void test_pagemap(bool bounded) // Check pattern is correctly stored std::cout << std::endl; + + if (bounded) + { + std::cout << "Checking heap" << std::endl; + // Check we have not corrupted the heap. + for (size_t offset = 0; offset < high - low; offset++) + { + if ((offset % (1ULL << 26)) == 0) + std::cout << "." << std::flush; + auto* p = ((char*)base) + offset; + if (*p != 0x23) + { + printf("Heap and pagemap have collided at %p", p); + abort(); + } + } + + std::cout << std::endl; + std::cout << "Storing new pattern" << std::endl; + // Re-store the pattern in the heap (NOTE(review): the value written is the same 0x23 as before, not a different pattern — confirm intent). 
+ memset(base, 0x23, high - low); + } + + std::cout << "Checking pagemap contents" << std::endl; value = 1; for (address_t ptr = low; ptr < high; ptr += bits::one_at_bit(GRANULARITY_BITS + 3))