Fix the sandbox use case and add a test. (#269)

Summary of changes:

- Add a new PAL that doesn't allocate memory, which can be used with a
  memory provider that is pre-initialised with a range of memory (a
  usage sketch follows this list).
- Add a `NoAllocation` PAL property so that the methods on a PAL that 
  doesn't support dynamically reserving address space will never be
  called and therefore don't need to be implemented.
- Slightly refactor the memory provider class so that it has a narrower
  interface with LargeAlloc and is easier to proxy.
- Allow the address space manager and the memory provider to be
  initialised with a range of memory.

This may eventually also remove the need for (or, at least, simplify)
the Open Enclave PAL.

This commit also ends up with a few other cleanups:

 - The `malloc_usable_size` CMake test that checks whether the
   parameter is const qualified was failing on FreeBSD, where this
   function is declared in `malloc_np.h` and including `malloc.h`
   raises an error.  This should now be more robust.
 - The BSD aligned PAL inherited from the BSD PAL, which does not
   expose aligned allocation. This meant that it exposed both the
   aligned and non-aligned allocation interfaces and so happily
   accepted incorrect `constexpr` if blocks that expected one or 
   the other but accidentally required both to exist. The unaligned
   function is now deleted, so the same failures that appear in CI
   should appear locally for anyone using this PAL; a sketch of the
   idiom follows.
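The deleted-function idiom generalises: delete the member that a platform must not provide, and any `constexpr if` branch that wrongly names it becomes a compile error when instantiated. A toy illustration (all names here are illustrative, not snmalloc's):

#include <cstddef>
#include <utility>

struct AlignedOnlyPal
{
  static constexpr bool aligned = true;

  template<bool committed>
  static void* reserve_aligned(size_t) noexcept
  {
    return nullptr; // Placeholder body; a real PAL would call the OS.
  }

  // Deleted rather than inherited: an instantiated branch that names
  // this now fails to compile instead of silently taking the unaligned
  // path.
  static std::pair<void*, size_t> reserve_at_least(size_t) noexcept = delete;
};

template<typename PAL>
void* reserve(size_t size)
{
  if constexpr (PAL::aligned)
  {
    // Correct branch for AlignedOnlyPal; the `else` branch is discarded,
    // so the deleted function is never odr-used.
    return PAL::template reserve_aligned<false>(size);
  }
  else
  {
    // If a wrong condition routed AlignedOnlyPal here, this call to the
    // deleted function would be rejected at compile time.
    return PAL::reserve_at_least(size).first;
  }
}

int main()
{
  return reserve<AlignedOnlyPal>(64) == nullptr ? 0 : 1;
}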
David Chisnall 2021-01-11 14:06:51 +00:00, committed by GitHub
Parent: 4837c82489
Commit: c33f355736
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
10 changed files: 427 additions and 11 deletions


@@ -21,8 +21,16 @@ option(SNMALLOC_OPTIMISE_FOR_CURRENT_MACHINE "Compile for current machine archit
set(CACHE_FRIENDLY_OFFSET OFF CACHE STRING "Base offset to place linked-list nodes.")
set(SNMALLOC_STATIC_LIBRARY_PREFIX "sn_" CACHE STRING "Static library function prefix")
# malloc.h will error if you include it on FreeBSD, so this test must not
# unconditionally include it.
CHECK_C_SOURCE_COMPILES("
#if __has_include(<malloc_np.h>)
#include <malloc_np.h>
#elif __has_include(<malloc/malloc.h>)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif
size_t malloc_usable_size(const void* ptr) { return 0; }
int main() { return 0; }
" CONST_QUALIFIED_MALLOC_USABLE_SIZE)
@@ -307,6 +315,9 @@ if(NOT DEFINED SNMALLOC_ONLY_HEADER_LIBRARY)
if (${SUPER_SLAB_SIZE} STREQUAL "malloc")
target_compile_definitions(${TESTNAME} PRIVATE SNMALLOC_PASS_THROUGH)
endif()
if(CONST_QUALIFIED_MALLOC_USABLE_SIZE)
target_compile_definitions(${TESTNAME} PRIVATE -DMALLOC_USABLE_SIZE_QUALIFIER=const)
endif()
target_link_libraries(${TESTNAME} snmalloc_lib)
if (${TEST} MATCHES "release-.*")
message(STATUS "Adding test: ${TESTNAME} only for release configs")
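For reference, this is roughly how a definition such as `MALLOC_USABLE_SIZE_QUALIFIER` is consumed on the C++ side; the consuming code is not part of this diff, so the pattern below is an assumption, not a quote:

#include <cstddef>

// When the CMake check fails, the qualifier expands to nothing and the
// signature matches the platform's unqualified declaration.
#ifndef MALLOC_USABLE_SIZE_QUALIFIER
#  define MALLOC_USABLE_SIZE_QUALIFIER
#endif

// Matches the platform header whether or not the parameter is
// const qualified there.
extern "C" size_t malloc_usable_size(MALLOC_USABLE_SIZE_QUALIFIER void* ptr);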


@@ -191,14 +191,14 @@ namespace snmalloc
if (res == nullptr)
{
// Allocation failed ask OS for more memory
-      void* block;
-      size_t block_size;
+      void* block = nullptr;
+      size_t block_size = 0;
if constexpr (pal_supports<AlignedAllocation, PAL>)
{
block_size = PAL::minimum_alloc_size;
block = PAL::template reserve_aligned<false>(block_size);
}
-      else
+      else if constexpr (!pal_supports<NoAllocation, PAL>)
{
// Need at least 2 times the space to guarantee alignment.
// Hold lock here as a race could cause additional requests to
@@ -236,5 +236,21 @@ namespace snmalloc
return res;
}
/**
* Default constructor. An address-space manager constructed in this way
* does not own any memory at the start and will request any that it needs
* from the PAL.
*/
AddressSpaceManager() = default;
/**
* Constructor that pre-initialises the address-space manager with a region
* of memory.
*/
AddressSpaceManager(void* base, size_t length)
{
add_range(base, length);
}
};
} // namespace snmalloc
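A hypothetical use of the new constructor, seeding the manager with memory obtained out of band (the PAL choice is illustrative, and the range is assumed to satisfy the manager's alignment requirements):

#include <snmalloc.h>

using NoOpPal = snmalloc::PALNoAlloc<snmalloc::DefaultPal>;

void example(void* base, size_t length)
{
  // The manager services requests from [base, base + length); because
  // the PAL has the NoAllocation property, it never asks the OS for more.
  snmalloc::AddressSpaceManager<NoOpPal> manager(base, length);
}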


@@ -196,6 +196,7 @@ namespace snmalloc
UNUSED(sc);
#ifdef USE_SNMALLOC_STATS
SNMALLOC_ASSUME(sc < LARGE_N);
large_pop_count[sc]++;
#endif
}


@@ -75,9 +75,6 @@ namespace snmalloc
*/
std::atomic<size_t> peak_memory_used_bytes{0};
-  public:
-    using Pal = PAL;
/**
* Memory currently available in large_stacks.
*/
@@ -88,6 +85,51 @@
*/
ModArray<NUM_LARGE_CLASSES, MPMCStack<Largeslab, RequiresInit>> large_stack;
public:
using Pal = PAL;
/**
* Pop an allocation from a large-allocation stack. This is safe to call
* concurrently with other accesses. If there is no large allocation on a
* particular stack then this will return `nullptr`.
*/
SNMALLOC_FAST_PATH void* pop_large_stack(size_t large_class)
{
void* p = large_stack[large_class].pop();
if (p != nullptr)
{
const size_t rsize = bits::one_at_bit(SUPERSLAB_BITS) << large_class;
available_large_chunks_in_bytes -= rsize;
}
return p;
}
/**
* Push `slab` onto the large-allocation stack associated with the size
* class specified by `large_class`. Always succeeds.
*/
SNMALLOC_FAST_PATH void
push_large_stack(Largeslab* slab, size_t large_class)
{
const size_t rsize = bits::one_at_bit(SUPERSLAB_BITS) << large_class;
available_large_chunks_in_bytes += rsize;
large_stack[large_class].push(slab);
}
/**
* Default constructor. This constructs a memory provider that doesn't yet
* own any memory, but which can claim memory from the PAL.
*/
MemoryProviderStateMixin() = default;
/**
* Construct a memory provider that owns some memory. The PAL used with
* memory providers constructed in this way does not need to be able to
* allocate memory, as long as the initial reservation is sufficient.
*/
MemoryProviderStateMixin(void* start, size_t len)
: address_space(start, len)
{}
/**
* Make a new memory provider for this PAL.
*/
@@ -253,7 +295,7 @@ namespace snmalloc
if (large_class == 0)
size = rsize;
-    void* p = memory_provider.large_stack[large_class].pop();
+    void* p = memory_provider.pop_large_stack(large_class);
if (p == nullptr)
{
@@ -265,7 +307,6 @@
else
{
stats.superslab_pop();
-      memory_provider.available_large_chunks_in_bytes -= rsize;
// Cross-reference alloc.h's large_dealloc decommitment condition.
bool decommitted =
@@ -323,8 +364,7 @@
}
stats.superslab_push();
-    memory_provider.available_large_chunks_in_bytes += rsize;
-    memory_provider.large_stack[large_class].push(static_cast<Largeslab*>(p));
+    memory_provider.push_large_stack(static_cast<Largeslab*>(p), large_class);
}
};


@@ -16,6 +16,7 @@
# include "pal_haiku.h"
# include "pal_linux.h"
# include "pal_netbsd.h"
# include "pal_noalloc.h"
# include "pal_openbsd.h"
# include "pal_solaris.h"
# include "pal_windows.h"


@@ -50,5 +50,17 @@ namespace snmalloc
return p;
}
/**
* Explicitly deleted method for returning non-aligned memory. This
* causes incorrect use of `constexpr if` to fail on platforms with
* aligned allocation. Without this, this PAL and its subclasses
* exported both allocation functions, so code calling either one from a
* `constexpr if` branch would type-check here and then fail on
* platforms such as Linux or Windows, which expose only unaligned or
* aligned allocation, respectively.
*/
static std::pair<void*, size_t>
reserve_at_least(size_t size) noexcept = delete;
};
} // namespace snmalloc


@@ -92,10 +92,11 @@ namespace snmalloc
ConceptPAL_memops<PAL> &&
(!(PAL::pal_features & LowMemoryNotification) ||
ConceptPAL_mem_low_notify<PAL>) &&
+  (!!(PAL::pal_features & NoAllocation) || (
   (!!(PAL::pal_features & AlignedAllocation) ||
   ConceptPAL_reserve_at_least<PAL>) &&
   (!(PAL::pal_features & AlignedAllocation) ||
-  ConceptPAL_reserve_aligned<PAL>);
+  ConceptPAL_reserve_aligned<PAL>)));
} // namespace snmalloc
#endif


@@ -36,6 +36,11 @@ namespace snmalloc
* exposed in the Pal.
*/
LazyCommit = (1 << 2),
/**
* This Pal does not support allocation. All memory used with this Pal
* should be pre-allocated.
*/
NoAllocation = (1 << 3),
};
/**
* Flag indicating whether requested memory should be zeroed.

src/pal/pal_noalloc.h (new file)

@@ -0,0 +1,80 @@
#pragma once
namespace snmalloc
{
/**
* Platform abstraction layer that does not allow allocation.
*
* This is a minimal PAL for pre-reserved memory regions, where the
* address-space manager is initialised with all of the memory that it will
* ever use.
*
* It takes an error handler delegate as a template argument. This is
* expected to forward to the default PAL in most cases.
*/
template<typename ErrorHandler>
struct PALNoAlloc
{
/**
* Bitmap of PalFeatures flags indicating the optional features that this
* PAL supports.
*/
static constexpr uint64_t pal_features = NoAllocation;
static constexpr size_t page_size = Aal::smallest_page_size;
/**
* Print a stack trace.
*/
static void print_stack_trace()
{
ErrorHandler::print_stack_trace();
}
/**
* Report a fatal error and exit.
*/
[[noreturn]] static void error(const char* const str) noexcept
{
ErrorHandler::error(str);
}
/**
* Notify platform that we will not be using these pages.
*
* This is a no-op in this stub.
*/
static void notify_not_using(void*, size_t) noexcept {}
/**
* Notify platform that we will be using these pages.
*
* This is a no-op in this stub, except for zeroing memory if required.
*/
template<ZeroMem zero_mem>
static void notify_using(void* p, size_t size) noexcept
{
if constexpr (zero_mem == YesZero)
{
zero<true>(p, size);
}
else
{
UNUSED(p);
UNUSED(size);
}
}
/**
* OS specific function for zeroing memory.
*
* This just calls memset - we don't assume that we have access to any
* virtual-memory functions.
*/
template<bool page_aligned = false>
static void zero(void* p, size_t size) noexcept
{
memset(p, 0, size);
}
};
} // namespace snmalloc
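The error-handler delegate does not need to be a full PAL; a hypothetical minimal delegate (not part of this patch) only needs the two members that `PALNoAlloc` forwards to:

#include <cstdio>
#include <cstdlib>
#include <snmalloc.h>

struct AbortingErrors
{
  // PALNoAlloc::print_stack_trace forwards here; a no-op is acceptable.
  static void print_stack_trace() {}

  // Must not return, because PALNoAlloc::error is [[noreturn]].
  [[noreturn]] static void error(const char* const msg) noexcept
  {
    fputs(msg, stderr);
    abort();
  }
};

using SandboxPal = snmalloc::PALNoAlloc<AbortingErrors>;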


@@ -0,0 +1,249 @@
#ifdef SNMALLOC_PASS_THROUGH
/*
* This test does not make sense with malloc pass-through; skip it.
*/
int main()
{
return 0;
}
#else
// The decommit strategy is currently a global policy and not per-allocator and
// so we need to tell Windows not to use the lazy strategy for this test.
# define USE_DECOMMIT_STRATEGY DecommitSuper
# include <snmalloc.h>
using namespace snmalloc;
namespace
{
/**
* Helper for Alloc that is never used as a thread-local allocator and so is
* always initialised.
*/
bool never_init(void*)
{
return false;
}
/**
* Helper for Alloc that never needs lazy initialisation.
*/
void* no_op_init(function_ref<void*(void*)>)
{
SNMALLOC_CHECK(0 && "Should never be called!");
return nullptr;
}
/**
* Sandbox class. Allocates a memory region and an allocator that can
* allocate into this from the outside.
*/
struct Sandbox
{
using NoOpPal = PALNoAlloc<DefaultPal>;
/**
* Type for the allocator that lives outside of the sandbox and allocates
* sandbox-owned memory.
*/
using ExternalAlloc = Allocator<
never_init,
no_op_init,
MemoryProviderStateMixin<NoOpPal>,
SNMALLOC_DEFAULT_CHUNKMAP,
false>;
/**
* Proxy class that forwards requests for large allocations to the real
* memory provider.
*
* In a real implementation, these would be cross-domain calls with the
* callee verifying the arguments.
*/
struct MemoryProviderProxy
{
/**
* The PAL that allocators using this memory provider should use.
*/
typedef NoOpPal Pal;
/**
* The pointer to the real state. In a real implementation there would
* likely be only one of these inside any given sandbox and so this would
* not have to be per-instance state.
*/
MemoryProviderStateMixin<NoOpPal>* real_state;
/**
* Pop an element from the large stack for the specified size class,
* proxying to the real implementation.
*
* This method must be implemented for `LargeAlloc` to work.
*/
void* pop_large_stack(size_t large_class)
{
return real_state->pop_large_stack(large_class);
}
/**
* Push an element to the large stack for the specified size class,
* proxying to the real implementation.
*
* This method must be implemented for `LargeAlloc` to work.
*/
void push_large_stack(Largeslab* slab, size_t large_class)
{
real_state->push_large_stack(slab, large_class);
}
/**
* Reserve (and optionally commit) memory for a large sizeclass,
* proxying to the real implementation.
*
* This method must be implemented for `LargeAlloc` to work.
*/
template<bool committed>
void* reserve(size_t large_class) noexcept
{
return real_state->template reserve<committed>(large_class);
}
};
/**
* Type for the allocator that exists inside the sandbox.
*
* Note that a real version of this would not have access to the shared
* pagemap and would not be used outside of the sandbox.
*/
using InternalAlloc =
Allocator<never_init, no_op_init, MemoryProviderProxy>;
/**
* The start of the sandbox memory region.
*/
void* start;
/**
* The end of the sandbox memory region.
*/
void* top;
/**
* State allocated in the sandbox that is shared between the inside and
* outside.
*/
struct SharedState
{
/**
* The message queue for the allocator that lives outside of the
* sandbox but allocates memory inside.
*/
struct RemoteAllocator queue;
} * shared_state;
/**
* The memory provider for this sandbox.
*/
MemoryProviderStateMixin<NoOpPal> state;
/**
* The allocator for callers outside the sandbox to allocate memory inside.
*/
ExternalAlloc alloc;
/**
* An allocator for callers inside the sandbox to allocate memory.
*/
InternalAlloc* internal_alloc;
/**
* Constructor. Takes the size of the sandbox as the argument.
*/
Sandbox(size_t sb_size)
: start(alloc_sandbox_heap(sb_size)),
top(pointer_offset(start, sb_size)),
shared_state(new (start) SharedState()),
state(
pointer_offset(start, sizeof(SharedState)),
sb_size - sizeof(SharedState)),
alloc(state, SNMALLOC_DEFAULT_CHUNKMAP(), &shared_state->queue)
{
auto* state_proxy = static_cast<MemoryProviderProxy*>(
alloc.alloc(sizeof(MemoryProviderProxy)));
state_proxy->real_state = &state;
// In real code, allocators should never be constructed like this; they
// should always come from an alloc pool. This is just to test that both
// kinds of allocator can be created.
internal_alloc =
new (alloc.alloc(sizeof(InternalAlloc))) InternalAlloc(*state_proxy);
}
Sandbox() = delete;
/**
* Predicate function for querying whether an object is entirely within the
* sandbox.
*/
bool is_in_sandbox(void* ptr, size_t sz)
{
return (ptr >= start) && (pointer_offset(ptr, sz) < top);
}
/**
* Predicate function for querying whether an object is entirely within the
* region of the sandbox allocated for its heap.
*/
bool is_in_sandbox_heap(void* ptr, size_t sz)
{
return (
ptr >= pointer_offset(start, sizeof(SharedState)) &&
(pointer_offset(ptr, sz) < top));
}
private:
template<typename PAL = DefaultPal>
void* alloc_sandbox_heap(size_t sb_size)
{
if constexpr (pal_supports<AlignedAllocation, PAL>)
{
return PAL::template reserve_aligned<true>(sb_size);
}
else
{
// Note: This wastes address space because the PAL will reserve
// double the amount we ask for to ensure alignment. It's fine for
// the test, but any call to this function that ignores `.second`
// (the allocated size) is deeply suspect.
void* ptr = PAL::reserve_at_least(sb_size).first;
PAL::template notify_using<YesZero>(ptr, sb_size);
return ptr;
}
}
};
}
int main()
{
static const size_t sb_size = 128 * 1024 * 1024;
// Check that we can create two sandboxes
Sandbox sb1(sb_size);
Sandbox sb2(sb_size);
auto check = [](Sandbox& sb, auto& alloc, size_t sz) {
void* ptr = alloc.alloc(sz);
SNMALLOC_CHECK(sb.is_in_sandbox_heap(ptr, sz));
ThreadAlloc::get_noncachable()->dealloc(ptr);
};
auto check_with_sb = [&](Sandbox& sb) {
// Check with a range of sizes
check(sb, sb.alloc, 32);
check(sb, *sb.internal_alloc, 32);
check(sb, sb.alloc, 240);
check(sb, *sb.internal_alloc, 240);
check(sb, sb.alloc, 513);
check(sb, *sb.internal_alloc, 513);
check(sb, sb.alloc, 10240);
check(sb, *sb.internal_alloc, 10240);
};
check_with_sb(sb1);
check_with_sb(sb2);
return 0;
}
#endif