Initial cut at a lazy decommit strategy.

This does not deallocate memory until the OS tells us that we are short
on memory; it then tries to decommit all of the cached chunks (except for
the first page of each, which is used for the linked lists).

Nowhere near enough testing to commit to master yet!
David Chisnall 2019-02-20 16:34:50 +00:00 committed by David Chisnall
Parent 7a9ce97166
Commit 66cec23b23
9 changed files with 243 additions and 6 deletions
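The idea, in a minimal standalone sketch (this is not the snmalloc code; the chunk cache, page-size constant and os_decommit hook below are hypothetical stand-ins for the PAL): freed chunks stay committed in a cache during normal operation, and only a low-memory callback walks the cache and decommits everything except the first page of each chunk, which still holds the intrusive list link.

#include <atomic>
#include <cstddef>

// Hypothetical stand-ins; the real allocator goes through its PAL.
static constexpr size_t kPageSize = 4096;
inline void os_decommit(void*, size_t) { /* MEM_DECOMMIT / MADV_FREE, etc. */ }

struct CachedChunk
{
  CachedChunk* next{nullptr}; // intrusive link, lives in the first page
  size_t size{0};
  bool decommitted{false};
};

std::atomic<CachedChunk*> chunk_cache{nullptr};

// Normal operation: freed chunks are cached fully committed.
inline void cache_chunk(CachedChunk* c)
{
  c->decommitted = false;
  c->next = chunk_cache.load(std::memory_order_relaxed);
  while (!chunk_cache.compare_exchange_weak(c->next, c)) {}
}

// Low-memory callback: decommit all but the first page of each cached chunk,
// so that the links (and the chunk headers) remain readable.
inline void on_low_memory()
{
  for (CachedChunk* c = chunk_cache.load(); c != nullptr; c = c->next)
  {
    if (!c->decommitted)
    {
      os_decommit(reinterpret_cast<char*>(c) + kPageSize, c->size - kPageSize);
      c->decommitted = true;
    }
  }
}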

View file

@@ -999,6 +999,15 @@ namespace snmalloc
(void*)((size_t)super + OS_PAGE_SIZE),
SUPERSLAB_SIZE - OS_PAGE_SIZE);
}
else if constexpr (decommit_strategy == DecommitSuperLazy)
{
static_assert(
std::remove_reference_t<decltype(
large_allocator
.memory_provider)>::supports_low_memory_notification,
"A lazy decommit strategy cannot be implemented on platforms "
"without low memory notifications");
}
pagemap().clear_slab(super);
large_allocator.dealloc(super, 0);
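The static_assert above ties the lazy strategy to a platform capability at compile time. A hedged sketch of the same pattern with made-up names (FakePALWithNotify, FakePALNoNotify, Strategy and dealloc_chunk are illustrative, not snmalloc types): each PAL exports a constexpr flag, and the lazy branch asserts on it so that an unsupported configuration fails to build rather than silently never decommitting.

// Hypothetical PALs: only one can deliver low-memory notifications.
struct FakePALWithNotify { static constexpr bool supports_low_memory_notification = true; };
struct FakePALNoNotify { static constexpr bool supports_low_memory_notification = false; };

enum Strategy { Eager, Lazy };

template<class PAL, Strategy strategy>
void dealloc_chunk()
{
  if constexpr (strategy == Lazy)
  {
    static_assert(
      PAL::supports_low_memory_notification,
      "A lazy decommit strategy cannot be implemented on platforms "
      "without low memory notifications");
    // Defer the decommit until the PAL reports memory pressure.
  }
  else
  {
    // Decommit (all but the first page) immediately.
  }
}

// dealloc_chunk<FakePALWithNotify, Lazy>(); // compiles
// dealloc_chunk<FakePALNoNotify, Lazy>();   // rejected by the static_assert
// dealloc_chunk<FakePALNoNotify, Eager>();  // compiles: the lazy branch is discarded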

View file

@@ -60,14 +60,30 @@ namespace snmalloc
enum DecommitStrategy
{
/**
* Never decommit memory.
*/
DecommitNone,
/**
* Decommit superslabs when they are entirely empty.
*/
DecommitSuper,
DecommitAll
/**
* Decommit all slabs once they are empty.
*/
DecommitAll,
/**
* Decommit superslabs only when we are informed of memory pressure by the
* OS; do not decommit anything in normal operation.
*/
DecommitSuperLazy
};
static constexpr DecommitStrategy decommit_strategy =
#ifdef USE_DECOMMIT_STRATEGY
USE_DECOMMIT_STRATEGY
#elif defined(_WIN32)
DecommitSuperLazy
#else
DecommitSuper
#endif
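On Windows the default therefore becomes DecommitSuperLazy, and everywhere else it remains DecommitSuper unless the build overrides it. A hedged usage sketch, assuming the macro is visible before the configuration header is processed (typically it would be passed as a compiler flag such as -DUSE_DECOMMIT_STRATEGY=DecommitSuperLazy; the include path below is illustrative):

#define USE_DECOMMIT_STRATEGY DecommitSuperLazy
#include "snmalloc.h" // hypothetical entry-point header

static_assert(
  snmalloc::decommit_strategy == snmalloc::DecommitSuperLazy,
  "expected the override to take effect");

Note that forcing the lazy strategy on a platform whose PAL reports supports_low_memory_notification == false would then trip the static_assert shown in the first hunk above.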

View file

@@ -10,7 +10,12 @@ namespace snmalloc
Fresh = 0,
Large,
Medium,
Super
Super,
/**
* If the decommit policy is lazy, slabs are moved to this state when all
* pages other than the first one have been decommitted.
*/
Decommitted
};
class Baseslab

View file

@@ -12,6 +12,9 @@
namespace snmalloc
{
template<class MemoryProviderState>
class MemoryProviderStateMixin;
class Largeslab : public Baseslab
{
// This is the view of a contiguous memory area when it is being kept
@@ -19,6 +22,8 @@ namespace snmalloc
private:
template<class a, Construction c>
friend class MPMCStack;
template<class MemoryProviderState>
friend class MemoryProviderStateMixin;
std::atomic<Largeslab*> next;
public:
@@ -28,12 +33,21 @@ namespace snmalloc
}
};
struct Decommittedslab : public Largeslab
{
Decommittedslab()
{
kind = Decommitted;
}
};
// This represents the state that the large allocator needs to add to the
// global state of the allocator. This is currently stored in the memory
// provider, so we add this in.
template<class MemoryProviderState>
class MemoryProviderStateMixin : public MemoryProviderState
{
using MemoryProviderState::supports_low_memory_notification;
std::atomic_flag lock = ATOMIC_FLAG_INIT;
size_t bump;
size_t remaining;
@@ -59,6 +73,52 @@ namespace snmalloc
remaining = r_size.second;
}
/**
* The last time we saw a low memory notification.
*/
std::atomic<uint64_t> last_low_memory_epoch = 0;
std::atomic_flag lazy_decommit_guard = ATOMIC_FLAG_INIT;
void lazy_decommit()
{
// If another thread is trying to do lazy decommit, let it continue. If
// we try to parallelise this, we'll most likely end up waiting on the
// same page table locks.
if (lazy_decommit_guard.test_and_set())
{
return;
}
// When we hit low memory, iterate over size classes and decommit all of
// the memory that we can. Start with the small size classes so that we
// hit cached superslabs first.
// FIXME: We probably shouldn't do this all at once.
for (size_t large_class = 0; large_class < NUM_LARGE_CLASSES;
large_class++)
{
size_t rsize = ((size_t)1 << SUPERSLAB_BITS) << large_class;
size_t decommit_size = rsize - OS_PAGE_SIZE;
// Grab all of the chunks of this size class.
auto* slab = large_stack[large_class].pop_all();
while (slab)
{
// Decommit all except for the first page and then put it back on
// the stack.
if (slab->get_kind() != Decommitted)
{
notify_not_using(((char*)slab) + OS_PAGE_SIZE, decommit_size);
}
// Once we've removed these from the stack, there will be no
// concurrent accesses and removal should have established a
// happens-before relationship, so it's safe to use relaxed loads
// here.
auto next = slab->next.load(std::memory_order_relaxed);
large_stack[large_class].push(new (slab) Decommittedslab());
slab = next;
}
}
lazy_decommit_guard.clear();
}
public:
/**
* Stack of large allocations that have been returned for reuse.
@@ -68,7 +128,7 @@ namespace snmalloc
/**
* Primitive allocator for structures that are required before
* the allocator can be running.
***/
*/
template<size_t alignment = 64>
void* alloc_chunk(size_t size)
{
@@ -109,6 +169,37 @@ namespace snmalloc
return p;
}
#define TEST_LAZY_DECOMMIT 4
ALWAYSINLINE void lazy_decommit_if_needed()
{
#ifdef TEST_LAZY_DECOMMIT
static_assert(TEST_LAZY_DECOMMIT > 0, "TEST_LAZY_DECOMMIT must be a positive integer value.");
static std::atomic<uint64_t> counter;
auto c = counter++;
if (c % TEST_LAZY_DECOMMIT == 0)
{
lazy_decommit();
}
#else
if constexpr (decommit_strategy == DecommitSuperLazy)
{
auto new_epoch = low_memory_epoch();
auto old_epoch = last_low_memory_epoch.load(std::memory_order_acquire);
if (new_epoch > old_epoch)
{
// Try to update the epoch to the value that we've seen. If
// another thread has seen a newer epoch than us (or done the same
// update) let them win.
do
{
last_low_memory_epoch.compare_exchange_strong(old_epoch, new_epoch);
} while (old_epoch < new_epoch);
lazy_decommit();
}
}
#endif
}
};
using Stats = AllocStats<NUM_SIZECLASSES, NUM_LARGE_CLASSES>;
@@ -167,6 +258,7 @@ namespace snmalloc
size = rsize;
void* p = memory_provider.large_stack[large_class].pop();
memory_provider.lazy_decommit_if_needed();
if (p == nullptr)
{
@@ -189,6 +281,29 @@ namespace snmalloc
}
else
{
if constexpr (decommit_strategy == DecommitSuperLazy)
{
if (static_cast<Baseslab*>(p)->get_kind() == Decommitted)
{
// The first page is already in "use" for the stack element;
// it will need zeroing for a YesZero call.
if constexpr (zero_mem == YesZero)
memory_provider.template zero<true>(p, OS_PAGE_SIZE);
// Notify we are using the rest of the allocation.
// Passing zero_mem ensures the PAL provides zeroed pages if
// required.
memory_provider.template notify_using<zero_mem>(
(void*)((size_t)p + OS_PAGE_SIZE),
bits::align_up(size, OS_PAGE_SIZE) - OS_PAGE_SIZE);
}
else
{
if constexpr (zero_mem == YesZero)
memory_provider.template zero<true>(
p, bits::align_up(size, OS_PAGE_SIZE));
}
}
if ((decommit_strategy != DecommitNone) || (large_class > 0))
{
// The first page is already in "use" for the stack element,
@@ -216,7 +331,8 @@ namespace snmalloc
void dealloc(void* p, size_t large_class)
{
memory_provider.large_stack[large_class].push((Largeslab*)p);
memory_provider.large_stack[large_class].push(static_cast<Largeslab*>(p));
memory_provider.lazy_decommit_if_needed();
}
};
@@ -226,4 +342,4 @@ namespace snmalloc
* passed as an argument.
*/
HEADER_GLOBAL GlobalVirtual default_memory_provider;
}
}
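The epoch handshake in lazy_decommit_if_needed is the subtle part: each allocator remembers the last pressure epoch it reacted to and only runs a decommit pass after observing a newer one, publishing the newest value it has seen with a CAS so that concurrent observers do not fight over it. A standalone sketch of that pattern (pal_pressure_epoch, run_decommit_pass and maybe_decommit are illustrative names, not the snmalloc API):

#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<uint64_t> pal_pressure_epoch{0}; // bumped by the low-memory callback
std::atomic<uint64_t> last_handled_epoch{0}; // last epoch this allocator reacted to

void run_decommit_pass() { std::puts("decommitting cached chunks"); }

// Called from allocation/deallocation paths; cheap when nothing has changed.
void maybe_decommit()
{
  uint64_t new_epoch = pal_pressure_epoch.load(std::memory_order_acquire);
  uint64_t old_epoch = last_handled_epoch.load(std::memory_order_acquire);
  if (new_epoch > old_epoch)
  {
    // Publish the newest epoch we have seen. If another thread has already
    // published this epoch (or a newer one), let it win; we still run the
    // pass because we did observe fresh pressure.
    while (old_epoch < new_epoch &&
           !last_handled_epoch.compare_exchange_strong(old_epoch, new_epoch))
    {
      // compare_exchange_strong reloads old_epoch on failure.
    }
    run_decommit_pass();
  }
}

int main()
{
  maybe_decommit();     // no pressure seen yet: does nothing
  pal_pressure_epoch++; // simulate a low-memory notification
  maybe_decommit();     // runs the decommit pass
  maybe_decommit();     // same epoch again: does nothing
}

In the real code the lazy_decommit_guard flag additionally stops two threads that both saw the new epoch from walking the stacks at the same time.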

View file

@@ -24,6 +24,11 @@ namespace snmalloc
}
public:
/**
* Flag indicating that this PAL does not support low pressure
* notifications.
*/
static constexpr bool supports_low_memory_notification = false;
void error(const char* const str)
{
panic("snmalloc error: %s", str);

View file

@@ -14,6 +14,11 @@ namespace snmalloc
class PALFBSD
{
public:
/**
* Flag indicating that this PAL does not support low pressure
* notifications.
*/
static constexpr bool supports_low_memory_notification = false;
static void error(const char* const str)
{
puts(str);

View file

@@ -13,6 +13,11 @@ namespace snmalloc
class PALLinux
{
public:
/**
* Flag indicating that this PAL does not support low pressure
* notifications.
*/
static constexpr bool supports_low_memory_notification = false;
static void error(const char* const str)
{
puts(str);

View file

@@ -14,6 +14,11 @@ namespace snmalloc
std::atomic<uintptr_t> oe_base;
public:
/**
* Flag indicating that this PAL does not support low pressure
* notifications.
*/
static constexpr bool supports_low_memory_notification = false;
static void error(const char* const str)
{
UNUSED(str);
@@ -55,4 +60,4 @@ namespace snmalloc
}
};
}
#endif
#endif

View file

@@ -19,7 +19,75 @@ namespace snmalloc
{
class PALWindows
{
/**
* The number of times that the memory pressure notification has fired.
*/
static std::atomic<uint64_t> pressure_epoch;
/**
* A flag indicating that we have tried to register for low-memory
* notifications.
*/
static std::atomic<bool> registered_for_notifications;
static HANDLE lowMemoryObject;
/**
* Callback, used when the system delivers a low-memory notification. This
* simply increments an atomic counter each time the notification is raised.
*/
static void CALLBACK low_memory(_In_ PVOID, _In_ BOOLEAN)
{
pressure_epoch++;
}
public:
PALWindows()
{
// No error handling here - if this doesn't work, then we will just
// consume more memory. There's nothing sensible that we could do in
// error handling. We also leak both the low memory notification object
// handle and the wait object handle. We'll need them until the program
// exits, so there's little point doing anything else.
//
// We only try to register once. If this fails, give up. Even if we
// create multiple PAL objects, we don't want to get more than one
// callback.
if (!registered_for_notifications.exchange(true))
{
lowMemoryObject =
CreateMemoryResourceNotification(LowMemoryResourceNotification);
HANDLE waitObject;
RegisterWaitForSingleObject(
&waitObject,
lowMemoryObject,
low_memory,
nullptr,
INFINITE,
WT_EXECUTEDEFAULT);
}
}
/**
* Flag indicating that this PAL supports the low pressure notification.
*/
static constexpr bool supports_low_memory_notification = true;
/**
* Counter value for the number of times that a low-memory notification
* has been delivered. Callers should compare this with a previous value
* to see if the low memory state has been triggered since they last
* checked.
*/
uint64_t low_memory_epoch()
{
return pressure_epoch.load(std::memory_order_acquire);
}
/**
* Check whether the low memory state is still in effect. This is an
* expensive operation and should not be on any fast paths.
*/
bool expensive_low_memory_check()
{
BOOL result;
QueryMemoryResourceNotification(lowMemoryObject, &result);
return result;
}
static void error(const char* const str)
{
puts(str);
@@ -127,5 +195,8 @@ namespace snmalloc
# endif
}
};
HEADER_GLOBAL std::atomic<uint64_t> PALWindows::pressure_epoch;
HEADER_GLOBAL std::atomic<bool> PALWindows::registered_for_notifications;
HEADER_GLOBAL HANDLE PALWindows::lowMemoryObject;
}
#endif
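For reference, the Windows mechanism used above in a minimal standalone form (error handling omitted, as in the commit; this is a sketch, not the snmalloc PAL): register once for the low-memory resource notification, count deliveries in an atomic epoch that fast paths can poll, and fall back to QueryMemoryResourceNotification when an authoritative but expensive answer is needed.

#include <windows.h>
#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uint64_t> pressure_epoch{0};

static void CALLBACK low_memory(PVOID, BOOLEAN)
{
  pressure_epoch++; // one increment per delivered notification
}

int main()
{
  HANDLE low_mem =
    CreateMemoryResourceNotification(LowMemoryResourceNotification);
  HANDLE wait_object;
  RegisterWaitForSingleObject(
    &wait_object, low_mem, low_memory, nullptr, INFINITE, WT_EXECUTEDEFAULT);

  // Fast paths poll the epoch and compare it with the last value they saw.
  std::printf(
    "epoch so far: %llu\n",
    static_cast<unsigned long long>(
      pressure_epoch.load(std::memory_order_acquire)));

  // The expensive, direct check of the current state.
  BOOL low_now = FALSE;
  QueryMemoryResourceNotification(low_mem, &low_now);
  std::printf("low memory right now: %d\n", static_cast<int>(low_now));

  Sleep(10000); // keep the process alive so a callback could be delivered
}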