diff --git a/memory/mozjemalloc/mozjemalloc.cpp b/memory/mozjemalloc/mozjemalloc.cpp
index 6339f68e0b96..b24fc357c487 100644
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -140,7 +140,6 @@
 #include
 #include
 #include
-#include

 #ifdef MOZ_MEMORY_WINDOWS

@@ -149,6 +148,7 @@
 #include
 #include
 #include
+#include

 #define SIZE_T_MAX SIZE_MAX
 #define STDERR_FILENO 2
@@ -2170,6 +2170,18 @@ label_return:
   base_node_dealloc(xprev);
 }

+static bool
+chunk_dalloc_mmap(void *chunk, size_t size)
+{
+  if (CAN_RECYCLE(size) && load_acquire_z(&recycled_size) < recycle_limit)
+    return true;
+
+  pages_unmap(chunk, size);
+  return false;
+}
+
+#undef CAN_RECYCLE
+
 static void
 chunk_dealloc(void *chunk, size_t size, ChunkType type)
 {
@@ -2181,29 +2193,10 @@ chunk_dealloc(void *chunk, size_t size, ChunkType type)

   malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, nullptr);

-  if (CAN_RECYCLE(size)) {
-    size_t recycled_so_far = load_acquire_z(&recycled_size);
-    // In case some race condition put us above the limit.
-    if (recycled_so_far < recycle_limit) {
-      size_t recycle_remaining = recycle_limit - recycled_so_far;
-      size_t to_recycle;
-      if (size > recycle_remaining) {
-        to_recycle = recycle_remaining;
-        // Drop pages that would overflow the recycle limit
-        pages_trim(chunk, size, 0, to_recycle);
-      } else {
-        to_recycle = size;
-      }
-      chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, to_recycle, type);
-    }
-    return;
-  }
-
-  pages_unmap(chunk, size);
+  if (chunk_dalloc_mmap(chunk, size))
+    chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size, type);
 }

-#undef CAN_RECYCLE
-
 /*
  * End chunk management functions.
  */