diff --git a/memory/mozjemalloc/jemalloc.c b/memory/mozjemalloc/jemalloc.c
index 289c25e07d0b..6dd3c16e82fa 100644
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -1385,7 +1385,7 @@ static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
 static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
 static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
     size_t size, bool large, bool zero);
-static void	arena_purge(arena_t *arena);
+static void	arena_purge(arena_t *arena, bool all);
 static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
 static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
     arena_run_t *run, size_t oldsize, size_t newsize);
@@ -3594,10 +3594,12 @@ arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
 }
 
 static void
-arena_purge(arena_t *arena)
+arena_purge(arena_t *arena, bool all)
 {
 	arena_chunk_t *chunk;
 	size_t i, npages;
+	/* If all is set purge all dirty pages. */
+	size_t dirty_max = all ? 1 : opt_dirty_max;
 #ifdef MALLOC_DEBUG
 	size_t ndirty = 0;
 	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
@@ -3606,7 +3608,7 @@ arena_purge(arena_t *arena)
 	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
 	assert(ndirty == arena->ndirty);
 #endif
-	RELEASE_ASSERT(arena->ndirty > opt_dirty_max);
+	RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
 
 #ifdef MALLOC_STATS
 	arena->stats.npurge++;
@@ -3618,7 +3620,7 @@ arena_purge(arena_t *arena)
 	 * number of system calls, even if a chunk has only been partially
 	 * purged.
 	 */
-	while (arena->ndirty > (opt_dirty_max >> 1)) {
+	while (arena->ndirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
 		bool madvised = false;
 #endif
@@ -3675,7 +3677,7 @@ arena_purge(arena_t *arena)
 			arena->stats.nmadvise++;
 			arena->stats.purged += npages;
 #endif
-		if (arena->ndirty <= (opt_dirty_max >> 1))
+		if (arena->ndirty <= (dirty_max >> 1))
 			break;
 	}
 }
@@ -3800,7 +3802,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 
 	/* Enforce opt_dirty_max. */
 	if (arena->ndirty > opt_dirty_max)
-		arena_purge(arena);
+		arena_purge(arena, false);
 }
 
 static void
@@ -6854,6 +6856,21 @@ _msize(const void *ptr)
 }
 #endif
 
+void
+jemalloc_free_dirty_pages(void)
+{
+	size_t i;
+	for (i = 0; i < narenas; i++) {
+		arena_t *arena = arenas[i];
+
+		if (arena != NULL) {
+			malloc_spin_lock(&arena->lock);
+			arena_purge(arena, true);
+			malloc_spin_unlock(&arena->lock);
+		}
+	}
+}
+
 /*
  * End non-standard functions.
  */
diff --git a/memory/mozjemalloc/jemalloc.h b/memory/mozjemalloc/jemalloc.h
index f91d3e4f3e8b..9af11c8d2ea4 100644
--- a/memory/mozjemalloc/jemalloc.h
+++ b/memory/mozjemalloc/jemalloc.h
@@ -116,6 +116,20 @@ static inline void jemalloc_purge_freed_pages() { }
 void jemalloc_purge_freed_pages();
 #endif
 
+/*
+ * Free all unused dirty pages in all arenas. Calling this function will slow
+ * down subsequent allocations so it is recommended to use it only when
+ * memory needs to be reclaimed at all costs (see bug 805855). This function
+ * provides functionality similar to mallctl("arenas.purge") in jemalloc 3.
+ */
+
+#if !defined(MOZ_NATIVE_JEMALLOC)
+#if defined(MOZ_MEMORY_LINUX) || defined(MOZ_MEMORY_BSD)
+__attribute__((weak))
+#endif /* defined(MOZ_MEMORY_LINUX) || defined(MOZ_MEMORY_BSD) */
+void jemalloc_free_dirty_pages();
+#endif /* !defined(MOZ_NATIVE_JEMALLOC) */
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
diff --git a/mozglue/build/mozglue.def.in b/mozglue/build/mozglue.def.in
index 6b27f88d2599..6faa4d37bf74 100644
--- a/mozglue/build/mozglue.def.in
+++ b/mozglue/build/mozglue.def.in
@@ -24,6 +24,7 @@ EXPORTS
   je_malloc_good_size
 #endif
   jemalloc_stats
+  jemalloc_free_dirty_pages
   ; A hack to work around the CRT (see giant comment in Makefile.in)
   frex=je_dumb_free_thunk
 #endif
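
A note for reviewers, not part of the patch: below is a minimal sketch of how a caller might use the new entry point, assuming only what the jemalloc.h hunk above declares; the wrapper name reclaim_on_memory_pressure is hypothetical. Because the declaration is marked __attribute__((weak)) on Linux/BSD, the symbol can resolve to NULL at run time when mozjemalloc is not the linked allocator, so callers on those platforms should null-check it before calling:

#include "jemalloc.h"

/* Hypothetical helper: ask every arena to return its dirty pages to the
 * OS. Meant for memory-pressure events only, since a full purge slows
 * down the allocations that follow. */
static void
reclaim_on_memory_pressure(void)
{
#if !defined(MOZ_NATIVE_JEMALLOC)
	/* Weak symbol on Linux/BSD: skip the call if it is absent. */
	if (jemalloc_free_dirty_pages)
		jemalloc_free_dirty_pages();
#endif
}

On Windows the weak-symbol guard does not apply; there the function is exported from the mozglue DLL via the mozglue.def.in change above and can be called directly.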