Mirror of https://github.com/mozilla/gecko-dev.git
Bug 805855 - Add an extra function to jemalloc to free dirty unused pages. r=glandium
Parent: f7aae016b2
Commit: f39e18e483
@@ -1385,7 +1385,7 @@ static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
 static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
     size_t size, bool large, bool zero);
-static void arena_purge(arena_t *arena);
+static void arena_purge(arena_t *arena, bool all);
 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
 static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
     arena_run_t *run, size_t oldsize, size_t newsize);
@@ -3594,10 +3594,12 @@ arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
 }
 
 static void
-arena_purge(arena_t *arena)
+arena_purge(arena_t *arena, bool all)
 {
     arena_chunk_t *chunk;
     size_t i, npages;
+    /* If all is set purge all dirty pages. */
+    size_t dirty_max = all ? 1 : opt_dirty_max;
 #ifdef MALLOC_DEBUG
     size_t ndirty = 0;
     rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
@@ -3606,7 +3608,7 @@ arena_purge(arena_t *arena)
     } rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
     assert(ndirty == arena->ndirty);
 #endif
-    RELEASE_ASSERT(arena->ndirty > opt_dirty_max);
+    RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
 
 #ifdef MALLOC_STATS
     arena->stats.npurge++;
@@ -3618,7 +3620,7 @@ arena_purge(arena_t *arena)
      * number of system calls, even if a chunk has only been partially
      * purged.
      */
-    while (arena->ndirty > (opt_dirty_max >> 1)) {
+    while (arena->ndirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
         bool madvised = false;
 #endif
@@ -3675,7 +3677,7 @@ arena_purge(arena_t *arena)
             arena->stats.nmadvise++;
             arena->stats.purged += npages;
 #endif
-            if (arena->ndirty <= (opt_dirty_max >> 1))
+            if (arena->ndirty <= (dirty_max >> 1))
                 break;
         }
     }
@@ -3800,7 +3802,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 
     /* Enforce opt_dirty_max. */
     if (arena->ndirty > opt_dirty_max)
-        arena_purge(arena);
+        arena_purge(arena, false);
 }
 
 static void
@@ -6854,6 +6856,21 @@ _msize(const void *ptr)
 }
 #endif
 
+void
+jemalloc_free_dirty_pages(void)
+{
+    size_t i;
+    for (i = 0; i < narenas; i++) {
+        arena_t *arena = arenas[i];
+
+        if (arena != NULL) {
+            malloc_spin_lock(&arena->lock);
+            arena_purge(arena, true);
+            malloc_spin_unlock(&arena->lock);
+        }
+    }
+}
+
 /*
  * End non-standard functions.
  */
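Taken together, the hunks above thread a single "all" flag through arena_purge(): when all is true, dirty_max is forced to 1, so the purge loop (while (arena->ndirty > (dirty_max >> 1))) only stops once every dirty page has been returned to the OS, while the normal opt_dirty_max-driven path is unchanged, and the new jemalloc_free_dirty_pages() simply runs that full purge over every arena under its lock. A minimal caller-side sketch follows; the on_memory_pressure() wrapper and the message are hypothetical, and only jemalloc_free_dirty_pages() itself comes from this patch.

#include <stdio.h>

/* Normally provided by the patched jemalloc header further below. */
void jemalloc_free_dirty_pages(void);

/* Hypothetical hook, e.g. wired to a low-memory notification. */
static void
on_memory_pressure(void)
{
    /* Releases every arena's dirty, unused pages back to the OS; later
     * allocations may be slower until those pages are faulted back in. */
    jemalloc_free_dirty_pages();
    fputs("jemalloc: dirty pages released\n", stderr);
}

int
main(void)
{
    on_memory_pressure();
    return 0;
}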
@@ -116,6 +116,20 @@ static inline void jemalloc_purge_freed_pages() { }
 void jemalloc_purge_freed_pages();
 #endif
 
+/*
+ * Free all unused dirty pages in all arenas. Calling this function will slow
+ * down subsequent allocations so it is recommended to use it only when
+ * memory needs to be reclaimed at all costs (see bug 805855). This function
+ * provides functionality similar to mallctl("arenas.purge") in jemalloc 3.
+ */
+
+#if !defined(MOZ_NATIVE_JEMALLOC)
+#if defined(MOZ_MEMORY_LINUX) || defined(MOZ_MEMORY_BSD)
+__attribute__((weak))
+#endif /* defined(MOZ_MEMORY_LINUX) || defined(MOZ_MEMORY_BSD) */
+void jemalloc_free_dirty_pages();
+#endif /* !defined(MOZ_NATIVE_JEMALLOC) */
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
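The declaration is only exposed when the in-tree allocator is built (MOZ_NATIVE_JEMALLOC undefined), and on Linux/BSD it is marked weak, so a caller can test the symbol at run time and skip the call when it is not provided. As the new comment notes, the closest counterpart in stock jemalloc 3 is the "arenas.purge" mallctl; a rough sketch of that equivalent is below. It assumes an unprefixed jemalloc 3 build and is an illustration, not part of this patch.

#include <jemalloc/jemalloc.h>

/* Rough jemalloc 3 counterpart of jemalloc_free_dirty_pages(): writing to
 * the "arenas.purge" control with no argument purges the unused dirty
 * pages of all arenas. */
static void
purge_all_arenas_jemalloc3(void)
{
    mallctl("arenas.purge", NULL, NULL, NULL, 0);
}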
@@ -24,6 +24,7 @@ EXPORTS
     je_malloc_good_size
 #endif
     jemalloc_stats
+    jemalloc_free_dirty_pages
 ; A hack to work around the CRT (see giant comment in Makefile.in)
     frex=je_dumb_free_thunk
 #endif