Mirror of https://github.com/mozilla/gecko-dev.git
Bug 815071 - Update jemalloc3 to commit 6eb84fb. r=jlebar
This commit is contained in:
Parent: bdc9e873b9
Commit: 77fe00e25b
@@ -6,20 +6,51 @@ found in the git revision history:
    http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
    git://canonware.com/jemalloc.git

* 3.x.x (XXX not yet released)
* 3.x.x (Not yet released)

  Bug fixes:
  - Fix "arenas.extend" mallctl to output the number of arenas.

* 3.2.0 (November 9, 2012)

  In addition to a couple of bug fixes, this version modifies page run
  allocation and dirty page purging algorithms in order to better control
  page-level virtual memory fragmentation.

  Incompatible changes:
  - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).

  Bug fixes:
  - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
    after primary dss allocation fails.
  - Fix deadlock in the "arenas.purge" mallctl.  This regression was introduced
    in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.

* 3.1.0 (October 16, 2012)

  New features:
  - Auto-detect whether running inside Valgrind, thus removing the need to
    manually specify MALLOC_CONF=valgrind:true.
  - Add the "arenas.extend" mallctl, which allows applications to create
    manually managed arenas.
  - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
  - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
    which provide control over dss/mmap precedence.
  - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
  - Define LG_QUANTUM for hppa.

  Incompatible changes:
  - Disable tcache by default if running inside Valgrind, in order to avoid
    making unallocated objects appear reachable to Valgrind.
  - Drop const from malloc_usable_size() argument on Linux.

  Bug fixes:
  - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
  - Remove const from __*_hook variable declarations, so that glibc can modify
    them during process forking.
  - Fix mlockall(2)/madvise(2) interaction.
  - Fix fork(2)-related deadlocks.
  - Fix error return value for "thread.tcache.enabled" mallctl.

* 3.0.0 (May 11, 2012)
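As a rough illustration of the 3.1.0 additions listed above, the sketch below creates a manually managed arena with the "arenas.extend" mallctl and allocates from it with the experimental allocm() interface and the ALLOCM_ARENA() flag. It assumes a build with the experimental API enabled and unprefixed public symbols (a --with-jemalloc-prefix build would use je_mallctl etc.); the variable names are illustrative and error handling is minimal.

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	unsigned arena_ind;
    	size_t sz = sizeof(arena_ind);
    	void *p;

    	/* Append a new, manually managed arena and get its index. */
    	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
    		return (1);

    	/* Allocate 4 KiB from that specific arena. */
    	if (allocm(&p, NULL, 4096, ALLOCM_ARENA(arena_ind)) != ALLOCM_SUCCESS)
    		return (1);

    	printf("allocated %p from arena %u\n", p, arena_ind);
    	dallocm(p, 0);
    	return (0);
    }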
@@ -55,6 +55,11 @@ any of the following arguments (not a definitive list) to 'configure':
    jemalloc overlays the default malloc zone, but makes no attempt to actually
    replace the "malloc", "calloc", etc. symbols.

--without-export
    Don't export public APIs.  This can be useful when building jemalloc as a
    static library, or to avoid exporting public APIs when using the zone
    allocator on OSX.

--with-private-namespace=<prefix>
    Prefix all library-private APIs with <prefix>.  For shared libraries,
    symbol visibility mechanisms prevent these symbols from being exported, but
@@ -101,9 +101,9 @@ DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html)
DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3)
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
CTESTS := $(srcroot)test/aligned_alloc.c $(srcroot)test/allocated.c \
	$(srcroot)test/bitmap.c $(srcroot)test/mremap.c \
	$(srcroot)test/posix_memalign.c $(srcroot)test/thread_arena.c \
	$(srcroot)test/thread_tcache_enabled.c
	$(srcroot)test/ALLOCM_ARENA.c $(srcroot)test/bitmap.c \
	$(srcroot)test/mremap.c $(srcroot)test/posix_memalign.c \
	$(srcroot)test/thread_arena.c $(srcroot)test/thread_tcache_enabled.c
ifeq ($(enable_experimental), 1)
CTESTS += $(srcroot)test/allocm.c $(srcroot)test/rallocm.c
endif
@@ -1 +1 @@
1.0.0-357-gd0ffd8ed4f6aa4cf7248028eddfcb35f93247fe4
1.0.0-370-g6eb84fbe315add1e1d4f8deedc25d260fff3ae97
@@ -750,6 +750,7 @@ enable_autogen
enable_experimental
with_mangling
with_jemalloc_prefix
with_export
with_private_namespace
with_install_suffix
enable_cc_silence
@@ -1420,6 +1421,7 @@ Optional Packages:
  --with-mangling=<map>   Mangle symbols in <map>
  --with-jemalloc-prefix=<prefix>
                          Prefix to prepend to all public APIs
  --without-export        disable exporting jemalloc public APIs
  --with-private-namespace=<prefix>
                          Prefix to prepend to all library-private APIs
  --with-install-suffix=<suffix>
@@ -5148,6 +5150,17 @@ _ACEOF
done


# Check whether --with-export was given.
if test "${with_export+set}" = set; then :
  withval=$with_export; if test "x$with_export" = "xno"; then
  $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h

fi
]
fi


# Check whether --with-private_namespace was given.
if test "${with_private_namespace+set}" = set; then :
  withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="$with_private_namespace"
@@ -471,6 +471,13 @@ for stem in ${public_syms}; do
  AC_DEFINE_UNQUOTED([${n}], [${m}])
done

AC_ARG_WITH([export],
  [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
  [if test "x$with_export" = "xno"; then
  AC_DEFINE([JEMALLOC_EXPORT],[])]
fi]
)

dnl Do not mangle library-private APIs by default.
AC_ARG_WITH([private_namespace],
  [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
@@ -368,6 +368,15 @@ for (i = 0; i < nbins; i++) {
        object.  This constraint can apply to both growth and
        shrinkage.</para></listitem>
      </varlistentry>
      <varlistentry>
        <term><constant>ALLOCM_ARENA(<parameter>a</parameter>)
        </constant></term>

        <listitem><para>Use the arena specified by the index
        <parameter>a</parameter>.  This macro does not validate that
        <parameter>a</parameter> specifies an arena in the valid
        range.</para></listitem>
      </varlistentry>
    </variablelist>
  </para>
@@ -785,15 +794,29 @@ for (i = 0; i < nbins; i++) {
        chunk size is 4 MiB (2^22).</para></listitem>
      </varlistentry>

      <varlistentry id="opt.dss">
        <term>
          <mallctl>opt.dss</mallctl>
          (<type>const char *</type>)
          <literal>r-</literal>
        </term>
        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
        related to <citerefentry><refentrytitle>mmap</refentrytitle>
        <manvolnum>2</manvolnum></citerefentry> allocation.  The following
        settings are supported: “disabled”, “primary”,
        and “secondary” (default).</para></listitem>
      </varlistentry>

      <varlistentry id="opt.narenas">
        <term>
          <mallctl>opt.narenas</mallctl>
          (<type>size_t</type>)
          <literal>r-</literal>
        </term>
        <listitem><para>Maximum number of arenas to use.  The default maximum
        number of arenas is four times the number of CPUs, or one if there is a
        single CPU.</para></listitem>
        <listitem><para>Maximum number of arenas to use for automatic
        multiplexing of threads and arenas.  The default is four times the
        number of CPUs, or one if there is a single CPU.</para></listitem>
      </varlistentry>

      <varlistentry id="opt.lg_dirty_mult">
@@ -810,7 +833,7 @@ for (i = 0; i < nbins; i++) {
        <manvolnum>2</manvolnum></citerefentry> or a similar system call.  This
        provides the kernel with sufficient information to recycle dirty pages
        if physical memory becomes scarce and the pages remain unused.  The
        default minimum ratio is 32:1 (2^5:1); an option value of -1 will
        default minimum ratio is 8:1 (2^3:1); an option value of -1 will
        disable dirty page purging.</para></listitem>
      </varlistentry>
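A small sketch of how the read-only options documented above might be queried at runtime. It assumes unprefixed public symbols and uses only the standard mallctl interface; the function name is illustrative.

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void
    print_opts(void)
    {
    	const char *dss;
    	ssize_t lg_dirty_mult;
    	size_t sz;

    	sz = sizeof(dss);
    	if (mallctl("opt.dss", &dss, &sz, NULL, 0) == 0)
    		printf("opt.dss: %s\n", dss);	/* "secondary" by default. */

    	sz = sizeof(lg_dirty_mult);
    	if (mallctl("opt.lg_dirty_mult", &lg_dirty_mult, &sz, NULL, 0) == 0)
    		printf("opt.lg_dirty_mult: %zd\n", lg_dirty_mult);	/* 3 -> 8:1. */
    }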
@@ -1149,11 +1172,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
          <literal>rw</literal>
        </term>
        <listitem><para>Get or set the arena associated with the calling
        thread.  The arena index must be less than the maximum number of arenas
        (see the <link
        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>
        mallctl).  If the specified arena was not initialized beforehand (see
        the <link
        thread.  If the specified arena was not initialized beforehand (see the
        <link
        linkend="arenas.initialized"><mallctl>arenas.initialized</mallctl></link>
        mallctl), it will be automatically initialized as a side effect of
        calling this interface.</para></listitem>
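To illustrate the revised "thread.arena" semantics above, the hypothetical helper below binds the calling thread to a given arena index; per the documentation, an uninitialized arena is initialized as a side effect of the write.

    #include <jemalloc/jemalloc.h>

    /* Bind the calling thread to arena `ind`; returns 0 on success. */
    static int
    bind_thread_to_arena(unsigned ind)
    {
    	unsigned old;
    	size_t sz = sizeof(old);

    	/* Read the previous binding and write the new one in one call. */
    	return (mallctl("thread.arena", &old, &sz, &ind, sizeof(ind)));
    }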
@@ -1245,13 +1265,40 @@ malloc_conf = "xmalloc:true";]]></programlisting>
        the developer may find manual flushing useful.</para></listitem>
      </varlistentry>

      <varlistentry id="arena.i.purge">
        <term>
          <mallctl>arena.<i>.purge</mallctl>
          (<type>unsigned</type>)
          <literal>--</literal>
        </term>
        <listitem><para>Purge unused dirty pages for arena <i>, or for
        all arenas if <i> equals <link
        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
        </para></listitem>
      </varlistentry>

      <varlistentry id="arena.i.dss">
        <term>
          <mallctl>arena.<i>.dss</mallctl>
          (<type>const char *</type>)
          <literal>rw</literal>
        </term>
        <listitem><para>Set the precedence of dss allocation as related to mmap
        allocation for arena <i>, or for all arenas if <i> equals
        <link
        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.  See
        <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
        settings.
        </para></listitem>
      </varlistentry>

      <varlistentry id="arenas.narenas">
        <term>
          <mallctl>arenas.narenas</mallctl>
          (<type>unsigned</type>)
          <literal>r-</literal>
        </term>
        <listitem><para>Maximum number of arenas.</para></listitem>
        <listitem><para>Current limit on number of arenas.</para></listitem>
      </varlistentry>

      <varlistentry id="arenas.initialized">
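A sketch of driving the new "arena.<i>.purge" control: translate the name to a MIB once, then patch in the arena index. The helper name is illustrative; per the documentation above, passing the value of arenas.narenas as the index purges all arenas.

    #include <jemalloc/jemalloc.h>

    /* Purge unused dirty pages of a single arena (hypothetical helper). */
    static int
    purge_arena(unsigned arena_ind)
    {
    	size_t mib[3];
    	size_t miblen = sizeof(mib) / sizeof(size_t);

    	if (mallctlnametomib("arena.0.purge", mib, &miblen) != 0)
    		return (-1);
    	mib[1] = (size_t)arena_ind;	/* Replace the "0" component. */
    	return (mallctlbymib(mib, miblen, NULL, NULL, NULL, 0));
    }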
@@ -1370,6 +1417,16 @@ malloc_conf = "xmalloc:true";]]></programlisting>
        for all arenas if none is specified.</para></listitem>
      </varlistentry>

      <varlistentry>
        <term>
          <mallctl>arenas.extend</mallctl>
          (<type>unsigned</type>)
          <literal>r-</literal>
        </term>
        <listitem><para>Extend the array of arenas by appending a new arena,
        and returning the new arena index.</para></listitem>
      </varlistentry>

      <varlistentry id="prof.active">
        <term>
          <mallctl>prof.active</mallctl>
@@ -1455,7 +1512,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
        application.  This is a multiple of the page size, and greater than or
        equal to <link
        linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
        </para></listitem>
        This does not include <link linkend="stats.arenas.i.pdirty">
        <mallctl>stats.arenas.<i>.pdirty</mallctl></link> and pages
        entirely devoted to allocator metadata.</para></listitem>
      </varlistentry>

      <varlistentry>
@@ -1538,6 +1597,20 @@ malloc_conf = "xmalloc:true";]]></programlisting>
        </para></listitem>
      </varlistentry>

      <varlistentry>
        <term>
          <mallctl>stats.arenas.<i>.dss</mallctl>
          (<type>const char *</type>)
          <literal>r-</literal>
        </term>
        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
        related to <citerefentry><refentrytitle>mmap</refentrytitle>
        <manvolnum>2</manvolnum></citerefentry> allocation.  See <link
        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details.
        </para></listitem>
      </varlistentry>

      <varlistentry>
        <term>
          <mallctl>stats.arenas.<i>.nthreads</mallctl>
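A sketch of reading the per-arena statistic documented above by formatting the arena index into the mallctl name (an alternative to the MIB approach). It assumes a build with statistics enabled; refreshing the "epoch" control first is needed for stats to be current, and the helper name is illustrative.

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    static void
    print_arena_dss(unsigned i)
    {
    	char name[64];
    	const char *dss;
    	size_t sz = sizeof(dss);
    	uint64_t epoch = 1;
    	size_t esz = sizeof(epoch);

    	/* Refresh cached statistics before reading them. */
    	mallctl("epoch", &epoch, &esz, &epoch, esz);

    	snprintf(name, sizeof(name), "stats.arenas.%u.dss", i);
    	if (mallctl(name, &dss, &sz, NULL, 0) == 0)
    		printf("arena %u dss precedence: %s\n", i, dss);
    }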
@@ -1557,7 +1630,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
        <listitem><para>Number of pages in active runs.</para></listitem>
      </varlistentry>

      <varlistentry>
      <varlistentry id="stats.arenas.i.pdirty">
        <term>
          <mallctl>stats.arenas.<i>.pdirty</mallctl>
          (<type>size_t</type>)
@@ -38,10 +38,10 @@
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
 * times as many active pages as dirty pages.
 * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
 * as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT	5
#define LG_DIRTY_MULT_DEFAULT	3

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
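The comment above expresses the purge trigger as a right shift; a minimal worked example with the new default of 3 is sketched below. The function and parameter names are illustrative, not part of the jemalloc source.

    #include <stdbool.h>
    #include <stddef.h>

    /* With opt_lg_dirty_mult == 3, purging keeps active:dirty >= 8:1. */
    static bool
    needs_purge(size_t nactive, size_t ndirty, size_t lg_dirty_mult)
    {
    	/* E.g. nactive = 1024, ndirty = 200: 1024 >> 3 = 128 < 200 -> purge. */
    	return ((nactive >> lg_dirty_mult) < ndirty);
    }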
@@ -69,7 +69,7 @@ struct arena_chunk_map_s {
	/*
	 * Linkage for run trees.  There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail_{clean,dirty} trees.
	 * 1) arena_t's runs_avail tree.
	 * 2) arena_run_t conceptually uses this linkage for in-use
	 *    non-full runs, rather than directly embedding linkage.
	 */
@ -162,20 +162,24 @@ typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
|
|||
/* Arena chunk header. */
|
||||
struct arena_chunk_s {
|
||||
/* Arena that owns the chunk. */
|
||||
arena_t *arena;
|
||||
arena_t *arena;
|
||||
|
||||
/* Linkage for the arena's chunks_dirty list. */
|
||||
ql_elm(arena_chunk_t) link_dirty;
|
||||
|
||||
/*
|
||||
* True if the chunk is currently in the chunks_dirty list, due to
|
||||
* having at some point contained one or more dirty pages. Removal
|
||||
* from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
|
||||
*/
|
||||
bool dirtied;
|
||||
/* Linkage for tree of arena chunks that contain dirty runs. */
|
||||
rb_node(arena_chunk_t) dirty_link;
|
||||
|
||||
/* Number of dirty pages. */
|
||||
size_t ndirty;
|
||||
size_t ndirty;
|
||||
|
||||
/* Number of available runs. */
|
||||
size_t nruns_avail;
|
||||
|
||||
/*
|
||||
* Number of available run adjacencies. Clean and dirty available runs
|
||||
* are not coalesced, which causes virtual memory fragmentation. The
|
||||
* ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
|
||||
* this fragmentation.
|
||||
* */
|
||||
size_t nruns_adjac;
|
||||
|
||||
/*
|
||||
* Map of pages within chunk that keeps track of free/large/small. The
|
||||
|
@ -183,7 +187,7 @@ struct arena_chunk_s {
|
|||
* need to be tracked in the map. This omission saves a header page
|
||||
* for common chunk sizes (e.g. 4 MiB).
|
||||
*/
|
||||
arena_chunk_map_t map[1]; /* Dynamically sized. */
|
||||
arena_chunk_map_t map[1]; /* Dynamically sized. */
|
||||
};
|
||||
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
|
||||
|
||||
|
@ -331,8 +335,10 @@ struct arena_s {
|
|||
|
||||
uint64_t prof_accumbytes;
|
||||
|
||||
/* List of dirty-page-containing chunks this arena manages. */
|
||||
ql_head(arena_chunk_t) chunks_dirty;
|
||||
dss_prec_t dss_prec;
|
||||
|
||||
/* Tree of dirty-page-containing chunks this arena manages. */
|
||||
arena_chunk_tree_t chunks_dirty;
|
||||
|
||||
/*
|
||||
* In order to avoid rapid chunk allocation/deallocation when an arena
|
||||
|
@ -367,18 +373,9 @@ struct arena_s {
|
|||
|
||||
/*
|
||||
* Size/address-ordered trees of this arena's available runs. The trees
|
||||
* are used for first-best-fit run allocation. The dirty tree contains
|
||||
* runs with dirty pages (i.e. very likely to have been touched and
|
||||
* therefore have associated physical pages), whereas the clean tree
|
||||
* contains runs with pages that either have no associated physical
|
||||
* pages, or have pages that the kernel may recycle at any time due to
|
||||
* previous madvise(2) calls. The dirty tree is used in preference to
|
||||
* the clean tree for allocations, because using dirty pages reduces
|
||||
* the amount of dirty purging necessary to keep the active:dirty page
|
||||
* ratio below the purge threshold.
|
||||
* are used for first-best-fit run allocation.
|
||||
*/
|
||||
arena_avail_tree_t runs_avail_clean;
|
||||
arena_avail_tree_t runs_avail_dirty;
|
||||
arena_avail_tree_t runs_avail;
|
||||
|
||||
/* bins is used to store trees of free regions. */
|
||||
arena_bin_t bins[NBINS];
|
||||
|
@ -403,7 +400,6 @@ extern arena_bin_info_t arena_bin_info[NBINS];
|
|||
#define nlclasses (chunk_npages - map_bias)
|
||||
|
||||
void arena_purge_all(arena_t *arena);
|
||||
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
|
||||
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
|
||||
size_t binind, uint64_t prof_accumbytes);
|
||||
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
|
||||
|
@ -422,13 +418,16 @@ void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
|
|||
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
|
||||
void *ptr);
|
||||
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
|
||||
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
|
||||
arena_stats_t *astats, malloc_bin_stats_t *bstats,
|
||||
malloc_large_stats_t *lstats);
|
||||
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
|
||||
size_t extra, bool zero);
|
||||
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero, bool try_tcache);
|
||||
void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
|
||||
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
|
||||
bool try_tcache_dalloc);
|
||||
dss_prec_t arena_dss_prec_get(arena_t *arena);
|
||||
void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
|
||||
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
|
||||
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
|
||||
malloc_large_stats_t *lstats);
|
||||
bool arena_new(arena_t *arena, unsigned ind);
|
||||
void arena_boot(void);
|
||||
void arena_prefork(arena_t *arena);
|
||||
|
@ -464,6 +463,9 @@ void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
|
|||
size_t runind, size_t binind, size_t flags);
|
||||
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
|
||||
size_t unzeroed);
|
||||
void arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
|
||||
void arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
|
||||
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
|
||||
size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
|
||||
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
|
||||
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
|
||||
|
@ -661,6 +663,44 @@ arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
|
|||
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
|
||||
{
|
||||
|
||||
cassert(config_prof);
|
||||
assert(prof_interval != 0);
|
||||
|
||||
arena->prof_accumbytes += accumbytes;
|
||||
if (arena->prof_accumbytes >= prof_interval) {
|
||||
prof_idump();
|
||||
arena->prof_accumbytes -= prof_interval;
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
|
||||
{
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
if (prof_interval == 0)
|
||||
return;
|
||||
arena_prof_accum_impl(arena, accumbytes);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
|
||||
{
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
if (prof_interval == 0)
|
||||
return;
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
arena_prof_accum_impl(arena, accumbytes);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE size_t
|
||||
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
|
||||
{
|
||||
|
|
|
@@ -28,6 +28,7 @@
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_chunk;
extern const char *opt_dss;

/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
@@ -42,7 +43,9 @@ extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */

void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec);
void chunk_unmap(void *chunk, size_t size);
void chunk_dealloc(void *chunk, size_t size, bool unmap);
bool chunk_boot(void);
void chunk_prefork(void);
@@ -1,14 +1,28 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef enum {
	dss_prec_disabled  = 0,
	dss_prec_primary   = 1,
	dss_prec_secondary = 2,

	dss_prec_limit     = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT	dss_prec_secondary
#define DSS_DEFAULT		"secondary"

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

extern const char *dss_prec_names[];

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
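For orientation, the sketch below shows how the enum above lines up with the strings accepted by the "opt.dss" and "arena.<i>.dss" mallctls. The lookup helper and local table are purely illustrative; they mirror, rather than reuse, the dss_prec_names array declared above.

    #include <string.h>

    /* Illustrative mapping from a setting string to a precedence value. */
    static const char *prec_names[] = { "disabled", "primary", "secondary" };

    static int
    dss_prec_from_name(const char *name)
    {
    	int i;

    	for (i = 0; i < 3; i++) {
    		if (strcmp(name, prec_names[i]) == 0)
    			return (i);	/* Matches dss_prec_disabled/primary/secondary. */
    	}
    	return (-1);		/* Unknown setting. */
    }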
@ -33,6 +33,7 @@ struct ctl_indexed_node_s {
|
|||
struct ctl_arena_stats_s {
|
||||
bool initialized;
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
size_t pactive;
|
||||
size_t pdirty;
|
||||
arena_stats_t astats;
|
||||
|
@ -61,6 +62,7 @@ struct ctl_stats_s {
|
|||
uint64_t nmalloc; /* huge_nmalloc */
|
||||
uint64_t ndalloc; /* huge_ndalloc */
|
||||
} huge;
|
||||
unsigned narenas;
|
||||
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
|
||||
};
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ void *huge_palloc(size_t size, size_t alignment, bool zero);
|
|||
void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
|
||||
size_t extra);
|
||||
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero);
|
||||
size_t alignment, bool zero, bool try_tcache_dalloc);
|
||||
void huge_dalloc(void *ptr, bool unmap);
|
||||
size_t huge_salloc(const void *ptr);
|
||||
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
|
||||
|
|
|
@ -514,13 +514,19 @@ extern size_t opt_narenas;
|
|||
/* Number of CPUs. */
|
||||
extern unsigned ncpus;
|
||||
|
||||
extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
|
||||
/* Protects arenas initialization (arenas, arenas_total). */
|
||||
extern malloc_mutex_t arenas_lock;
|
||||
/*
|
||||
* Arenas that are used to service external requests. Not all elements of the
|
||||
* arenas array are necessarily used; arenas are created lazily as needed.
|
||||
*
|
||||
* arenas[0..narenas_auto) are used for automatic multiplexing of threads and
|
||||
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
|
||||
* takes some action to create them and allocate from them.
|
||||
*/
|
||||
extern arena_t **arenas;
|
||||
extern unsigned narenas;
|
||||
extern unsigned narenas_total;
|
||||
extern unsigned narenas_auto; /* Read-only after initialization. */
|
||||
|
||||
arena_t *arenas_extend(unsigned ind);
|
||||
void arenas_cleanup(void *arg);
|
||||
|
@ -575,6 +581,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
|
|||
|
||||
size_t s2u(size_t size);
|
||||
size_t sa2u(size_t size, size_t alignment);
|
||||
unsigned narenas_total_get(void);
|
||||
arena_t *choose_arena(arena_t *arena);
|
||||
#endif
|
||||
|
||||
|
@ -679,6 +686,18 @@ sa2u(size_t size, size_t alignment)
|
|||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE unsigned
|
||||
narenas_total_get(void)
|
||||
{
|
||||
unsigned narenas;
|
||||
|
||||
malloc_mutex_lock(&arenas_lock);
|
||||
narenas = narenas_total;
|
||||
malloc_mutex_unlock(&arenas_lock);
|
||||
|
||||
return (narenas);
|
||||
}
|
||||
|
||||
/* Choose an arena based on a per-thread value. */
|
||||
JEMALLOC_INLINE arena_t *
|
||||
choose_arena(arena_t *arena)
|
||||
|
@ -714,15 +733,24 @@ choose_arena(arena_t *arena)
|
|||
#include "jemalloc/internal/quarantine.h"
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
void *imallocx(size_t size, bool try_tcache, arena_t *arena);
|
||||
void *imalloc(size_t size);
|
||||
void *icallocx(size_t size, bool try_tcache, arena_t *arena);
|
||||
void *icalloc(size_t size);
|
||||
void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
|
||||
arena_t *arena);
|
||||
void *ipalloc(size_t usize, size_t alignment, bool zero);
|
||||
size_t isalloc(const void *ptr, bool demote);
|
||||
size_t ivsalloc(const void *ptr, bool demote);
|
||||
size_t u2rz(size_t usize);
|
||||
size_t p2rz(const void *ptr);
|
||||
void idallocx(void *ptr, bool try_tcache);
|
||||
void idalloc(void *ptr);
|
||||
void iqallocx(void *ptr, bool try_tcache);
|
||||
void iqalloc(void *ptr);
|
||||
void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
|
||||
bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
|
||||
arena_t *arena);
|
||||
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
|
||||
bool zero, bool no_move);
|
||||
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
|
||||
|
@ -730,29 +758,44 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
|
|||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
|
||||
JEMALLOC_INLINE void *
|
||||
imalloc(size_t size)
|
||||
imallocx(size_t size, bool try_tcache, arena_t *arena)
|
||||
{
|
||||
|
||||
assert(size != 0);
|
||||
|
||||
if (size <= arena_maxclass)
|
||||
return (arena_malloc(NULL, size, false, true));
|
||||
return (arena_malloc(arena, size, false, try_tcache));
|
||||
else
|
||||
return (huge_malloc(size, false));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
imalloc(size_t size)
|
||||
{
|
||||
|
||||
return (imallocx(size, true, NULL));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
icallocx(size_t size, bool try_tcache, arena_t *arena)
|
||||
{
|
||||
|
||||
if (size <= arena_maxclass)
|
||||
return (arena_malloc(arena, size, true, try_tcache));
|
||||
else
|
||||
return (huge_malloc(size, true));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
icalloc(size_t size)
|
||||
{
|
||||
|
||||
if (size <= arena_maxclass)
|
||||
return (arena_malloc(NULL, size, true, true));
|
||||
else
|
||||
return (huge_malloc(size, true));
|
||||
return (icallocx(size, true, NULL));
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
ipalloc(size_t usize, size_t alignment, bool zero)
|
||||
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
|
||||
arena_t *arena)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
|
@ -760,11 +803,11 @@ ipalloc(size_t usize, size_t alignment, bool zero)
|
|||
assert(usize == sa2u(usize, alignment));
|
||||
|
||||
if (usize <= arena_maxclass && alignment <= PAGE)
|
||||
ret = arena_malloc(NULL, usize, zero, true);
|
||||
ret = arena_malloc(arena, usize, zero, try_tcache);
|
||||
else {
|
||||
if (usize <= arena_maxclass) {
|
||||
ret = arena_palloc(choose_arena(NULL), usize, alignment,
|
||||
zero);
|
||||
ret = arena_palloc(choose_arena(arena), usize,
|
||||
alignment, zero);
|
||||
} else if (alignment <= chunksize)
|
||||
ret = huge_malloc(usize, zero);
|
||||
else
|
||||
|
@ -775,6 +818,13 @@ ipalloc(size_t usize, size_t alignment, bool zero)
|
|||
return (ret);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
ipalloc(size_t usize, size_t alignment, bool zero)
|
||||
{
|
||||
|
||||
return (ipallocx(usize, alignment, zero, true, NULL));
|
||||
}
|
||||
|
||||
/*
|
||||
* Typical usage:
|
||||
* void *ptr = [...]
|
||||
|
@ -833,7 +883,7 @@ p2rz(const void *ptr)
|
|||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
idalloc(void *ptr)
|
||||
idallocx(void *ptr, bool try_tcache)
|
||||
{
|
||||
arena_chunk_t *chunk;
|
||||
|
||||
|
@ -841,24 +891,38 @@ idalloc(void *ptr)
|
|||
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if (chunk != ptr)
|
||||
arena_dalloc(chunk->arena, chunk, ptr, true);
|
||||
arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
|
||||
else
|
||||
huge_dalloc(ptr, true);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
idalloc(void *ptr)
|
||||
{
|
||||
|
||||
idallocx(ptr, true);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
iqallocx(void *ptr, bool try_tcache)
|
||||
{
|
||||
|
||||
if (config_fill && opt_quarantine)
|
||||
quarantine(ptr);
|
||||
else
|
||||
idallocx(ptr, try_tcache);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
iqalloc(void *ptr)
|
||||
{
|
||||
|
||||
if (config_fill && opt_quarantine)
|
||||
quarantine(ptr);
|
||||
else
|
||||
idalloc(ptr);
|
||||
iqallocx(ptr, true);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
|
||||
bool no_move)
|
||||
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
|
||||
bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
|
||||
{
|
||||
void *ret;
|
||||
size_t oldsize;
|
||||
|
@ -881,7 +945,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
|
|||
usize = sa2u(size + extra, alignment);
|
||||
if (usize == 0)
|
||||
return (NULL);
|
||||
ret = ipalloc(usize, alignment, zero);
|
||||
ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
|
||||
if (ret == NULL) {
|
||||
if (extra == 0)
|
||||
return (NULL);
|
||||
|
@ -889,7 +953,8 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
|
|||
usize = sa2u(size, alignment);
|
||||
if (usize == 0)
|
||||
return (NULL);
|
||||
ret = ipalloc(usize, alignment, zero);
|
||||
ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
|
||||
arena);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
}
|
||||
|
@ -900,7 +965,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
|
|||
*/
|
||||
copysize = (size < oldsize) ? size : oldsize;
|
||||
memcpy(ret, ptr, copysize);
|
||||
iqalloc(ptr);
|
||||
iqallocx(ptr, try_tcache_dalloc);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
|
@ -914,15 +979,25 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
|
|||
}
|
||||
} else {
|
||||
if (size + extra <= arena_maxclass) {
|
||||
return (arena_ralloc(ptr, oldsize, size, extra,
|
||||
alignment, zero, true));
|
||||
return (arena_ralloc(arena, ptr, oldsize, size, extra,
|
||||
alignment, zero, try_tcache_alloc,
|
||||
try_tcache_dalloc));
|
||||
} else {
|
||||
return (huge_ralloc(ptr, oldsize, size, extra,
|
||||
alignment, zero));
|
||||
alignment, zero, try_tcache_dalloc));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
|
||||
bool no_move)
|
||||
{
|
||||
|
||||
return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
|
||||
NULL));
|
||||
}
|
||||
|
||||
malloc_tsd_externs(thread_allocated, thread_allocated_t)
|
||||
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
|
||||
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
|
||||
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
|
||||
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
|
||||
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
|
||||
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
|
||||
#define arena_malloc JEMALLOC_N(arena_malloc)
|
||||
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
|
||||
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
|
||||
|
@ -39,6 +41,8 @@
|
|||
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
|
||||
#define arena_prefork JEMALLOC_N(arena_prefork)
|
||||
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
|
||||
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
|
||||
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
|
||||
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
|
||||
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
|
||||
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
|
||||
|
@ -51,13 +55,11 @@
|
|||
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
|
||||
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
|
||||
#define arenas JEMALLOC_N(arenas)
|
||||
#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
|
||||
#define arenas_booted JEMALLOC_N(arenas_booted)
|
||||
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
|
||||
#define arenas_extend JEMALLOC_N(arenas_extend)
|
||||
#define arenas_initialized JEMALLOC_N(arenas_initialized)
|
||||
#define arenas_lock JEMALLOC_N(arenas_lock)
|
||||
#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
|
||||
#define arenas_tls JEMALLOC_N(arenas_tls)
|
||||
#define arenas_tsd JEMALLOC_N(arenas_tsd)
|
||||
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
|
||||
|
@ -102,12 +104,15 @@
|
|||
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
|
||||
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
|
||||
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
|
||||
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
|
||||
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
|
||||
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
|
||||
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
|
||||
#define chunk_npages JEMALLOC_N(chunk_npages)
|
||||
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
|
||||
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
|
||||
#define chunk_prefork JEMALLOC_N(chunk_prefork)
|
||||
#define chunk_unmap JEMALLOC_N(chunk_unmap)
|
||||
#define chunks_mtx JEMALLOC_N(chunks_mtx)
|
||||
#define chunks_rtree JEMALLOC_N(chunks_rtree)
|
||||
#define chunksize JEMALLOC_N(chunksize)
|
||||
|
@ -136,6 +141,7 @@
|
|||
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
|
||||
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
|
||||
#define ctl_prefork JEMALLOC_N(ctl_prefork)
|
||||
#define dss_prec_names JEMALLOC_N(dss_prec_names)
|
||||
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
|
||||
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
|
||||
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
|
||||
|
@ -188,11 +194,17 @@
|
|||
#define huge_salloc JEMALLOC_N(huge_salloc)
|
||||
#define iallocm JEMALLOC_N(iallocm)
|
||||
#define icalloc JEMALLOC_N(icalloc)
|
||||
#define icallocx JEMALLOC_N(icallocx)
|
||||
#define idalloc JEMALLOC_N(idalloc)
|
||||
#define idallocx JEMALLOC_N(idallocx)
|
||||
#define imalloc JEMALLOC_N(imalloc)
|
||||
#define imallocx JEMALLOC_N(imallocx)
|
||||
#define ipalloc JEMALLOC_N(ipalloc)
|
||||
#define ipallocx JEMALLOC_N(ipallocx)
|
||||
#define iqalloc JEMALLOC_N(iqalloc)
|
||||
#define iqallocx JEMALLOC_N(iqallocx)
|
||||
#define iralloc JEMALLOC_N(iralloc)
|
||||
#define irallocx JEMALLOC_N(irallocx)
|
||||
#define isalloc JEMALLOC_N(isalloc)
|
||||
#define isthreaded JEMALLOC_N(isthreaded)
|
||||
#define ivsalloc JEMALLOC_N(ivsalloc)
|
||||
|
@ -220,7 +232,9 @@
|
|||
#define map_bias JEMALLOC_N(map_bias)
|
||||
#define mb_write JEMALLOC_N(mb_write)
|
||||
#define mutex_boot JEMALLOC_N(mutex_boot)
|
||||
#define narenas JEMALLOC_N(narenas)
|
||||
#define narenas_auto JEMALLOC_N(narenas_auto)
|
||||
#define narenas_total JEMALLOC_N(narenas_total)
|
||||
#define narenas_total_get JEMALLOC_N(narenas_total_get)
|
||||
#define ncpus JEMALLOC_N(ncpus)
|
||||
#define nhbins JEMALLOC_N(nhbins)
|
||||
#define opt_abort JEMALLOC_N(opt_abort)
|
||||
|
@ -297,9 +311,6 @@
|
|||
#define s2u JEMALLOC_N(s2u)
|
||||
#define sa2u JEMALLOC_N(sa2u)
|
||||
#define set_errno JEMALLOC_N(set_errno)
|
||||
#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
|
||||
#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
|
||||
#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
|
||||
#define stats_cactive JEMALLOC_N(stats_cactive)
|
||||
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
|
||||
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
|
||||
|
|
|
@@ -25,6 +25,8 @@ extern "C" {
#endif
#define ALLOCM_ZERO	((int)0x40)
#define ALLOCM_NO_MOVE	((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
#define ALLOCM_ARENA(a)	((int)(((a)+1) << 8))

#define ALLOCM_SUCCESS		0
#define ALLOCM_ERR_OOM		1
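To make the bias comment above concrete, the snippet below restates the encoding and adds a hypothetical decode helper: arena index a is stored as (a+1) in the bits above the flag byte, so a flags value with those bits zero means no arena was specified. The macro and helper names are illustrative, not part of the jemalloc API.

    /* Mirrors the ALLOCM_ARENA() encoding shown above (decode is illustrative). */
    #define MY_ALLOCM_ARENA(a)	((int)(((a)+1) << 8))

    static int
    allocm_arena_decode(int flags)
    {
    	int bits = flags >> 8;

    	return ((bits == 0) ? -1 : bits - 1);	/* -1 means unspecified. */
    }
    /* Example: MY_ALLOCM_ARENA(0) == 0x100, which decodes back to arena 0. */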
The diff for one file is not shown because it is too large.
@ -32,7 +32,8 @@ base_pages_alloc(size_t minsize)
|
|||
assert(minsize != 0);
|
||||
csize = CHUNK_CEILING(minsize);
|
||||
zero = false;
|
||||
base_pages = chunk_alloc(csize, chunksize, true, &zero);
|
||||
base_pages = chunk_alloc(csize, chunksize, true, &zero,
|
||||
chunk_dss_prec_get());
|
||||
if (base_pages == NULL)
|
||||
return (true);
|
||||
base_next_addr = base_pages;
|
||||
|
|
|
@ -4,7 +4,8 @@
|
|||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
|
||||
const char *opt_dss = DSS_DEFAULT;
|
||||
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
|
||||
|
||||
malloc_mutex_t chunks_mtx;
|
||||
chunk_stats_t stats_chunks;
|
||||
|
@ -15,8 +16,10 @@ chunk_stats_t stats_chunks;
|
|||
* address space. Depending on function, different tree orderings are needed,
|
||||
* which is why there are two trees with the same contents.
|
||||
*/
|
||||
static extent_tree_t chunks_szad;
|
||||
static extent_tree_t chunks_ad;
|
||||
static extent_tree_t chunks_szad_mmap;
|
||||
static extent_tree_t chunks_ad_mmap;
|
||||
static extent_tree_t chunks_szad_dss;
|
||||
static extent_tree_t chunks_ad_dss;
|
||||
|
||||
rtree_t *chunks_rtree;
|
||||
|
||||
|
@ -30,14 +33,17 @@ size_t arena_maxclass; /* Max size class for arenas. */
|
|||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static void *chunk_recycle(size_t size, size_t alignment, bool base,
|
||||
static void *chunk_recycle(extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
|
||||
bool *zero);
|
||||
static void chunk_record(void *chunk, size_t size);
|
||||
static void chunk_record(extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, void *chunk, size_t size);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static void *
|
||||
chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
|
||||
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
|
||||
size_t alignment, bool base, bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
extent_node_t *node;
|
||||
|
@ -62,7 +68,7 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
|
|||
key.addr = NULL;
|
||||
key.size = alloc_size;
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
node = extent_tree_szad_nsearch(&chunks_szad, &key);
|
||||
node = extent_tree_szad_nsearch(chunks_szad, &key);
|
||||
if (node == NULL) {
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
return (NULL);
|
||||
|
@ -73,13 +79,13 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
|
|||
trailsize = node->size - leadsize - size;
|
||||
ret = (void *)((uintptr_t)node->addr + leadsize);
|
||||
/* Remove node from the tree. */
|
||||
extent_tree_szad_remove(&chunks_szad, node);
|
||||
extent_tree_ad_remove(&chunks_ad, node);
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_ad_remove(chunks_ad, node);
|
||||
if (leadsize != 0) {
|
||||
/* Insert the leading space as a smaller chunk. */
|
||||
node->size = leadsize;
|
||||
extent_tree_szad_insert(&chunks_szad, node);
|
||||
extent_tree_ad_insert(&chunks_ad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
node = NULL;
|
||||
}
|
||||
if (trailsize != 0) {
|
||||
|
@ -102,8 +108,8 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
|
|||
}
|
||||
node->addr = (void *)((uintptr_t)(ret) + size);
|
||||
node->size = trailsize;
|
||||
extent_tree_szad_insert(&chunks_szad, node);
|
||||
extent_tree_ad_insert(&chunks_ad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
node = NULL;
|
||||
}
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
|
@ -130,7 +136,8 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
|
|||
* advantage of them if they are returned.
|
||||
*/
|
||||
void *
|
||||
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
|
||||
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
|
||||
dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
|
@ -139,17 +146,26 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
|
|||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
ret = chunk_recycle(size, alignment, base, zero);
|
||||
if (ret != NULL)
|
||||
/* "primary" dss. */
|
||||
if (config_dss && dss_prec == dss_prec_primary) {
|
||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
|
||||
goto label_return;
|
||||
}
|
||||
/* mmap. */
|
||||
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
|
||||
ret = chunk_alloc_mmap(size, alignment, zero);
|
||||
if (ret != NULL)
|
||||
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
|
||||
goto label_return;
|
||||
|
||||
if (config_dss) {
|
||||
ret = chunk_alloc_dss(size, alignment, zero);
|
||||
if (ret != NULL)
|
||||
/* "secondary" dss. */
|
||||
if (config_dss && dss_prec == dss_prec_secondary) {
|
||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
|
@ -191,7 +207,8 @@ label_return:
|
|||
}
|
||||
|
||||
static void
|
||||
chunk_record(void *chunk, size_t size)
|
||||
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
size_t size)
|
||||
{
|
||||
bool unzeroed;
|
||||
extent_node_t *xnode, *node, *prev, key;
|
||||
|
@ -208,7 +225,7 @@ chunk_record(void *chunk, size_t size)
|
|||
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
key.addr = (void *)((uintptr_t)chunk + size);
|
||||
node = extent_tree_ad_nsearch(&chunks_ad, &key);
|
||||
node = extent_tree_ad_nsearch(chunks_ad, &key);
|
||||
/* Try to coalesce forward. */
|
||||
if (node != NULL && node->addr == key.addr) {
|
||||
/*
|
||||
|
@ -216,11 +233,11 @@ chunk_record(void *chunk, size_t size)
|
|||
* not change the position within chunks_ad, so only
|
||||
* remove/insert from/into chunks_szad.
|
||||
*/
|
||||
extent_tree_szad_remove(&chunks_szad, node);
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
node->addr = chunk;
|
||||
node->size += size;
|
||||
node->zeroed = (node->zeroed && (unzeroed == false));
|
||||
extent_tree_szad_insert(&chunks_szad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
if (xnode != NULL)
|
||||
base_node_dealloc(xnode);
|
||||
} else {
|
||||
|
@ -239,12 +256,12 @@ chunk_record(void *chunk, size_t size)
|
|||
node->addr = chunk;
|
||||
node->size = size;
|
||||
node->zeroed = (unzeroed == false);
|
||||
extent_tree_ad_insert(&chunks_ad, node);
|
||||
extent_tree_szad_insert(&chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
}
|
||||
|
||||
/* Try to coalesce backward. */
|
||||
prev = extent_tree_ad_prev(&chunks_ad, node);
|
||||
prev = extent_tree_ad_prev(chunks_ad, node);
|
||||
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
|
||||
chunk) {
|
||||
/*
|
||||
|
@ -252,20 +269,34 @@ chunk_record(void *chunk, size_t size)
|
|||
* not change the position within chunks_ad, so only
|
||||
* remove/insert node from/into chunks_szad.
|
||||
*/
|
||||
extent_tree_szad_remove(&chunks_szad, prev);
|
||||
extent_tree_ad_remove(&chunks_ad, prev);
|
||||
extent_tree_szad_remove(chunks_szad, prev);
|
||||
extent_tree_ad_remove(chunks_ad, prev);
|
||||
|
||||
extent_tree_szad_remove(&chunks_szad, node);
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
node->addr = prev->addr;
|
||||
node->size += prev->size;
|
||||
node->zeroed = (node->zeroed && prev->zeroed);
|
||||
extent_tree_szad_insert(&chunks_szad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
|
||||
base_node_dealloc(prev);
|
||||
}
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_unmap(void *chunk, size_t size)
|
||||
{
|
||||
assert(chunk != NULL);
|
||||
assert(CHUNK_ADDR2BASE(chunk) == chunk);
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
if (config_dss && chunk_in_dss(chunk))
|
||||
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
|
||||
else if (chunk_dealloc_mmap(chunk, size))
|
||||
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dealloc(void *chunk, size_t size, bool unmap)
|
||||
{
|
||||
|
@ -279,15 +310,13 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
|
|||
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
|
||||
if (config_stats || config_prof) {
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
assert(stats_chunks.curchunks >= (size / chunksize));
|
||||
stats_chunks.curchunks -= (size / chunksize);
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
}
|
||||
|
||||
if (unmap) {
|
||||
if ((config_dss && chunk_in_dss(chunk)) ||
|
||||
chunk_dealloc_mmap(chunk, size))
|
||||
chunk_record(chunk, size);
|
||||
}
|
||||
if (unmap)
|
||||
chunk_unmap(chunk, size);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -307,8 +336,10 @@ chunk_boot(void)
|
|||
}
|
||||
if (config_dss && chunk_dss_boot())
|
||||
return (true);
|
||||
extent_tree_szad_new(&chunks_szad);
|
||||
extent_tree_ad_new(&chunks_ad);
|
||||
extent_tree_szad_new(&chunks_szad_mmap);
|
||||
extent_tree_ad_new(&chunks_ad_mmap);
|
||||
extent_tree_szad_new(&chunks_szad_dss);
|
||||
extent_tree_ad_new(&chunks_ad_dss);
|
||||
if (config_ivsalloc) {
|
||||
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
|
||||
opt_lg_chunk);
|
||||
|
|
|
@ -3,6 +3,16 @@
|
|||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
const char *dss_prec_names[] = {
|
||||
"disabled",
|
||||
"primary",
|
||||
"secondary",
|
||||
"N/A"
|
||||
};
|
||||
|
||||
/* Current dss precedence default, used when creating new arenas. */
|
||||
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
|
||||
|
||||
/*
|
||||
* Protects sbrk() calls. This avoids malloc races among threads, though it
|
||||
* does not protect against races with threads that call sbrk() directly.
|
||||
|
@ -29,6 +39,31 @@ sbrk(intptr_t increment)
|
|||
}
|
||||
#endif
|
||||
|
||||
dss_prec_t
|
||||
chunk_dss_prec_get(void)
|
||||
{
|
||||
dss_prec_t ret;
|
||||
|
||||
if (config_dss == false)
|
||||
return (dss_prec_disabled);
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
ret = dss_prec_default;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_dss_prec_set(dss_prec_t dss_prec)
|
||||
{
|
||||
|
||||
if (config_dss == false)
|
||||
return (true);
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
dss_prec_default = dss_prec;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
return (false);
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
|
||||
{
|
||||
|
@ -88,7 +123,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
|
|||
dss_max = dss_next;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
if (cpad_size != 0)
|
||||
chunk_dealloc(cpad, cpad_size, true);
|
||||
chunk_unmap(cpad, cpad_size);
|
||||
if (*zero) {
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
memset(ret, 0, size);
|
||||
|
|
|
@ -48,8 +48,8 @@ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
|
|||
size_t *oldlenp, void *newp, size_t newlen);
|
||||
|
||||
#define INDEX_PROTO(n) \
|
||||
const ctl_named_node_t *n##_index(const size_t *mib, size_t miblen, \
|
||||
size_t i);
|
||||
static const ctl_named_node_t *n##_index(const size_t *mib, \
|
||||
size_t miblen, size_t i);
|
||||
|
||||
static bool ctl_arena_init(ctl_arena_stats_t *astats);
|
||||
static void ctl_arena_clear(ctl_arena_stats_t *astats);
|
||||
|
@ -58,6 +58,7 @@ static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
|
|||
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
|
||||
ctl_arena_stats_t *astats);
|
||||
static void ctl_arena_refresh(arena_t *arena, unsigned i);
|
||||
static bool ctl_grow(void);
|
||||
static void ctl_refresh(void);
|
||||
static bool ctl_init(void);
|
||||
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
|
||||
|
@ -88,6 +89,7 @@ CTL_PROTO(config_utrace)
|
|||
CTL_PROTO(config_valgrind)
|
||||
CTL_PROTO(config_xmalloc)
|
||||
CTL_PROTO(opt_abort)
|
||||
CTL_PROTO(opt_dss)
|
||||
CTL_PROTO(opt_lg_chunk)
|
||||
CTL_PROTO(opt_narenas)
|
||||
CTL_PROTO(opt_lg_dirty_mult)
|
||||
|
@ -110,6 +112,10 @@ CTL_PROTO(opt_prof_gdump)
|
|||
CTL_PROTO(opt_prof_final)
|
||||
CTL_PROTO(opt_prof_leak)
|
||||
CTL_PROTO(opt_prof_accum)
|
||||
CTL_PROTO(arena_i_purge)
|
||||
static void arena_purge(unsigned arena_ind);
|
||||
CTL_PROTO(arena_i_dss)
|
||||
INDEX_PROTO(arena_i)
|
||||
CTL_PROTO(arenas_bin_i_size)
|
||||
CTL_PROTO(arenas_bin_i_nregs)
|
||||
CTL_PROTO(arenas_bin_i_run_size)
|
||||
|
@ -125,6 +131,7 @@ CTL_PROTO(arenas_nbins)
|
|||
CTL_PROTO(arenas_nhbins)
|
||||
CTL_PROTO(arenas_nlruns)
|
||||
CTL_PROTO(arenas_purge)
|
||||
CTL_PROTO(arenas_extend)
|
||||
CTL_PROTO(prof_active)
|
||||
CTL_PROTO(prof_dump)
|
||||
CTL_PROTO(prof_interval)
|
||||
|
@ -158,6 +165,7 @@ CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
|
|||
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
|
||||
INDEX_PROTO(stats_arenas_i_lruns_j)
|
||||
CTL_PROTO(stats_arenas_i_nthreads)
|
||||
CTL_PROTO(stats_arenas_i_dss)
|
||||
CTL_PROTO(stats_arenas_i_pactive)
|
||||
CTL_PROTO(stats_arenas_i_pdirty)
|
||||
CTL_PROTO(stats_arenas_i_mapped)
|
||||
|
@ -223,6 +231,7 @@ static const ctl_named_node_t config_node[] = {
|
|||
|
||||
static const ctl_named_node_t opt_node[] = {
|
||||
{NAME("abort"), CTL(opt_abort)},
|
||||
{NAME("dss"), CTL(opt_dss)},
|
||||
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
|
||||
{NAME("narenas"), CTL(opt_narenas)},
|
||||
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
|
||||
|
@ -247,6 +256,18 @@ static const ctl_named_node_t opt_node[] = {
|
|||
{NAME("prof_accum"), CTL(opt_prof_accum)}
|
||||
};
|
||||
|
||||
static const ctl_named_node_t arena_i_node[] = {
|
||||
{NAME("purge"), CTL(arena_i_purge)},
|
||||
{NAME("dss"), CTL(arena_i_dss)}
|
||||
};
|
||||
static const ctl_named_node_t super_arena_i_node[] = {
|
||||
{NAME(""), CHILD(named, arena_i)}
|
||||
};
|
||||
|
||||
static const ctl_indexed_node_t arena_node[] = {
|
||||
{INDEX(arena_i)}
|
||||
};
|
||||
|
||||
static const ctl_named_node_t arenas_bin_i_node[] = {
|
||||
{NAME("size"), CTL(arenas_bin_i_size)},
|
||||
{NAME("nregs"), CTL(arenas_bin_i_nregs)},
|
||||
|
@ -282,7 +303,8 @@ static const ctl_named_node_t arenas_node[] = {
|
|||
{NAME("bin"), CHILD(indexed, arenas_bin)},
|
||||
{NAME("nlruns"), CTL(arenas_nlruns)},
|
||||
{NAME("lrun"), CHILD(indexed, arenas_lrun)},
|
||||
{NAME("purge"), CTL(arenas_purge)}
|
||||
{NAME("purge"), CTL(arenas_purge)},
|
||||
{NAME("extend"), CTL(arenas_extend)}
|
||||
};
|
||||
|
||||
static const ctl_named_node_t prof_node[] = {
|
||||
|
@ -352,6 +374,7 @@ static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
|
|||
|
||||
static const ctl_named_node_t stats_arenas_i_node[] = {
|
||||
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
|
||||
{NAME("dss"), CTL(stats_arenas_i_dss)},
|
||||
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
|
||||
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
|
||||
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
|
||||
|
@ -387,6 +410,7 @@ static const ctl_named_node_t root_node[] = {
|
|||
{NAME("thread"), CHILD(named, thread)},
|
||||
{NAME("config"), CHILD(named, config)},
|
||||
{NAME("opt"), CHILD(named, opt)},
|
||||
{NAME("arena"), CHILD(indexed, arena)},
|
||||
{NAME("arenas"), CHILD(named, arenas)},
|
||||
{NAME("prof"), CHILD(named, prof)},
|
||||
{NAME("stats"), CHILD(named, stats)}
|
||||
|
@ -420,6 +444,7 @@ static void
|
|||
ctl_arena_clear(ctl_arena_stats_t *astats)
|
||||
{
|
||||
|
||||
astats->dss = dss_prec_names[dss_prec_limit];
|
||||
astats->pactive = 0;
|
||||
astats->pdirty = 0;
|
||||
if (config_stats) {
|
||||
|
@ -439,8 +464,8 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
|
|||
{
|
||||
unsigned i;
|
||||
|
||||
arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
|
||||
&cstats->astats, cstats->bstats, cstats->lstats);
|
||||
arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
|
||||
&cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
|
||||
|
||||
for (i = 0; i < NBINS; i++) {
|
||||
cstats->allocated_small += cstats->bstats[i].allocated;
|
||||
|
@ -500,7 +525,7 @@ static void
|
|||
ctl_arena_refresh(arena_t *arena, unsigned i)
|
||||
{
|
||||
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
|
||||
ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
|
||||
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
|
||||
|
||||
ctl_arena_clear(astats);
|
||||
|
||||
|
@@ -518,11 +543,72 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
     }
 }

+static bool
+ctl_grow(void)
+{
+    size_t astats_size;
+    ctl_arena_stats_t *astats;
+    arena_t **tarenas;
+
+    /* Extend arena stats and arenas arrays. */
+    astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
+    if (ctl_stats.narenas == narenas_auto) {
+        /* ctl_stats.arenas and arenas came from base_alloc(). */
+        astats = (ctl_arena_stats_t *)imalloc(astats_size);
+        if (astats == NULL)
+            return (true);
+        memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+            sizeof(ctl_arena_stats_t));
+
+        tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+            sizeof(arena_t *));
+        if (tarenas == NULL) {
+            idalloc(astats);
+            return (true);
+        }
+        memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
+    } else {
+        astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
+            astats_size, 0, 0, false, false);
+        if (astats == NULL)
+            return (true);
+
+        tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
+            sizeof(arena_t *), 0, 0, false, false);
+        if (tarenas == NULL)
+            return (true);
+    }
+    /* Initialize the new astats and arenas elements. */
+    memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
+    if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+        return (true);
+    tarenas[ctl_stats.narenas] = NULL;
+    /* Swap merged stats to their new location. */
+    {
+        ctl_arena_stats_t tstats;
+        memcpy(&tstats, &astats[ctl_stats.narenas],
+            sizeof(ctl_arena_stats_t));
+        memcpy(&astats[ctl_stats.narenas],
+            &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
+        memcpy(&astats[ctl_stats.narenas + 1], &tstats,
+            sizeof(ctl_arena_stats_t));
+    }
+    ctl_stats.arenas = astats;
+    ctl_stats.narenas++;
+    malloc_mutex_lock(&arenas_lock);
+    arenas = tarenas;
+    narenas_total++;
+    arenas_extend(narenas_total - 1);
+    malloc_mutex_unlock(&arenas_lock);
+
+    return (false);
+}
+
 static void
 ctl_refresh(void)
 {
     unsigned i;
-    VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+    VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

     if (config_stats) {
         malloc_mutex_lock(&chunks_mtx);
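ctl_grow() above is the machinery behind the new "arenas.extend" mallctl listed in the ChangeLog. A minimal sketch of how an application might use it; the unprefixed mallctl() name and the error handling are illustrative assumptions, not part of this patch:

#include <stdio.h>
#include <stdlib.h>
#include "jemalloc/jemalloc.h"

int
main(void)
{
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);

    /* Ask jemalloc to create a new, manually managed arena. */
    if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0) {
        fprintf(stderr, "arenas.extend failed\n");
        return (1);
    }
    /* arena_ind can now be fed to ALLOCM_ARENA() or "thread.arena". */
    printf("new arena index: %u\n", arena_ind);
    return (0);
}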
@ -542,19 +628,19 @@ ctl_refresh(void)
|
|||
* Clear sum stats, since they will be merged into by
|
||||
* ctl_arena_refresh().
|
||||
*/
|
||||
ctl_stats.arenas[narenas].nthreads = 0;
|
||||
ctl_arena_clear(&ctl_stats.arenas[narenas]);
|
||||
ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
|
||||
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
|
||||
|
||||
malloc_mutex_lock(&arenas_lock);
|
||||
memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
|
||||
for (i = 0; i < narenas; i++) {
|
||||
memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
|
||||
for (i = 0; i < ctl_stats.narenas; i++) {
|
||||
if (arenas[i] != NULL)
|
||||
ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
|
||||
else
|
||||
ctl_stats.arenas[i].nthreads = 0;
|
||||
}
|
||||
malloc_mutex_unlock(&arenas_lock);
|
||||
for (i = 0; i < narenas; i++) {
|
||||
for (i = 0; i < ctl_stats.narenas; i++) {
|
||||
bool initialized = (tarenas[i] != NULL);
|
||||
|
||||
ctl_stats.arenas[i].initialized = initialized;
|
||||
|
@ -563,11 +649,13 @@ ctl_refresh(void)
|
|||
}
|
||||
|
||||
if (config_stats) {
|
||||
ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
|
||||
+ ctl_stats.arenas[narenas].astats.allocated_large
|
||||
ctl_stats.allocated =
|
||||
ctl_stats.arenas[ctl_stats.narenas].allocated_small
|
||||
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
|
||||
+ ctl_stats.huge.allocated;
|
||||
ctl_stats.active =
|
||||
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
|
||||
+ ctl_stats.huge.allocated;
|
||||
ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
|
||||
LG_PAGE) + ctl_stats.huge.allocated;
|
||||
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
|
||||
}
|
||||
|
||||
|
@ -585,13 +673,15 @@ ctl_init(void)
|
|||
* Allocate space for one extra arena stats element, which
|
||||
* contains summed stats across all arenas.
|
||||
*/
|
||||
assert(narenas_auto == narenas_total_get());
|
||||
ctl_stats.narenas = narenas_auto;
|
||||
ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
|
||||
(narenas + 1) * sizeof(ctl_arena_stats_t));
|
||||
(ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
|
||||
if (ctl_stats.arenas == NULL) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
memset(ctl_stats.arenas, 0, (narenas + 1) *
|
||||
memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
|
||||
sizeof(ctl_arena_stats_t));
|
||||
|
||||
/*
|
||||
|
@ -601,14 +691,14 @@ ctl_init(void)
|
|||
*/
|
||||
if (config_stats) {
|
||||
unsigned i;
|
||||
for (i = 0; i <= narenas; i++) {
|
||||
for (i = 0; i <= ctl_stats.narenas; i++) {
|
||||
if (ctl_arena_init(&ctl_stats.arenas[i])) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
}
|
||||
}
|
||||
ctl_stats.arenas[narenas].initialized = true;
|
||||
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
|
||||
|
||||
ctl_epoch = 0;
|
||||
ctl_refresh();
|
||||
|
@ -870,11 +960,11 @@ ctl_postfork_child(void)
|
|||
if (*oldlenp != sizeof(t)) { \
|
||||
size_t copylen = (sizeof(t) <= *oldlenp) \
|
||||
? sizeof(t) : *oldlenp; \
|
||||
memcpy(oldp, (void *)&v, copylen); \
|
||||
memcpy(oldp, (void *)&(v), copylen); \
|
||||
ret = EINVAL; \
|
||||
goto label_return; \
|
||||
} else \
|
||||
*(t *)oldp = v; \
|
||||
*(t *)oldp = (v); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
@ -884,7 +974,7 @@ ctl_postfork_child(void)
|
|||
ret = EINVAL; \
|
||||
goto label_return; \
|
||||
} \
|
||||
v = *(t *)newp; \
|
||||
(v) = *(t *)newp; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
@ -905,7 +995,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
|||
if (l) \
|
||||
malloc_mutex_lock(&ctl_mtx); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
|
@ -927,7 +1017,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
|||
return (ENOENT); \
|
||||
malloc_mutex_lock(&ctl_mtx); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
|
@ -946,7 +1036,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
|||
\
|
||||
malloc_mutex_lock(&ctl_mtx); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
|
@ -970,7 +1060,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
|||
if ((c) == false) \
|
||||
return (ENOENT); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
|
@ -987,7 +1077,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
|||
t oldval; \
|
||||
\
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
|
@@ -1084,13 +1174,14 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     int ret;
     unsigned newind, oldind;

+    malloc_mutex_lock(&ctl_mtx);
     newind = oldind = choose_arena(NULL)->ind;
     WRITE(newind, unsigned);
     READ(oldind, unsigned);
     if (newind != oldind) {
         arena_t *arena;

-        if (newind >= narenas) {
+        if (newind >= ctl_stats.narenas) {
             /* New arena index is out of range. */
             ret = EFAULT;
             goto label_return;
@@ -1123,6 +1214,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,

     ret = 0;
 label_return:
+    malloc_mutex_unlock(&ctl_mtx);
     return (ret);
 }

@@ -1156,6 +1248,7 @@ CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
 /******************************************************************************/

 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
 CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
 CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
@@ -1179,12 +1272,123 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)

 /******************************************************************************/

+/* ctl_mutex must be held during execution of this function. */
+static void
+arena_purge(unsigned arena_ind)
+{
+    VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+    malloc_mutex_lock(&arenas_lock);
+    memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+    malloc_mutex_unlock(&arenas_lock);
+
+    if (arena_ind == ctl_stats.narenas) {
+        unsigned i;
+        for (i = 0; i < ctl_stats.narenas; i++) {
+            if (tarenas[i] != NULL)
+                arena_purge_all(tarenas[i]);
+        }
+    } else {
+        assert(arena_ind < ctl_stats.narenas);
+        if (tarenas[arena_ind] != NULL)
+            arena_purge_all(tarenas[arena_ind]);
+    }
+}
+
+static int
+arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+    int ret;
+
+    READONLY();
+    WRITEONLY();
+    malloc_mutex_lock(&ctl_mtx);
+    arena_purge(mib[1]);
+    malloc_mutex_unlock(&ctl_mtx);
+
+    ret = 0;
+label_return:
+    return (ret);
+}
+
+static int
+arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+    int ret, i;
+    bool match, err;
+    const char *dss;
+    unsigned arena_ind = mib[1];
+    dss_prec_t dss_prec_old = dss_prec_limit;
+    dss_prec_t dss_prec = dss_prec_limit;
+
+    malloc_mutex_lock(&ctl_mtx);
+    WRITE(dss, const char *);
+    match = false;
+    for (i = 0; i < dss_prec_limit; i++) {
+        if (strcmp(dss_prec_names[i], dss) == 0) {
+            dss_prec = i;
+            match = true;
+            break;
+        }
+    }
+    if (match == false) {
+        ret = EINVAL;
+        goto label_return;
+    }
+
+    if (arena_ind < ctl_stats.narenas) {
+        arena_t *arena = arenas[arena_ind];
+        if (arena != NULL) {
+            dss_prec_old = arena_dss_prec_get(arena);
+            arena_dss_prec_set(arena, dss_prec);
+            err = false;
+        } else
+            err = true;
+    } else {
+        dss_prec_old = chunk_dss_prec_get();
+        err = chunk_dss_prec_set(dss_prec);
+    }
+    dss = dss_prec_names[dss_prec_old];
+    READ(dss, const char *);
+    if (err) {
+        ret = EFAULT;
+        goto label_return;
+    }
+
+    ret = 0;
+label_return:
+    malloc_mutex_unlock(&ctl_mtx);
+    return (ret);
+}
+
+static const ctl_named_node_t *
+arena_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+    const ctl_named_node_t * ret;
+
+    malloc_mutex_lock(&ctl_mtx);
+    if (i > ctl_stats.narenas) {
+        ret = NULL;
+        goto label_return;
+    }
+
+    ret = super_arena_i_node;
+label_return:
+    malloc_mutex_unlock(&ctl_mtx);
+    return (ret);
+}
+
+/******************************************************************************/
+
 CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
 CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
 CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
 arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 {

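arena_i_purge_ctl() above backs the per-arena "arena.<i>.purge" mallctl that the ChangeLog says obsoletes "arenas.purge". A minimal usage sketch, assuming the conventional way of embedding the arena index in the mallctl name; the helper name is hypothetical:

#include <stdio.h>
#include "jemalloc/jemalloc.h"

/* Purge dirty pages of one arena via the "arena.<i>.purge" namespace. */
static int
purge_arena(unsigned arena_ind)
{
    char name[64];

    snprintf(name, sizeof(name), "arena.%u.purge", arena_ind);
    /* Purge takes neither an old value nor a new value. */
    return (mallctl(name, NULL, NULL, NULL, 0));
}

int
main(void)
{
    return (purge_arena(0) == 0 ? 0 : 1);
}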
@@ -1194,7 +1398,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 }

 CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 {

@@ -1203,7 +1407,27 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
     return (super_arenas_lrun_i_node);
 }

-CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
+static int
+arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+    int ret;
+    unsigned narenas;
+
+    malloc_mutex_lock(&ctl_mtx);
+    READONLY();
+    if (*oldlenp != sizeof(unsigned)) {
+        ret = EINVAL;
+        goto label_return;
+    }
+    narenas = ctl_stats.narenas;
+    READ(narenas, unsigned);
+
+    ret = 0;
+label_return:
+    malloc_mutex_unlock(&ctl_mtx);
+    return (ret);
+}

 static int
 arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
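With "arenas.extend" in place, "arenas.narenas" becomes a dynamic count rather than a fixed maximum (the stats.c hunk further down likewise renames the printed label from "Max arenas" to "Arenas"). A small sketch of reading it; the unprefixed mallctl() name is an illustrative assumption:

#include <stdio.h>
#include "jemalloc/jemalloc.h"

int
main(void)
{
    unsigned narenas;
    size_t sz = sizeof(narenas);

    if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0)
        printf("arenas: %u\n", narenas);
    return (0);
}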
@@ -1214,13 +1438,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,

     malloc_mutex_lock(&ctl_mtx);
     READONLY();
-    if (*oldlenp != narenas * sizeof(bool)) {
+    if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
         ret = EINVAL;
-        nread = (*oldlenp < narenas * sizeof(bool))
-            ? (*oldlenp / sizeof(bool)) : narenas;
+        nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
+            ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
     } else {
         ret = 0;
-        nread = narenas;
+        nread = ctl_stats.narenas;
     }

     for (i = 0; i < nread; i++)
@@ -1243,36 +1467,45 @@ arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
 {
     int ret;
-    unsigned arena;
+    unsigned arena_ind;

     malloc_mutex_lock(&ctl_mtx);
     WRITEONLY();
-    arena = UINT_MAX;
-    WRITE(arena, unsigned);
-    if (newp != NULL && arena >= narenas) {
+    arena_ind = UINT_MAX;
+    WRITE(arena_ind, unsigned);
+    if (newp != NULL && arena_ind >= ctl_stats.narenas)
         ret = EFAULT;
-        goto label_return;
-    } else {
-        VARIABLE_ARRAY(arena_t *, tarenas, narenas);
-
-        malloc_mutex_lock(&arenas_lock);
-        memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
-        malloc_mutex_unlock(&arenas_lock);
-
-        if (arena == UINT_MAX) {
-            unsigned i;
-            for (i = 0; i < narenas; i++) {
-                if (tarenas[i] != NULL)
-                    arena_purge_all(tarenas[i]);
-            }
-        } else {
-            assert(arena < narenas);
-            if (tarenas[arena] != NULL)
-                arena_purge_all(tarenas[arena]);
-        }
+    else {
+        if (arena_ind == UINT_MAX)
+            arena_ind = ctl_stats.narenas;
+        arena_purge(arena_ind);
+        ret = 0;
     }

 label_return:
     malloc_mutex_unlock(&ctl_mtx);
     return (ret);
 }

+static int
+arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+    int ret;
+    unsigned narenas;
+
+    malloc_mutex_lock(&ctl_mtx);
+    READONLY();
+    if (ctl_grow()) {
+        ret = EAGAIN;
+        goto label_return;
+    }
+    narenas = ctl_stats.narenas - 1;
+    READ(narenas, unsigned);
+
+    ret = 0;
+label_return:
+    malloc_mutex_unlock(&ctl_mtx);
+    return (ret);
+}
+
@ -1377,7 +1610,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
|
|||
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
|
||||
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
|
||||
|
||||
const ctl_named_node_t *
|
||||
static const ctl_named_node_t *
|
||||
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
|
||||
{
|
||||
|
||||
|
@ -1395,7 +1628,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
|
|||
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
|
||||
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
|
||||
|
||||
const ctl_named_node_t *
|
||||
static const ctl_named_node_t *
|
||||
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
|
||||
{
|
||||
|
||||
|
@ -1405,6 +1638,7 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
|
|||
}
|
||||
|
||||
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
|
||||
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
|
||||
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
|
||||
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
|
||||
|
@ -1416,13 +1650,13 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
|
|||
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
|
||||
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
|
||||
|
||||
const ctl_named_node_t *
|
||||
static const ctl_named_node_t *
|
||||
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
|
||||
{
|
||||
const ctl_named_node_t * ret;
|
||||
|
||||
malloc_mutex_lock(&ctl_mtx);
|
||||
if (ctl_stats.arenas[i].initialized == false) {
|
||||
if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
|
||||
ret = NULL;
|
||||
goto label_return;
|
||||
}
|
||||
|
|
|
@ -48,7 +48,8 @@ huge_palloc(size_t size, size_t alignment, bool zero)
|
|||
* it is possible to make correct junk/zero fill decisions below.
|
||||
*/
|
||||
is_zeroed = zero;
|
||||
ret = chunk_alloc(csize, alignment, false, &is_zeroed);
|
||||
ret = chunk_alloc(csize, alignment, false, &is_zeroed,
|
||||
chunk_dss_prec_get());
|
||||
if (ret == NULL) {
|
||||
base_node_dealloc(node);
|
||||
return (NULL);
|
||||
|
@ -101,7 +102,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
|
|||
|
||||
void *
|
||||
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero)
|
||||
size_t alignment, bool zero, bool try_tcache_dalloc)
|
||||
{
|
||||
void *ret;
|
||||
size_t copysize;
|
||||
|
@ -180,7 +181,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
|||
#endif
|
||||
{
|
||||
memcpy(ret, ptr, copysize);
|
||||
iqalloc(ptr);
|
||||
iqallocx(ptr, try_tcache_dalloc);
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
|
|
|
@ -33,7 +33,8 @@ unsigned ncpus;
|
|||
|
||||
malloc_mutex_t arenas_lock;
|
||||
arena_t **arenas;
|
||||
unsigned narenas;
|
||||
unsigned narenas_total;
|
||||
unsigned narenas_auto;
|
||||
|
||||
/* Set to true once the allocator has been initialized. */
|
||||
static bool malloc_initialized = false;
|
||||
|
@ -144,14 +145,14 @@ choose_arena_hard(void)
|
|||
{
|
||||
arena_t *ret;
|
||||
|
||||
if (narenas > 1) {
|
||||
if (narenas_auto > 1) {
|
||||
unsigned i, choose, first_null;
|
||||
|
||||
choose = 0;
|
||||
first_null = narenas;
|
||||
first_null = narenas_auto;
|
||||
malloc_mutex_lock(&arenas_lock);
|
||||
assert(arenas[0] != NULL);
|
||||
for (i = 1; i < narenas; i++) {
|
||||
for (i = 1; i < narenas_auto; i++) {
|
||||
if (arenas[i] != NULL) {
|
||||
/*
|
||||
* Choose the first arena that has the lowest
|
||||
|
@ -160,7 +161,7 @@ choose_arena_hard(void)
|
|||
if (arenas[i]->nthreads <
|
||||
arenas[choose]->nthreads)
|
||||
choose = i;
|
||||
} else if (first_null == narenas) {
|
||||
} else if (first_null == narenas_auto) {
|
||||
/*
|
||||
* Record the index of the first uninitialized
|
||||
* arena, in case all extant arenas are in use.
|
||||
|
@ -174,7 +175,8 @@ choose_arena_hard(void)
|
|||
}
|
||||
}
|
||||
|
||||
if (arenas[choose]->nthreads == 0 || first_null == narenas) {
|
||||
if (arenas[choose]->nthreads == 0
|
||||
|| first_null == narenas_auto) {
|
||||
/*
|
||||
* Use an unloaded arena, or the least loaded arena if
|
||||
* all arenas are already initialized.
|
||||
|
@ -203,7 +205,7 @@ stats_print_atexit(void)
|
|||
{
|
||||
|
||||
if (config_tcache && config_stats) {
|
||||
unsigned i;
|
||||
unsigned narenas, i;
|
||||
|
||||
/*
|
||||
* Merge stats from extant threads. This is racy, since
|
||||
|
@ -212,7 +214,7 @@ stats_print_atexit(void)
|
|||
* out of date by the time they are reported, if other threads
|
||||
* continue to allocate.
|
||||
*/
|
||||
for (i = 0; i < narenas; i++) {
|
||||
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
|
||||
arena_t *arena = arenas[i];
|
||||
if (arena != NULL) {
|
||||
tcache_t *tcache;
|
||||
|
@ -554,6 +556,30 @@ malloc_conf_init(void)
|
|||
*/
|
||||
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
|
||||
(config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
|
||||
if (strncmp("dss", k, klen) == 0) {
|
||||
int i;
|
||||
bool match = false;
|
||||
for (i = 0; i < dss_prec_limit; i++) {
|
||||
if (strncmp(dss_prec_names[i], v, vlen)
|
||||
== 0) {
|
||||
if (chunk_dss_prec_set(i)) {
|
||||
malloc_conf_error(
|
||||
"Error setting dss",
|
||||
k, klen, v, vlen);
|
||||
} else {
|
||||
opt_dss =
|
||||
dss_prec_names[i];
|
||||
match = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (match == false) {
|
||||
malloc_conf_error("Invalid conf value",
|
||||
k, klen, v, vlen);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
|
||||
SIZE_T_MAX)
|
||||
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
|
||||
|
@ -699,9 +725,9 @@ malloc_init_hard(void)
|
|||
* Create enough scaffolding to allow recursive allocation in
|
||||
* malloc_ncpus().
|
||||
*/
|
||||
narenas = 1;
|
||||
narenas_total = narenas_auto = 1;
|
||||
arenas = init_arenas;
|
||||
memset(arenas, 0, sizeof(arena_t *) * narenas);
|
||||
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
|
||||
|
||||
/*
|
||||
* Initialize one arena here. The rest are lazily created in
|
||||
|
@ -759,20 +785,21 @@ malloc_init_hard(void)
|
|||
else
|
||||
opt_narenas = 1;
|
||||
}
|
||||
narenas = opt_narenas;
|
||||
narenas_auto = opt_narenas;
|
||||
/*
|
||||
* Make sure that the arenas array can be allocated. In practice, this
|
||||
* limit is enough to allow the allocator to function, but the ctl
|
||||
* machinery will fail to allocate memory at far lower limits.
|
||||
*/
|
||||
if (narenas > chunksize / sizeof(arena_t *)) {
|
||||
narenas = chunksize / sizeof(arena_t *);
|
||||
if (narenas_auto > chunksize / sizeof(arena_t *)) {
|
||||
narenas_auto = chunksize / sizeof(arena_t *);
|
||||
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
|
||||
narenas);
|
||||
narenas_auto);
|
||||
}
|
||||
narenas_total = narenas_auto;
|
||||
|
||||
/* Allocate and initialize arenas. */
|
||||
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
|
||||
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
|
||||
if (arenas == NULL) {
|
||||
malloc_mutex_unlock(&init_lock);
|
||||
return (true);
|
||||
|
@ -781,7 +808,7 @@ malloc_init_hard(void)
|
|||
* Zero the array. In practice, this should always be pre-zeroed,
|
||||
* since it was just mmap()ed, but let's be sure.
|
||||
*/
|
||||
memset(arenas, 0, sizeof(arena_t *) * narenas);
|
||||
memset(arenas, 0, sizeof(arena_t *) * narenas_total);
|
||||
/* Copy the pointer to the one arena that was already initialized. */
|
||||
arenas[0] = init_arenas[0];
|
||||
|
||||
|
@ -1346,18 +1373,19 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
|||
#ifdef JEMALLOC_EXPERIMENTAL
|
||||
|
||||
JEMALLOC_INLINE void *
|
||||
iallocm(size_t usize, size_t alignment, bool zero)
|
||||
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
|
||||
arena_t *arena)
|
||||
{
|
||||
|
||||
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
|
||||
alignment)));
|
||||
|
||||
if (alignment != 0)
|
||||
return (ipalloc(usize, alignment, zero));
|
||||
return (ipallocx(usize, alignment, zero, try_tcache, arena));
|
||||
else if (zero)
|
||||
return (icalloc(usize));
|
||||
return (icallocx(usize, try_tcache, arena));
|
||||
else
|
||||
return (imalloc(usize));
|
||||
return (imallocx(usize, try_tcache, arena));
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -1368,6 +1396,9 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
|
|||
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
|
||||
& (SIZE_T_MAX-1));
|
||||
bool zero = flags & ALLOCM_ZERO;
|
||||
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
|
||||
arena_t *arena;
|
||||
bool try_tcache;
|
||||
|
||||
assert(ptr != NULL);
|
||||
assert(size != 0);
|
||||
|
@ -1375,6 +1406,14 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
|
|||
if (malloc_init())
|
||||
goto label_oom;
|
||||
|
||||
if (arena_ind != UINT_MAX) {
|
||||
arena = arenas[arena_ind];
|
||||
try_tcache = false;
|
||||
} else {
|
||||
arena = NULL;
|
||||
try_tcache = true;
|
||||
}
|
||||
|
||||
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
|
||||
if (usize == 0)
|
||||
goto label_oom;
|
||||
|
@ -1391,18 +1430,19 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
|
|||
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
|
||||
alignment);
|
||||
assert(usize_promoted != 0);
|
||||
p = iallocm(usize_promoted, alignment, zero);
|
||||
p = iallocm(usize_promoted, alignment, zero,
|
||||
try_tcache, arena);
|
||||
if (p == NULL)
|
||||
goto label_oom;
|
||||
arena_prof_promoted(p, usize);
|
||||
} else {
|
||||
p = iallocm(usize, alignment, zero);
|
||||
p = iallocm(usize, alignment, zero, try_tcache, arena);
|
||||
if (p == NULL)
|
||||
goto label_oom;
|
||||
}
|
||||
prof_malloc(p, usize, cnt);
|
||||
} else {
|
||||
p = iallocm(usize, alignment, zero);
|
||||
p = iallocm(usize, alignment, zero, try_tcache, arena);
|
||||
if (p == NULL)
|
||||
goto label_oom;
|
||||
}
|
||||
|
@ -1439,6 +1479,9 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
|
|||
& (SIZE_T_MAX-1));
|
||||
bool zero = flags & ALLOCM_ZERO;
|
||||
bool no_move = flags & ALLOCM_NO_MOVE;
|
||||
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
|
||||
bool try_tcache_alloc, try_tcache_dalloc;
|
||||
arena_t *arena;
|
||||
|
||||
assert(ptr != NULL);
|
||||
assert(*ptr != NULL);
|
||||
|
@ -1446,6 +1489,19 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
|
|||
assert(SIZE_T_MAX - size >= extra);
|
||||
assert(malloc_initialized || IS_INITIALIZER);
|
||||
|
||||
if (arena_ind != UINT_MAX) {
|
||||
arena_chunk_t *chunk;
|
||||
try_tcache_alloc = true;
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
|
||||
try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
|
||||
arenas[arena_ind]);
|
||||
arena = arenas[arena_ind];
|
||||
} else {
|
||||
try_tcache_alloc = true;
|
||||
try_tcache_dalloc = true;
|
||||
arena = NULL;
|
||||
}
|
||||
|
||||
p = *ptr;
|
||||
if (config_prof && opt_prof) {
|
||||
prof_thr_cnt_t *cnt;
|
||||
|
@ -1472,9 +1528,10 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
|
|||
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
|
||||
&& ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
|
||||
<= SMALL_MAXCLASS) {
|
||||
q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
|
||||
q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
|
||||
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
|
||||
alignment, zero, no_move);
|
||||
alignment, zero, no_move, try_tcache_alloc,
|
||||
try_tcache_dalloc, arena);
|
||||
if (q == NULL)
|
||||
goto label_err;
|
||||
if (max_usize < PAGE) {
|
||||
|
@ -1483,7 +1540,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
|
|||
} else
|
||||
usize = isalloc(q, config_prof);
|
||||
} else {
|
||||
q = iralloc(p, size, extra, alignment, zero, no_move);
|
||||
q = irallocx(p, size, extra, alignment, zero, no_move,
|
||||
try_tcache_alloc, try_tcache_dalloc, arena);
|
||||
if (q == NULL)
|
||||
goto label_err;
|
||||
usize = isalloc(q, config_prof);
|
||||
|
@ -1500,7 +1558,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
|
|||
old_size = isalloc(p, false);
|
||||
old_rzsize = u2rz(old_size);
|
||||
}
|
||||
q = iralloc(p, size, extra, alignment, zero, no_move);
|
||||
q = irallocx(p, size, extra, alignment, zero, no_move,
|
||||
try_tcache_alloc, try_tcache_dalloc, arena);
|
||||
if (q == NULL)
|
||||
goto label_err;
|
||||
if (config_stats)
|
||||
|
@ -1561,10 +1620,19 @@ je_dallocm(void *ptr, int flags)
|
|||
{
|
||||
size_t usize;
|
||||
size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
|
||||
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
|
||||
bool try_tcache;
|
||||
|
||||
assert(ptr != NULL);
|
||||
assert(malloc_initialized || IS_INITIALIZER);
|
||||
|
||||
if (arena_ind != UINT_MAX) {
|
||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
try_tcache = (chunk == ptr || chunk->arena !=
|
||||
arenas[arena_ind]);
|
||||
} else
|
||||
try_tcache = true;
|
||||
|
||||
UTRACE(ptr, 0, 0);
|
||||
if (config_stats || config_valgrind)
|
||||
usize = isalloc(ptr, config_prof);
|
||||
|
@ -1577,7 +1645,7 @@ je_dallocm(void *ptr, int flags)
|
|||
thread_allocated_tsd_get()->deallocated += usize;
|
||||
if (config_valgrind && opt_valgrind)
|
||||
rzsize = p2rz(ptr);
|
||||
iqalloc(ptr);
|
||||
iqallocx(ptr, try_tcache);
|
||||
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
|
||||
|
||||
return (ALLOCM_SUCCESS);
|
||||
|
@ -1654,7 +1722,7 @@ _malloc_prefork(void)
|
|||
/* Acquire all mutexes in a safe order. */
|
||||
ctl_prefork();
|
||||
malloc_mutex_prefork(&arenas_lock);
|
||||
for (i = 0; i < narenas; i++) {
|
||||
for (i = 0; i < narenas_total; i++) {
|
||||
if (arenas[i] != NULL)
|
||||
arena_prefork(arenas[i]);
|
||||
}
|
||||
|
@ -1685,7 +1753,7 @@ _malloc_postfork(void)
|
|||
base_postfork_parent();
|
||||
chunk_postfork_parent();
|
||||
prof_postfork_parent();
|
||||
for (i = 0; i < narenas; i++) {
|
||||
for (i = 0; i < narenas_total; i++) {
|
||||
if (arenas[i] != NULL)
|
||||
arena_postfork_parent(arenas[i]);
|
||||
}
|
||||
|
@ -1705,7 +1773,7 @@ jemalloc_postfork_child(void)
|
|||
base_postfork_child();
|
||||
chunk_postfork_child();
|
||||
prof_postfork_child();
|
||||
for (i = 0; i < narenas; i++) {
|
||||
for (i = 0; i < narenas_total; i++) {
|
||||
if (arenas[i] != NULL)
|
||||
arena_postfork_child(arenas[i]);
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ bool opt_prof_leak = false;
|
|||
bool opt_prof_accum = false;
|
||||
char opt_prof_prefix[PATH_MAX + 1];
|
||||
|
||||
uint64_t prof_interval;
|
||||
uint64_t prof_interval = 0;
|
||||
bool prof_promote;
|
||||
|
||||
/*
|
||||
|
@ -1206,13 +1206,11 @@ prof_boot1(void)
|
|||
*/
|
||||
opt_prof = true;
|
||||
opt_prof_gdump = false;
|
||||
prof_interval = 0;
|
||||
} else if (opt_prof) {
|
||||
if (opt_lg_prof_interval >= 0) {
|
||||
prof_interval = (((uint64_t)1U) <<
|
||||
opt_lg_prof_interval);
|
||||
} else
|
||||
prof_interval = 0;
|
||||
}
|
||||
}
|
||||
|
||||
prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
|
||||
|
|
|
@ -206,6 +206,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
|||
unsigned i, bool bins, bool large)
|
||||
{
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
size_t page, pactive, pdirty, mapped;
|
||||
uint64_t npurge, nmadvise, purged;
|
||||
size_t small_allocated;
|
||||
|
@ -218,6 +219,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
|||
CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
|
||||
malloc_cprintf(write_cb, cbopaque,
|
||||
"assigned threads: %u\n", nthreads);
|
||||
CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
|
||||
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
|
||||
dss);
|
||||
CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
|
||||
CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
|
||||
CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
|
||||
|
@ -370,6 +374,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
|||
"Run-time option settings:\n");
|
||||
OPT_WRITE_BOOL(abort)
|
||||
OPT_WRITE_SIZE_T(lg_chunk)
|
||||
OPT_WRITE_CHAR_P(dss)
|
||||
OPT_WRITE_SIZE_T(narenas)
|
||||
OPT_WRITE_SSIZE_T(lg_dirty_mult)
|
||||
OPT_WRITE_BOOL(stats_print)
|
||||
|
@ -400,7 +405,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
|||
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
|
||||
|
||||
CTL_GET("arenas.narenas", &uv, unsigned);
|
||||
malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
|
||||
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
|
||||
|
||||
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
|
||||
sizeof(void *));
|
||||
|
@ -472,7 +477,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
|||
CTL_GET("stats.chunks.current", &chunks_current, size_t);
|
||||
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
|
||||
"highchunks curchunks\n");
|
||||
malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
|
||||
malloc_cprintf(write_cb, cbopaque,
|
||||
" %13"PRIu64" %12zu %12zu\n",
|
||||
chunks_total, chunks_high, chunks_current);
|
||||
|
||||
/* Print huge stats. */
|
||||
|
|
|
@ -97,9 +97,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
arena_bin_t *bin = &arena->bins[binind];
|
||||
|
||||
if (config_prof && arena == tcache->arena) {
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
arena_prof_accum(arena, tcache->prof_accumbytes);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
|
||||
|
@ -180,7 +178,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
malloc_mutex_lock(&arena->lock);
|
||||
if ((config_prof || config_stats) && arena == tcache->arena) {
|
||||
if (config_prof) {
|
||||
arena_prof_accum(arena,
|
||||
arena_prof_accum_locked(arena,
|
||||
tcache->prof_accumbytes);
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
|
@ -288,7 +286,7 @@ tcache_create(arena_t *arena)
|
|||
else if (size <= tcache_maxclass)
|
||||
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
|
||||
else
|
||||
tcache = (tcache_t *)icalloc(size);
|
||||
tcache = (tcache_t *)icallocx(size, false, arena);
|
||||
|
||||
if (tcache == NULL)
|
||||
return (NULL);
|
||||
|
@ -343,11 +341,8 @@ tcache_destroy(tcache_t *tcache)
|
|||
}
|
||||
}
|
||||
|
||||
if (config_prof && tcache->prof_accumbytes > 0) {
|
||||
malloc_mutex_lock(&tcache->arena->lock);
|
||||
if (config_prof && tcache->prof_accumbytes > 0)
|
||||
arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
|
||||
malloc_mutex_unlock(&tcache->arena->lock);
|
||||
}
|
||||
|
||||
tcache_size = arena_salloc(tcache, false);
|
||||
if (tcache_size <= SMALL_MAXCLASS) {
|
||||
|
@ -364,7 +359,7 @@ tcache_destroy(tcache_t *tcache)
|
|||
|
||||
arena_dalloc_large(arena, chunk, tcache);
|
||||
} else
|
||||
idalloc(tcache);
|
||||
idallocx(tcache, false);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -171,6 +171,16 @@ void
|
|||
register_zone(void)
|
||||
{
|
||||
|
||||
/*
|
||||
* If something else replaced the system default zone allocator, don't
|
||||
* register jemalloc's.
|
||||
*/
|
||||
malloc_zone_t *default_zone = malloc_default_zone();
|
||||
if (!default_zone->zone_name ||
|
||||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
zone.size = (void *)zone_size;
|
||||
zone.malloc = (void *)zone_malloc;
|
||||
zone.calloc = (void *)zone_calloc;
|
||||
|
@ -241,7 +251,7 @@ register_zone(void)
|
|||
* then becomes the default.
|
||||
*/
|
||||
do {
|
||||
malloc_zone_t *default_zone = malloc_default_zone();
|
||||
default_zone = malloc_default_zone();
|
||||
malloc_zone_unregister(default_zone);
|
||||
malloc_zone_register(default_zone);
|
||||
} while (malloc_default_zone() != &zone);
|
||||
|
|
|
@@ -0,0 +1,66 @@
+#define JEMALLOC_MANGLE
+#include "jemalloc_test.h"
+
+#define NTHREADS 10
+
+void *
+je_thread_start(void *arg)
+{
+    unsigned thread_ind = (unsigned)(uintptr_t)arg;
+    unsigned arena_ind;
+    int r;
+    void *p;
+    size_t rsz, sz;
+
+    sz = sizeof(arena_ind);
+    if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0)
+        != 0) {
+        malloc_printf("Error in arenas.extend\n");
+        abort();
+    }
+
+    if (thread_ind % 4 != 3) {
+        size_t mib[3];
+        size_t miblen = sizeof(mib) / sizeof(size_t);
+        const char *dss_precs[] = {"disabled", "primary", "secondary"};
+        const char *dss = dss_precs[thread_ind % 4];
+        if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) {
+            malloc_printf("Error in mallctlnametomib()\n");
+            abort();
+        }
+        mib[1] = arena_ind;
+        if (mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
+            sizeof(const char *))) {
+            malloc_printf("Error in mallctlbymib()\n");
+            abort();
+        }
+    }
+
+    r = allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind));
+    if (r != ALLOCM_SUCCESS) {
+        malloc_printf("Unexpected allocm() error\n");
+        abort();
+    }
+
+    return (NULL);
+}
+
+int
+main(void)
+{
+    je_thread_t threads[NTHREADS];
+    unsigned i;
+
+    malloc_printf("Test begin\n");
+
+    for (i = 0; i < NTHREADS; i++) {
+        je_thread_create(&threads[i], je_thread_start,
+            (void *)(uintptr_t)i);
+    }
+
+    for (i = 0; i < NTHREADS; i++)
+        je_thread_join(threads[i], NULL);
+
+    malloc_printf("Test end\n");
+    return (0);
+}
@@ -0,0 +1,2 @@
+Test begin
+Test end
@@ -1,7 +1,7 @@
 #define JEMALLOC_MANGLE
 #include "jemalloc_test.h"

-#define NTHREADS 10
+#define NTHREADS    10

 void *
 je_thread_start(void *arg)
@@ -66,8 +66,10 @@ main(void)
         goto label_return;
     }

-    for (i = 0; i < NTHREADS; i++)
-        je_thread_create(&threads[i], je_thread_start, (void *)&arena_ind);
+    for (i = 0; i < NTHREADS; i++) {
+        je_thread_create(&threads[i], je_thread_start,
+            (void *)&arena_ind);
+    }

     for (i = 0; i < NTHREADS; i++)
         je_thread_join(threads[i], (void *)&ret);
@@ -1,2 +1,2 @@
 UPSTREAM_REPO=git://canonware.com/jemalloc.git
-UPSTREAM_COMMIT=d0ffd8ed4f6aa4cf7248028eddfcb35f93247fe4
+UPSTREAM_COMMIT=6eb84fbe315add1e1d4f8deedc25d260fff3ae97