Bug 427109: Add memory reserve and xmalloc() API, r=benjamin

Add support for a memory reserve, which is managed via the reserve_*() API.

Add xmalloc() variants of malloc()-like functions.  These functions never
return NULL.
Jason Evans, 2008-06-23 07:46:37 -07:00
Parent: f6a38ccad6
Commit: 34bdf0c1a0
5 changed files: 1428 additions, 621 deletions
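
For illustration, a minimal sketch of the x*() contract described above; this is not the commit's actual implementation, which draws on the memory reserve and notifies callbacks before aborting:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical sketch: xmalloc() never returns NULL. */
	void *
	xmalloc(size_t size)
	{
		void *ret;

		ret = malloc(size);
		if (ret == NULL) {
			fprintf(stderr, "xmalloc(%zu): out of memory\n", size);
			abort();
		}
		return (ret);
	}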

memory/jemalloc/Makefile.in

@@ -73,8 +73,9 @@ $(CRT_OBJ_DIR)/jemalloc.c: $(srcdir)/crtsp1.diff
 $(CRT_OBJ_DIR)/build/intel/mozcrt19.dll: \
 	$(CRT_OBJ_DIR)/jemalloc.c $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h \
-	$(srcdir)/rb.h
-	cp $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h $(srcdir)/rb.h $(CRT_OBJ_DIR)
+	$(srcdir)/ql.h $(srcdir)/qr.h $(srcdir)/rb.h
+	cp $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h $(srcdir)/ql.h \
+		$(srcdir)/qr.h $(srcdir)/rb.h $(CRT_OBJ_DIR)
 	# this pretty much sucks, but nmake and make don't play well together
 	$(PYTHON) $(srcdir)/build-crt.py $(CRT_OBJ_DIR)
 	#XXX: these don't link right for some reason

memory/jemalloc/jemalloc.c: diff not shown because of its size.

memory/jemalloc/jemalloc.h

@@ -19,9 +19,7 @@ typedef struct {
 	 * Run-time configuration settings.
 	 */
 	bool	opt_abort;	/* abort(3) on error? */
-	bool	opt_dss;	/* Use sbrk(2) to map memory? */
 	bool	opt_junk;	/* Fill allocated/free memory with 0xa5/0x5a? */
-	bool	opt_mmap;	/* Use mmap(2) to map memory? */
 	bool	opt_utrace;	/* Trace all allocation events? */
 	bool	opt_sysv;	/* SysV semantics? */
 	bool	opt_xmalloc;	/* abort(3) on OOM? */
@@ -33,6 +31,8 @@ typedef struct {
 	size_t	large_max;	/* Max sub-chunksize allocation size. */
 	size_t	chunksize;	/* Size of each virtual memory mapping. */
 	size_t	dirty_max;	/* Max dirty pages per arena. */
+	size_t	reserve_min;	/* reserve_low callback threshold. */
+	size_t	reserve_max;	/* Maximum reserve size before unmapping. */
 
 	/*
 	 * Current memory usage statistics.
@@ -41,6 +41,7 @@ typedef struct {
 	size_t	committed;	/* Bytes committed (readable/writable). */
 	size_t	allocated;	/* Bytes allocated (in use by application). */
 	size_t	dirty;		/* Bytes dirty (committed unused pages). */
+	size_t	reserve_cur;	/* Current memory reserve. */
 } jemalloc_stats_t;
 
 #ifndef MOZ_MEMORY_DARWIN
@@ -55,3 +56,122 @@ int posix_memalign(void **memptr, size_t alignment, size_t size);
void *memalign(size_t alignment, size_t size);
size_t malloc_usable_size(const void *ptr);
void jemalloc_stats(jemalloc_stats_t *stats);
/* The x*() functions never return NULL. */
void *xmalloc(size_t size);
void *xcalloc(size_t num, size_t size);
void *xrealloc(void *ptr, size_t size);
void *xmemalign(size_t alignment, size_t size);
/*
* The allocator maintains a memory reserve that is used to satisfy allocation
* requests when no additional memory can be acquired from the operating
* system. Under normal operating conditions, the reserve size is at least
* reserve_min bytes. If the reserve is depleted or insufficient to satisfy an
* allocation request, then condition notifications are sent to one or more of
* the registered callback functions:
*
* RESERVE_CND_LOW: The reserve had to be used to satisfy an allocation
* request, which dropped the reserve size below the
* minimum. The callee should try to free memory in order
* to restore the reserve.
*
* RESERVE_CND_CRIT: The reserve was not large enough to satisfy a pending
* allocation request. Some callee must free adequate
* memory in order to prevent application failure (unless
* the condition spontaneously desists due to concurrent
* deallocation).
*
* RESERVE_CND_FAIL: An allocation request could not be satisfied, despite all
* attempts. The allocator is about to terminate the
* application.
*
* The order in which the callback functions are called is only loosely
* specified: in the absence of interposing callback
* registrations/unregistrations, enabled callbacks will be called in an
* arbitrary round-robin order.
*
* Condition notifications are sent to callbacks only while conditions exist.
* For example, just before the allocator sends a RESERVE_CND_LOW condition
* notification to a callback, the reserve is in fact depleted. However, due
* to allocator concurrency, the reserve may have been restored by the time the
* callback function executes. Furthermore, if the reserve is restored at some
* point during the delivery of condition notifications to callbacks, no
* further deliveries will occur, since the condition no longer exists.
*
* Callback functions can freely call back into the allocator (i.e. the
* allocator releases all internal resources before calling each callback
* function), though allocation is discouraged, since recursive callbacks are
* likely to result, which places extra burden on the application to avoid
* deadlock.
*
* Callback functions must be thread-safe, since it is possible that multiple
* threads will call into the same callback function concurrently.
*/
/* Memory reserve condition types. */
typedef enum {
RESERVE_CND_LOW,
RESERVE_CND_CRIT,
RESERVE_CND_FAIL
} reserve_cnd_t;
/*
* Reserve condition notification callback function type definition.
*
* Inputs:
* ctx: Opaque application data, as passed to reserve_cb_register().
* cnd: Condition type being delivered.
* size: Allocation request size for the allocation that caused the condition.
*/
typedef void reserve_cb_t(void *ctx, reserve_cnd_t cnd, size_t size);
/*
* Register a callback function.
*
* Inputs:
* cb: Callback function pointer.
* ctx: Opaque application data, passed to cb().
*
* Output:
ret: False upon success; true if the callback could not be registered due to OOM.
*/
bool reserve_cb_register(reserve_cb_t *cb, void *ctx);
/*
* Unregister a callback function.
*
* Inputs:
* cb: Callback function pointer.
* ctx: Opaque application data, same as that passed to reserve_cb_register().
*
* Output:
* ret: False upon success, true if the {cb,ctx} registration could not be
* found.
*/
bool reserve_cb_unregister(reserve_cb_t *cb, void *ctx);
/*
* Get the current reserve size.
*
* ret: Current reserve size.
*/
size_t reserve_cur_get(void);
/*
* Get the minimum acceptable reserve size. If the reserve drops below this
* value, the RESERVE_CND_LOW condition notification is sent to the callbacks.
*
* ret: Minimum acceptable reserve size.
*/
size_t reserve_min_get(void);
/*
* Set the minimum acceptable reserve size.
*
* min: Reserve threshold. This value may be internally rounded up.
* ret: False if the reserve was successfully resized; true otherwise. Note
* that failure to resize the reserve also results in a RESERVE_CND_LOW
* condition.
*/
bool reserve_min_set(size_t min);
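
As a hypothetical usage sketch (not part of this commit), an application might register a callback that responds to each condition and then raise the reserve threshold; reserve_cur_get() and reserve_min_get() let the resulting state be inspected:

	#include <stdbool.h>
	#include <stdio.h>
	#include "jemalloc.h"

	/* Hypothetical callback; it must be thread-safe, and it avoids
	 * allocating, per the comments above. */
	static void
	app_reserve_cb(void *ctx, reserve_cnd_t cnd, size_t size)
	{

		switch (cnd) {
		case RESERVE_CND_LOW:
			/* Reserve dropped below reserve_min; free memory soon. */
			break;
		case RESERVE_CND_CRIT:
			/* Reserve exhausted; free memory now to avert failure. */
			break;
		case RESERVE_CND_FAIL:
			/* Last chance to report state before termination. */
			fprintf(stderr, "OOM: %zu-byte request failed\n", size);
			break;
		}
	}

	/* Hypothetical setup: returns false on success, true on failure. */
	static bool
	app_reserve_init(void)
	{

		if (reserve_cb_register(app_reserve_cb, NULL))
			return (true);	/* OOM during registration. */
		/* Keep at least 1 MiB in reserve (may be rounded up). */
		if (reserve_min_set(1024 * 1024))
			return (true);
		fprintf(stderr, "reserve: %zu bytes (min %zu)\n",
		    reserve_cur_get(), reserve_min_get());
		return (false);
	}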

memory/jemalloc/ql.h (new file, 114 lines)

@@ -0,0 +1,114 @@
/******************************************************************************
*
* Copyright (C) 2002 Jason Evans <jasone@canonware.com>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice(s), this list of conditions and the following disclaimer
* unmodified other than the allowable addition of one or more
* copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice(s), this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/*
* List definitions.
*/
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
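
As a hypothetical sketch (not part of this commit) of how these macros fit together: a struct embeds its linkage with ql_elm(), a head declared with ql_head() tracks the first element, and ql_foreach() walks the list in order. Note that ql.h builds on qr.h, so both headers are needed:

	#include <stdio.h>
	#include "qr.h"
	#include "ql.h"

	typedef struct widget_s widget_t;
	struct widget_s {
		int	id;
		ql_elm(widget_t) link;	/* Embedded list linkage. */
	};

	int
	main(void)
	{
		ql_head(widget_t) widgets;
		widget_t a, b, *w;

		ql_new(&widgets);

		a.id = 1;
		ql_elm_new(&a, link);
		ql_tail_insert(&widgets, &a, link);

		b.id = 2;
		ql_elm_new(&b, link);
		ql_tail_insert(&widgets, &b, link);

		/* Prints "widget 1" then "widget 2". */
		ql_foreach(w, &widgets, link)
			printf("widget %d\n", w->id);

		return (0);
	}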

memory/jemalloc/qr.h (new file, 98 lines)

@@ -0,0 +1,98 @@
/******************************************************************************
*
* Copyright (C) 2002 Jason Evans <jasone@canonware.com>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice(s), this list of conditions and the following disclaimer
* unmodified other than the allowable addition of one or more
* copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice(s), this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
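
As a final hypothetical sketch (not part of this commit), the ring macros can also be used directly, with no separate head structure; every element is always a member of some ring, initially a singleton ring of itself:

	#include <stdio.h>
	#include "qr.h"

	typedef struct node_s node_t;
	struct node_s {
		int	val;
		qr(node_t) link;	/* Embedded ring linkage. */
	};

	int
	main(void)
	{
		node_t a, b, c, *n;

		a.val = 1; qr_new(&a, link);
		b.val = 2; qr_new(&b, link);
		c.val = 3; qr_new(&c, link);

		/* Build the ring a -> b -> c -> a. */
		qr_after_insert(&a, &b, link);
		qr_after_insert(&b, &c, link);

		/* Iterate once around the ring: prints 1, 2, 3. */
		qr_foreach(n, &a, link)
			printf("%d\n", n->val);

		/* Detach b; it reverts to a singleton ring. */
		qr_remove(&b, link);

		return (0);
	}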