Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1413475 - Run clang-format on all files in memory/build/. r=njn
--HG-- extra : rebase_source : a0a7ebff22c2387389d2f1dc75f8a5084f76ebb7
This commit is contained in:
Parent: af14262e54
Commit: eab43e4a6c
@@ -11,7 +11,8 @@
#ifndef HAVE_MEMALIGN
namespace {

inline void* memalign(size_t aAlignment, size_t aSize)
inline void*
memalign(size_t aAlignment, size_t aSize)
{
#ifdef XP_WIN
return _aligned_malloc(aSize, aAlignment);
@@ -23,26 +24,25 @@ inline void* memalign(size_t aAlignment, size_t aSize)
return ret;
#endif
}

}
#endif

struct SystemMalloc {
#define MALLOC_DECL(name, return_type, ...) \
static inline return_type \
name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return ::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
struct SystemMalloc
{
#define MALLOC_DECL(name, return_type, ...) \
static inline return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return ::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
};

#define MALLOC_DECL(name, return_type, ...) \
MOZ_JEMALLOC_API return_type \
name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return DummyArenaAllocator<SystemMalloc>::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
#define MALLOC_DECL(name, return_type, ...) \
MOZ_JEMALLOC_API return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return DummyArenaAllocator<SystemMalloc>::name( \
ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
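Note: SystemMalloc and the MOZ_JEMALLOC_API definitions above both rely on the MALLOC_DECL / malloc_decls.h "X-macro" pattern: the includer defines MALLOC_DECL, includes malloc_decls.h, and the header expands one entry per allocator function. The following is a minimal, self-contained sketch of that technique with made-up names; it is not Mozilla's actual header.

#include <cstdio>

// Stand-in for malloc_decls.h: here the list is a local macro so the example
// fits in one file; the real header lists the allocator entry points and
// #undefs MALLOC_DECL after expanding them.
#define SKETCH_FUNC_LIST(DECL)                                                 \
  DECL(malloc)                                                                 \
  DECL(calloc)                                                                 \
  DECL(realloc)                                                                \
  DECL(free)

// Expansion 1: an enumerator per entry.
#define AS_ENUM(name) Func_##name,
enum FuncId { SKETCH_FUNC_LIST(AS_ENUM) FuncCount };
#undef AS_ENUM

// Expansion 2: a name table that stays in sync with the enum automatically.
#define AS_STRING(name) #name,
static const char* const kFuncNames[] = { SKETCH_FUNC_LIST(AS_STRING) };
#undef AS_STRING

int main()
{
  for (int i = 0; i < FuncCount; i++) {
    std::printf("%d: %s\n", i, kFuncNames[i]);
  }
  return 0;
}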
@ -11,55 +11,54 @@
|
|||
// - argument types
|
||||
|
||||
#ifndef malloc_decls_h
|
||||
# define malloc_decls_h
|
||||
#define malloc_decls_h
|
||||
|
||||
# include "mozjemalloc_types.h"
|
||||
#include "mozjemalloc_types.h"
|
||||
|
||||
# define MALLOC_FUNCS_MALLOC_BASE 1
|
||||
# define MALLOC_FUNCS_MALLOC_EXTRA 2
|
||||
# define MALLOC_FUNCS_MALLOC (MALLOC_FUNCS_MALLOC_BASE | \
|
||||
MALLOC_FUNCS_MALLOC_EXTRA)
|
||||
# define MALLOC_FUNCS_JEMALLOC 4
|
||||
# define MALLOC_FUNCS_INIT 8
|
||||
# define MALLOC_FUNCS_BRIDGE 16
|
||||
# define MALLOC_FUNCS_ARENA_BASE 32
|
||||
# define MALLOC_FUNCS_ARENA_ALLOC 64
|
||||
# define MALLOC_FUNCS_ARENA (MALLOC_FUNCS_ARENA_BASE | \
|
||||
MALLOC_FUNCS_ARENA_ALLOC)
|
||||
# define MALLOC_FUNCS_ALL (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE | \
|
||||
MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | \
|
||||
MALLOC_FUNCS_ARENA)
|
||||
#define MALLOC_FUNCS_MALLOC_BASE 1
|
||||
#define MALLOC_FUNCS_MALLOC_EXTRA 2
|
||||
#define MALLOC_FUNCS_MALLOC \
|
||||
(MALLOC_FUNCS_MALLOC_BASE | MALLOC_FUNCS_MALLOC_EXTRA)
|
||||
#define MALLOC_FUNCS_JEMALLOC 4
|
||||
#define MALLOC_FUNCS_INIT 8
|
||||
#define MALLOC_FUNCS_BRIDGE 16
|
||||
#define MALLOC_FUNCS_ARENA_BASE 32
|
||||
#define MALLOC_FUNCS_ARENA_ALLOC 64
|
||||
#define MALLOC_FUNCS_ARENA (MALLOC_FUNCS_ARENA_BASE | MALLOC_FUNCS_ARENA_ALLOC)
|
||||
#define MALLOC_FUNCS_ALL \
|
||||
(MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE | MALLOC_FUNCS_MALLOC | \
|
||||
MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
|
||||
|
||||
#endif // malloc_decls_h
|
||||
|
||||
#ifndef MALLOC_FUNCS
|
||||
# define MALLOC_FUNCS (MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | \
|
||||
MALLOC_FUNCS_ARENA)
|
||||
#define MALLOC_FUNCS \
|
||||
(MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
|
||||
#endif
|
||||
|
||||
#ifdef MALLOC_DECL
|
||||
# if MALLOC_FUNCS & MALLOC_FUNCS_INIT
|
||||
MALLOC_DECL(init, void, const malloc_table_t *)
|
||||
# endif
|
||||
# if MALLOC_FUNCS & MALLOC_FUNCS_BRIDGE
|
||||
#if MALLOC_FUNCS & MALLOC_FUNCS_INIT
|
||||
MALLOC_DECL(init, void, const malloc_table_t*)
|
||||
#endif
|
||||
#if MALLOC_FUNCS & MALLOC_FUNCS_BRIDGE
|
||||
MALLOC_DECL(get_bridge, struct ReplaceMallocBridge*)
|
||||
# endif
|
||||
# if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_BASE
|
||||
MALLOC_DECL(malloc, void *, size_t)
|
||||
MALLOC_DECL(calloc, void *, size_t, size_t)
|
||||
MALLOC_DECL(realloc, void *, void *, size_t)
|
||||
MALLOC_DECL(free, void, void *)
|
||||
MALLOC_DECL(memalign, void *, size_t, size_t)
|
||||
# endif
|
||||
# if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_EXTRA
|
||||
MALLOC_DECL(posix_memalign, int, void **, size_t, size_t)
|
||||
MALLOC_DECL(aligned_alloc, void *, size_t, size_t)
|
||||
MALLOC_DECL(valloc, void *, size_t)
|
||||
#endif
|
||||
#if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_BASE
|
||||
MALLOC_DECL(malloc, void*, size_t)
|
||||
MALLOC_DECL(calloc, void*, size_t, size_t)
|
||||
MALLOC_DECL(realloc, void*, void*, size_t)
|
||||
MALLOC_DECL(free, void, void*)
|
||||
MALLOC_DECL(memalign, void*, size_t, size_t)
|
||||
#endif
|
||||
#if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_EXTRA
|
||||
MALLOC_DECL(posix_memalign, int, void**, size_t, size_t)
|
||||
MALLOC_DECL(aligned_alloc, void*, size_t, size_t)
|
||||
MALLOC_DECL(valloc, void*, size_t)
|
||||
MALLOC_DECL(malloc_usable_size, size_t, usable_ptr_t)
|
||||
MALLOC_DECL(malloc_good_size, size_t, size_t)
|
||||
# endif
|
||||
# if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC
|
||||
MALLOC_DECL(jemalloc_stats, void, jemalloc_stats_t *)
|
||||
#endif
|
||||
#if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC
|
||||
MALLOC_DECL(jemalloc_stats, void, jemalloc_stats_t*)
|
||||
|
||||
// On some operating systems (Mac), we use madvise(MADV_FREE) to hand pages
|
||||
// back to the operating system. On Mac, the operating system doesn't take
|
||||
|
@ -98,9 +97,9 @@ MALLOC_DECL(jemalloc_thread_local_arena, void, bool)
|
|||
|
||||
// Provide information about any allocation enclosing the given address.
|
||||
MALLOC_DECL(jemalloc_ptr_info, void, const void*, jemalloc_ptr_info_t*)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
# if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_BASE
|
||||
#if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_BASE
|
||||
|
||||
// Creates a separate arena, and returns its id, valid to use with moz_arena_*
|
||||
// functions.
|
||||
|
@ -108,9 +107,9 @@ MALLOC_DECL(moz_create_arena, arena_id_t)
|
|||
|
||||
// Dispose of the given arena. Subsequent uses of the arena will fail.
|
||||
MALLOC_DECL(moz_dispose_arena, void, arena_id_t)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
# if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_ALLOC
|
||||
#if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_ALLOC
|
||||
// Same as the functions without the moz_arena_ prefix, but using arenas
|
||||
// created with moz_create_arena.
|
||||
// The contract, even if not enforced at runtime in some configurations,
|
||||
|
@ -124,7 +123,7 @@ MALLOC_DECL(moz_arena_calloc, void*, arena_id_t, size_t, size_t)
|
|||
MALLOC_DECL(moz_arena_realloc, void*, arena_id_t, void*, size_t)
|
||||
MALLOC_DECL(moz_arena_free, void, arena_id_t, void*)
|
||||
MALLOC_DECL(moz_arena_memalign, void*, arena_id_t, size_t, size_t)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif // MALLOC_DECL
|
||||
|
||||
|
|
|
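Note: the MALLOC_FUNCS_* constants in the header above form a bitmask that the includer sets before including malloc_decls.h, so only the selected groups of declarations are expanded. A small illustrative sketch of the same guard mechanism follows; the names and values are made up, not the real header's.

// Illustrative only; these are not the real MALLOC_FUNCS_* names or values.
#define SKETCH_FUNCS_BASE 1
#define SKETCH_FUNCS_EXTRA 2
#define SKETCH_FUNCS_ALL (SKETCH_FUNCS_BASE | SKETCH_FUNCS_EXTRA)

// The includer picks a subset before the declarations are expanded.
#define SKETCH_FUNCS SKETCH_FUNCS_BASE

#if SKETCH_FUNCS & SKETCH_FUNCS_BASE
void sketch_base_function();  // emitted: the BASE bit is set
#endif
#if SKETCH_FUNCS & SKETCH_FUNCS_EXTRA
void sketch_extra_function(); // skipped here: the EXTRA bit is not set
#endif

int main() { return 0; }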
@ -200,7 +200,6 @@ getenv(const char* name)
|
|||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
// Some tools, such as /dev/dsp wrappers, LD_PRELOAD libraries that
|
||||
// happen to override mmap() and call dlsym() from their overridden
|
||||
// mmap(). The problem is that dlsym() calls malloc(), and this ends
|
||||
|
@ -1021,7 +1020,8 @@ public:
|
|||
{
|
||||
mArenas.Init();
|
||||
mPrivateArenas.Init();
|
||||
mDefaultArena = mLock.Init() ? CreateArena(/* IsPrivate = */ false) : nullptr;
|
||||
mDefaultArena =
|
||||
mLock.Init() ? CreateArena(/* IsPrivate = */ false) : nullptr;
|
||||
if (mDefaultArena) {
|
||||
// arena_t constructor sets this to a lower value for thread local
|
||||
// arenas; Reset to the default value for the main arena.
|
||||
|
@ -1058,17 +1058,14 @@ public:
|
|||
return Item<Iterator>(this, *Tree::Iterator::begin());
|
||||
}
|
||||
|
||||
Item<Iterator> end()
|
||||
{
|
||||
return Item<Iterator>(this, nullptr);
|
||||
}
|
||||
Item<Iterator> end() { return Item<Iterator>(this, nullptr); }
|
||||
|
||||
Tree::TreeNode* Next()
|
||||
{
|
||||
Tree::TreeNode* result = Tree::Iterator::Next();
|
||||
if (!result && mNextTree) {
|
||||
new (this) Iterator(mNextTree, nullptr);
|
||||
result = reinterpret_cast<Tree::TreeNode*>(*Tree::Iterator::begin());
|
||||
new (this) Iterator(mNextTree, nullptr);
|
||||
result = reinterpret_cast<Tree::TreeNode*>(*Tree::Iterator::begin());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
@ -4762,7 +4759,8 @@ MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
|
|||
inline return_type MozJemalloc::moz_arena_##name( \
|
||||
arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
|
||||
{ \
|
||||
BaseAllocator allocator(gArenas.GetById(aArenaId, /* IsPrivate = */ true));\
|
||||
BaseAllocator allocator( \
|
||||
gArenas.GetById(aArenaId, /* IsPrivate = */ true)); \
|
||||
return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
|
||||
}
|
||||
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
|
||||
|
@@ -16,9 +16,9 @@
// Can't use macros recursively, so we need another one doing the same as above.
#define MACRO_CALL2(a, b) a b

#define ARGS_HELPER(name, ...) MACRO_CALL2( \
MOZ_PASTE_PREFIX_AND_ARG_COUNT(name, ##__VA_ARGS__), \
(__VA_ARGS__))
#define ARGS_HELPER(name, ...) \
MACRO_CALL2(MOZ_PASTE_PREFIX_AND_ARG_COUNT(name, ##__VA_ARGS__), \
(__VA_ARGS__))
#define TYPED_ARGS0()
#define TYPED_ARGS1(t1) t1 arg1
#define TYPED_ARGS2(t1, t2) TYPED_ARGS1(t1), t2 arg2
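Note: ARGS_HELPER pastes the argument count (obtained via MOZ_PASTE_PREFIX_AND_ARG_COUNT from MFBT) onto a prefix such as TYPED_ARGS, so a plain type list can be turned into a named parameter list. A compilable sketch of the same counting-and-pasting idiom, using stand-in counting macros rather than the real MFBT one:

// Stand-ins for MOZ_PASTE_PREFIX_AND_ARG_COUNT; only up to three arguments
// are handled, to keep the sketch short.
#define SKETCH_COUNT_IMPL(_1, _2, _3, N, ...) N
#define SKETCH_COUNT(...) SKETCH_COUNT_IMPL(__VA_ARGS__, 3, 2, 1)
#define SKETCH_PASTE2(a, b) a##b
#define SKETCH_PASTE(a, b) SKETCH_PASTE2(a, b)

// Same shape as the TYPED_ARGSn macros above.
#define TYPED_ARGS1(t1) t1 arg1
#define TYPED_ARGS2(t1, t2) TYPED_ARGS1(t1), t2 arg2
#define TYPED_ARGS3(t1, t2, t3) TYPED_ARGS2(t1, t2), t3 arg3

// SKETCH_DECLARE(foo, int, double) expands to: void foo(int arg1, double arg2);
#define SKETCH_DECLARE(name, ...)                                              \
  void name(SKETCH_PASTE(TYPED_ARGS, SKETCH_COUNT(__VA_ARGS__))(__VA_ARGS__));

SKETCH_DECLARE(takes_two, int, double) // void takes_two(int arg1, double arg2);
SKETCH_DECLARE(takes_one, float)       // void takes_one(float arg1);

int main() { return 0; }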
@@ -34,20 +34,25 @@
// Generic interface exposing the whole public allocator API
// This facilitates the implementation of things like replace-malloc.
// Note: compilers are expected to be able to optimize out `this`.
template <typename T>
struct Allocator: public T {
#define MALLOC_DECL(name, return_type, ...) \
template<typename T>
struct Allocator : public T
{
#define MALLOC_DECL(name, return_type, ...) \
static return_type name(__VA_ARGS__);
#include "malloc_decls.h"
};

// The MozJemalloc allocator
struct MozJemallocBase {};
struct MozJemallocBase
{
};
typedef Allocator<MozJemallocBase> MozJemalloc;

#ifdef MOZ_REPLACE_MALLOC
// The replace-malloc allocator
struct ReplaceMallocBase {};
struct ReplaceMallocBase
{
};
typedef Allocator<ReplaceMallocBase> ReplaceMalloc;

typedef ReplaceMalloc DefaultMalloc;
@@ -59,17 +64,18 @@ typedef MozJemalloc DefaultMalloc;

// Dummy implementation of the moz_arena_* API, falling back to a given
// implementation of the base allocator.
template <typename T>
struct DummyArenaAllocator {
template<typename T>
struct DummyArenaAllocator
{
static arena_id_t moz_create_arena(void) { return 0; }

static void moz_dispose_arena(arena_id_t) { }
static void moz_dispose_arena(arena_id_t) {}

#define MALLOC_DECL(name, return_type, ...) \
static return_type \
moz_arena_ ## name(arena_id_t, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return T::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
#define MALLOC_DECL(name, return_type, ...) \
static return_type moz_arena_##name(arena_id_t, \
ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return T::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
@ -59,32 +59,34 @@ typedef size_t arena_id_t;
|
|||
// jemalloc_stats() is not a stable interface. When using jemalloc_stats_t, be
|
||||
// sure that the compiled results of jemalloc.c are in sync with this header
|
||||
// file.
|
||||
typedef struct {
|
||||
// Run-time configuration settings.
|
||||
bool opt_junk; // Fill allocated memory with kAllocJunk?
|
||||
bool opt_zero; // Fill allocated memory with 0x0?
|
||||
size_t narenas; // Number of arenas.
|
||||
size_t quantum; // Allocation quantum.
|
||||
size_t small_max; // Max quantum-spaced allocation size.
|
||||
size_t large_max; // Max sub-chunksize allocation size.
|
||||
size_t chunksize; // Size of each virtual memory mapping.
|
||||
size_t page_size; // Size of pages.
|
||||
size_t dirty_max; // Max dirty pages per arena.
|
||||
typedef struct
|
||||
{
|
||||
// Run-time configuration settings.
|
||||
bool opt_junk; // Fill allocated memory with kAllocJunk?
|
||||
bool opt_zero; // Fill allocated memory with 0x0?
|
||||
size_t narenas; // Number of arenas.
|
||||
size_t quantum; // Allocation quantum.
|
||||
size_t small_max; // Max quantum-spaced allocation size.
|
||||
size_t large_max; // Max sub-chunksize allocation size.
|
||||
size_t chunksize; // Size of each virtual memory mapping.
|
||||
size_t page_size; // Size of pages.
|
||||
size_t dirty_max; // Max dirty pages per arena.
|
||||
|
||||
// Current memory usage statistics.
|
||||
size_t mapped; // Bytes mapped (not necessarily committed).
|
||||
size_t allocated; // Bytes allocated (committed, in use by application).
|
||||
size_t waste; // Bytes committed, not in use by the
|
||||
// application, and not intentionally left
|
||||
// unused (i.e., not dirty).
|
||||
size_t page_cache; // Committed, unused pages kept around as a
|
||||
// cache. (jemalloc calls these "dirty".)
|
||||
size_t bookkeeping; // Committed bytes used internally by the
|
||||
// allocator.
|
||||
size_t bin_unused; // Bytes committed to a bin but currently unused.
|
||||
// Current memory usage statistics.
|
||||
size_t mapped; // Bytes mapped (not necessarily committed).
|
||||
size_t allocated; // Bytes allocated (committed, in use by application).
|
||||
size_t waste; // Bytes committed, not in use by the
|
||||
// application, and not intentionally left
|
||||
// unused (i.e., not dirty).
|
||||
size_t page_cache; // Committed, unused pages kept around as a
|
||||
// cache. (jemalloc calls these "dirty".)
|
||||
size_t bookkeeping; // Committed bytes used internally by the
|
||||
// allocator.
|
||||
size_t bin_unused; // Bytes committed to a bin but currently unused.
|
||||
} jemalloc_stats_t;
|
||||
|
||||
enum PtrInfoTag {
|
||||
enum PtrInfoTag
|
||||
{
|
||||
// The pointer is not currently known to the allocator.
|
||||
// 'addr' and 'size' are always 0.
|
||||
TagUnknown,
|
||||
|
@ -113,28 +115,26 @@ enum PtrInfoTag {
|
|||
// - The number of fields is minimized.
|
||||
// - The 'tag' field unambiguously defines the meaning of the subsequent fields.
|
||||
// Helper functions are used to group together related categories of tags.
|
||||
typedef struct {
|
||||
typedef struct
|
||||
{
|
||||
enum PtrInfoTag tag;
|
||||
void* addr; // meaning depends on tag; see above
|
||||
size_t size; // meaning depends on tag; see above
|
||||
void* addr; // meaning depends on tag; see above
|
||||
size_t size; // meaning depends on tag; see above
|
||||
} jemalloc_ptr_info_t;
|
||||
|
||||
static inline bool
|
||||
jemalloc_ptr_is_live(jemalloc_ptr_info_t* info)
|
||||
{
|
||||
return info->tag == TagLiveSmall ||
|
||||
info->tag == TagLiveLarge ||
|
||||
return info->tag == TagLiveSmall || info->tag == TagLiveLarge ||
|
||||
info->tag == TagLiveHuge;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
jemalloc_ptr_is_freed(jemalloc_ptr_info_t* info)
|
||||
{
|
||||
return info->tag == TagFreedSmall ||
|
||||
info->tag == TagFreedPageDirty ||
|
||||
return info->tag == TagFreedSmall || info->tag == TagFreedPageDirty ||
|
||||
info->tag == TagFreedPageDecommitted ||
|
||||
info->tag == TagFreedPageMadvised ||
|
||||
info->tag == TagFreedPageZeroed;
|
||||
info->tag == TagFreedPageMadvised || info->tag == TagFreedPageZeroed;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
|
@ -142,8 +142,7 @@ jemalloc_ptr_is_freed_page(jemalloc_ptr_info_t* info)
|
|||
{
|
||||
return info->tag == TagFreedPageDirty ||
|
||||
info->tag == TagFreedPageDecommitted ||
|
||||
info->tag == TagFreedPageMadvised ||
|
||||
info->tag == TagFreedPageZeroed;
|
||||
info->tag == TagFreedPageMadvised || info->tag == TagFreedPageZeroed;
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
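Note: jemalloc_ptr_info() fills a jemalloc_ptr_info_t describing any allocation enclosing the given address, and the inline helpers above classify its tag. A hedged usage sketch, assuming a build where mozjemalloc is the active allocator and exports this symbol (the declaration is copied from the MALLOC_DECL entry above):

#include <cstdio>
#include <cstdlib>

#include "mozjemalloc_types.h" // jemalloc_ptr_info_t, jemalloc_ptr_is_live()

// Matches MALLOC_DECL(jemalloc_ptr_info, void, const void*, jemalloc_ptr_info_t*)
// with MOZ_JEMALLOC_API (extern "C") linkage; assumed to be provided by the build.
extern "C" void jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo);

int main()
{
  void* p = malloc(64);
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(p, &info);
  // For a live allocation, the tag is TagLiveSmall/Large/Huge and addr/size
  // describe the containing block.
  std::printf("live=%d size=%zu\n", jemalloc_ptr_is_live(&info) ? 1 : 0, info.size);
  free(p);
  return 0;
}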
@ -28,30 +28,33 @@
|
|||
// On OSX, malloc/malloc.h contains the declaration for malloc_good_size,
|
||||
// which will call back in jemalloc, through the zone allocator so just use it.
|
||||
#ifndef XP_DARWIN
|
||||
MOZ_MEMORY_API size_t malloc_good_size_impl(size_t size);
|
||||
MOZ_MEMORY_API size_t
|
||||
malloc_good_size_impl(size_t size);
|
||||
|
||||
// Note: the MOZ_GLUE_IN_PROGRAM ifdef below is there to avoid -Werror turning
|
||||
// the protective if into errors. MOZ_GLUE_IN_PROGRAM is what triggers MFBT_API
|
||||
// to use weak imports.
|
||||
static inline size_t _malloc_good_size(size_t size) {
|
||||
# if defined(MOZ_GLUE_IN_PROGRAM) && !defined(IMPL_MFBT)
|
||||
static inline size_t
|
||||
_malloc_good_size(size_t size)
|
||||
{
|
||||
#if defined(MOZ_GLUE_IN_PROGRAM) && !defined(IMPL_MFBT)
|
||||
if (!malloc_good_size)
|
||||
return size;
|
||||
# endif
|
||||
#endif
|
||||
return malloc_good_size_impl(size);
|
||||
}
|
||||
|
||||
# define malloc_good_size _malloc_good_size
|
||||
#define malloc_good_size _malloc_good_size
|
||||
#endif
|
||||
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_JEMALLOC_API return_type name(__VA_ARGS__);
|
||||
#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
|
||||
#include "malloc_decls.h"
|
||||
|
||||
#endif
|
||||
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_JEMALLOC_API return_type name(__VA_ARGS__);
|
||||
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
|
||||
#include "malloc_decls.h"
|
||||
|
|
|
@ -10,8 +10,8 @@
|
|||
|
||||
// Declare malloc implementation functions with the right return and
|
||||
// argument types.
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_MEMORY_API return_type name##_impl(__VA_ARGS__);
|
||||
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
|
||||
#include "malloc_decls.h"
|
||||
|
||||
|
@ -72,10 +72,10 @@ operator delete[](void* ptr, std::nothrow_t const&)
|
|||
#undef strndup
|
||||
#undef strdup
|
||||
|
||||
MOZ_MEMORY_API char *
|
||||
strndup_impl(const char *src, size_t len)
|
||||
MOZ_MEMORY_API char*
|
||||
strndup_impl(const char* src, size_t len)
|
||||
{
|
||||
char* dst = (char*) malloc_impl(len + 1);
|
||||
char* dst = (char*)malloc_impl(len + 1);
|
||||
if (dst) {
|
||||
strncpy(dst, src, len);
|
||||
dst[len] = '\0';
|
||||
|
@ -83,8 +83,8 @@ strndup_impl(const char *src, size_t len)
|
|||
return dst;
|
||||
}
|
||||
|
||||
MOZ_MEMORY_API char *
|
||||
strdup_impl(const char *src)
|
||||
MOZ_MEMORY_API char*
|
||||
strdup_impl(const char* src)
|
||||
{
|
||||
size_t len = strlen(src);
|
||||
return strndup_impl(src, len);
|
||||
|
@ -95,9 +95,9 @@ strdup_impl(const char *src)
|
|||
#include <stdio.h>
|
||||
|
||||
MOZ_MEMORY_API int
|
||||
vasprintf_impl(char **str, const char *fmt, va_list ap)
|
||||
vasprintf_impl(char** str, const char* fmt, va_list ap)
|
||||
{
|
||||
char* ptr, *_ptr;
|
||||
char *ptr, *_ptr;
|
||||
int ret;
|
||||
|
||||
if (str == NULL || fmt == NULL) {
|
||||
|
@ -130,17 +130,17 @@ vasprintf_impl(char **str, const char *fmt, va_list ap)
|
|||
}
|
||||
|
||||
MOZ_MEMORY_API int
|
||||
asprintf_impl(char **str, const char *fmt, ...)
|
||||
asprintf_impl(char** str, const char* fmt, ...)
|
||||
{
|
||||
int ret;
|
||||
va_list ap;
|
||||
va_start(ap, fmt);
|
||||
int ret;
|
||||
va_list ap;
|
||||
va_start(ap, fmt);
|
||||
|
||||
ret = vasprintf_impl(str, fmt, ap);
|
||||
ret = vasprintf_impl(str, fmt, ap);
|
||||
|
||||
va_end(ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -153,7 +153,7 @@ MOZ_MEMORY_API wchar_t*
|
|||
wcsdup_impl(const wchar_t* src)
|
||||
{
|
||||
size_t len = wcslen(src);
|
||||
wchar_t *dst = (wchar_t*) malloc_impl((len + 1) * sizeof(wchar_t));
|
||||
wchar_t* dst = (wchar_t*)malloc_impl((len + 1) * sizeof(wchar_t));
|
||||
if (dst)
|
||||
wcsncpy(dst, src, len + 1);
|
||||
return dst;
|
||||
|
|
|
@ -89,10 +89,10 @@
|
|||
// That implementation would call malloc by using "malloc_impl".
|
||||
|
||||
#if defined(MOZ_MEMORY_IMPL) && !defined(IMPL_MFBT)
|
||||
# ifdef MFBT_API // mozilla/Types.h was already included
|
||||
#ifdef MFBT_API // mozilla/Types.h was already included
|
||||
# error mozmemory_wrap.h has to be included before mozilla/Types.h when MOZ_MEMORY_IMPL is set and IMPL_MFBT is not.
|
||||
# endif
|
||||
# define IMPL_MFBT
|
||||
#endif
|
||||
#define IMPL_MFBT
|
||||
#endif
|
||||
|
||||
#include "mozilla/Types.h"
|
||||
|
@ -106,64 +106,64 @@
|
|||
#endif
|
||||
|
||||
#ifdef MOZ_MEMORY_IMPL
|
||||
# define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
|
||||
# if defined(XP_WIN)
|
||||
# define mozmem_malloc_impl(a) je_ ## a
|
||||
# else
|
||||
# define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
|
||||
# if defined(MOZ_WIDGET_ANDROID)
|
||||
# define MOZ_WRAP_NEW_DELETE
|
||||
# endif
|
||||
# endif
|
||||
#define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
|
||||
#if defined(XP_WIN)
|
||||
#define mozmem_malloc_impl(a) je_##a
|
||||
#else
|
||||
#define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
|
||||
#if defined(MOZ_WIDGET_ANDROID)
|
||||
#define MOZ_WRAP_NEW_DELETE
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#ifdef XP_WIN
|
||||
# define mozmem_dup_impl(a) wrap_ ## a
|
||||
#define mozmem_dup_impl(a) wrap_##a
|
||||
#endif
|
||||
|
||||
#if !defined(MOZ_MEMORY_IMPL)
|
||||
# define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
|
||||
# define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
|
||||
#define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
|
||||
#define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
|
||||
#endif
|
||||
|
||||
#ifndef MOZ_MEMORY_API
|
||||
# define MOZ_MEMORY_API MOZ_EXTERN_C
|
||||
#define MOZ_MEMORY_API MOZ_EXTERN_C
|
||||
#endif
|
||||
#ifndef MOZ_JEMALLOC_API
|
||||
# define MOZ_JEMALLOC_API MOZ_EXTERN_C
|
||||
#define MOZ_JEMALLOC_API MOZ_EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifndef mozmem_malloc_impl
|
||||
# define mozmem_malloc_impl(a) a
|
||||
#define mozmem_malloc_impl(a) a
|
||||
#endif
|
||||
#ifndef mozmem_dup_impl
|
||||
# define mozmem_dup_impl(a) a
|
||||
#define mozmem_dup_impl(a) a
|
||||
#endif
|
||||
|
||||
// Malloc implementation functions
|
||||
#define malloc_impl mozmem_malloc_impl(malloc)
|
||||
#define posix_memalign_impl mozmem_malloc_impl(posix_memalign)
|
||||
#define aligned_alloc_impl mozmem_malloc_impl(aligned_alloc)
|
||||
#define calloc_impl mozmem_malloc_impl(calloc)
|
||||
#define realloc_impl mozmem_malloc_impl(realloc)
|
||||
#define free_impl mozmem_malloc_impl(free)
|
||||
#define memalign_impl mozmem_malloc_impl(memalign)
|
||||
#define valloc_impl mozmem_malloc_impl(valloc)
|
||||
#define malloc_usable_size_impl mozmem_malloc_impl(malloc_usable_size)
|
||||
#define malloc_good_size_impl mozmem_malloc_impl(malloc_good_size)
|
||||
#define malloc_impl mozmem_malloc_impl(malloc)
|
||||
#define posix_memalign_impl mozmem_malloc_impl(posix_memalign)
|
||||
#define aligned_alloc_impl mozmem_malloc_impl(aligned_alloc)
|
||||
#define calloc_impl mozmem_malloc_impl(calloc)
|
||||
#define realloc_impl mozmem_malloc_impl(realloc)
|
||||
#define free_impl mozmem_malloc_impl(free)
|
||||
#define memalign_impl mozmem_malloc_impl(memalign)
|
||||
#define valloc_impl mozmem_malloc_impl(valloc)
|
||||
#define malloc_usable_size_impl mozmem_malloc_impl(malloc_usable_size)
|
||||
#define malloc_good_size_impl mozmem_malloc_impl(malloc_good_size)
|
||||
|
||||
// Duplication functions
|
||||
#define strndup_impl mozmem_dup_impl(strndup)
|
||||
#define strdup_impl mozmem_dup_impl(strdup)
|
||||
#define strndup_impl mozmem_dup_impl(strndup)
|
||||
#define strdup_impl mozmem_dup_impl(strdup)
|
||||
#ifdef XP_WIN
|
||||
# define wcsdup_impl mozmem_dup_impl(wcsdup)
|
||||
#define wcsdup_impl mozmem_dup_impl(wcsdup)
|
||||
#endif
|
||||
|
||||
// String functions
|
||||
#ifdef ANDROID
|
||||
// Bug 801571 and Bug 879668, libstagefright uses vasprintf, causing malloc()/
|
||||
// free() to be mismatched between bionic and mozglue implementation.
|
||||
#define vasprintf_impl mozmem_dup_impl(vasprintf)
|
||||
#define asprintf_impl mozmem_dup_impl(asprintf)
|
||||
#define vasprintf_impl mozmem_dup_impl(vasprintf)
|
||||
#define asprintf_impl mozmem_dup_impl(asprintf)
|
||||
#endif
|
||||
|
||||
#endif // mozmemory_wrap_h
|
||||
|
|
|
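Note: the *_impl names defined above are aliases whose expansion depends on MOZ_MEMORY_IMPL and the platform: Windows builds of the implementation paste a je_ (or wrap_) prefix onto the name, other configurations leave it unchanged. A self-contained illustration of the XP_WIN branch; the je_* bodies below are stand-ins, not the real allocator.

#include <cstddef>
#include <cstdlib>

// Stand-ins for the prefixed implementations a real build would provide.
static void* je_malloc(std::size_t aSize) { return std::malloc(aSize); }
static void je_free(void* aPtr) { std::free(aPtr); }

// Mimics the XP_WIN + MOZ_MEMORY_IMPL branch of mozmemory_wrap.h.
#define mozmem_malloc_impl(a) je_##a
#define malloc_impl mozmem_malloc_impl(malloc)
#define free_impl mozmem_malloc_impl(free)

int main()
{
  void* p = malloc_impl(16); // expands to je_malloc(16)
  free_impl(p);              // expands to je_free(p)
  return 0;
}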
@ -76,7 +76,7 @@ enum NodeColor
|
|||
};
|
||||
|
||||
// Node structure.
|
||||
template <typename T>
|
||||
template<typename T>
|
||||
class RedBlackTreeNode
|
||||
{
|
||||
T* mLeft;
|
||||
|
@ -84,20 +84,14 @@ class RedBlackTreeNode
|
|||
T* mRightAndColor;
|
||||
|
||||
public:
|
||||
T* Left()
|
||||
{
|
||||
return mLeft;
|
||||
}
|
||||
T* Left() { return mLeft; }
|
||||
|
||||
void SetLeft(T* aValue)
|
||||
{
|
||||
mLeft = aValue;
|
||||
}
|
||||
void SetLeft(T* aValue) { mLeft = aValue; }
|
||||
|
||||
T* Right()
|
||||
{
|
||||
return reinterpret_cast<T*>(
|
||||
reinterpret_cast<uintptr_t>(mRightAndColor) & uintptr_t(~1));
|
||||
return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(mRightAndColor) &
|
||||
uintptr_t(~1));
|
||||
}
|
||||
|
||||
void SetRight(T* aValue)
|
||||
|
@ -108,18 +102,13 @@ public:
|
|||
|
||||
NodeColor Color()
|
||||
{
|
||||
return static_cast<NodeColor>(reinterpret_cast<uintptr_t>(mRightAndColor) & 1);
|
||||
return static_cast<NodeColor>(reinterpret_cast<uintptr_t>(mRightAndColor) &
|
||||
1);
|
||||
}
|
||||
|
||||
bool IsBlack()
|
||||
{
|
||||
return Color() == NodeColor::Black;
|
||||
}
|
||||
bool IsBlack() { return Color() == NodeColor::Black; }
|
||||
|
||||
bool IsRed()
|
||||
{
|
||||
return Color() == NodeColor::Red;
|
||||
}
|
||||
bool IsRed() { return Color() == NodeColor::Red; }
|
||||
|
||||
void SetColor(NodeColor aColor)
|
||||
{
|
||||
|
@ -145,20 +134,11 @@ public:
|
|||
return Last(reinterpret_cast<TreeNode*>(aStart));
|
||||
}
|
||||
|
||||
T* Next(T* aNode)
|
||||
{
|
||||
return Next(reinterpret_cast<TreeNode*>(aNode));
|
||||
}
|
||||
T* Next(T* aNode) { return Next(reinterpret_cast<TreeNode*>(aNode)); }
|
||||
|
||||
T* Prev(T* aNode)
|
||||
{
|
||||
return Prev(reinterpret_cast<TreeNode*>(aNode));
|
||||
}
|
||||
T* Prev(T* aNode) { return Prev(reinterpret_cast<TreeNode*>(aNode)); }
|
||||
|
||||
T* Search(T* aKey)
|
||||
{
|
||||
return Search(reinterpret_cast<TreeNode*>(aKey));
|
||||
}
|
||||
T* Search(T* aKey) { return Search(reinterpret_cast<TreeNode*>(aKey)); }
|
||||
|
||||
// Find a match if it exists. Otherwise, find the next greater node, if one
|
||||
// exists.
|
||||
|
@ -167,54 +147,27 @@ public:
|
|||
return SearchOrNext(reinterpret_cast<TreeNode*>(aKey));
|
||||
}
|
||||
|
||||
void Insert(T* aNode)
|
||||
{
|
||||
Insert(reinterpret_cast<TreeNode*>(aNode));
|
||||
}
|
||||
void Insert(T* aNode) { Insert(reinterpret_cast<TreeNode*>(aNode)); }
|
||||
|
||||
void Remove(T* aNode)
|
||||
{
|
||||
return Remove(reinterpret_cast<TreeNode*>(aNode));
|
||||
}
|
||||
void Remove(T* aNode) { return Remove(reinterpret_cast<TreeNode*>(aNode)); }
|
||||
|
||||
// Helper class to avoid having all the tree traversal code further below
|
||||
// have to use Trait::GetTreeNode, adding visual noise.
|
||||
struct TreeNode : public T
|
||||
{
|
||||
TreeNode* Left()
|
||||
{
|
||||
return (TreeNode*)Trait::GetTreeNode(this).Left();
|
||||
}
|
||||
TreeNode* Left() { return (TreeNode*)Trait::GetTreeNode(this).Left(); }
|
||||
|
||||
void SetLeft(T* aValue)
|
||||
{
|
||||
Trait::GetTreeNode(this).SetLeft(aValue);
|
||||
}
|
||||
void SetLeft(T* aValue) { Trait::GetTreeNode(this).SetLeft(aValue); }
|
||||
|
||||
TreeNode* Right()
|
||||
{
|
||||
return (TreeNode*)Trait::GetTreeNode(this).Right();
|
||||
}
|
||||
TreeNode* Right() { return (TreeNode*)Trait::GetTreeNode(this).Right(); }
|
||||
|
||||
void SetRight(T* aValue)
|
||||
{
|
||||
Trait::GetTreeNode(this).SetRight(aValue);
|
||||
}
|
||||
void SetRight(T* aValue) { Trait::GetTreeNode(this).SetRight(aValue); }
|
||||
|
||||
NodeColor Color()
|
||||
{
|
||||
return Trait::GetTreeNode(this).Color();
|
||||
}
|
||||
NodeColor Color() { return Trait::GetTreeNode(this).Color(); }
|
||||
|
||||
bool IsRed()
|
||||
{
|
||||
return Trait::GetTreeNode(this).IsRed();
|
||||
}
|
||||
bool IsRed() { return Trait::GetTreeNode(this).IsRed(); }
|
||||
|
||||
bool IsBlack()
|
||||
{
|
||||
return Trait::GetTreeNode(this).IsBlack();
|
||||
}
|
||||
bool IsBlack() { return Trait::GetTreeNode(this).IsBlack(); }
|
||||
|
||||
void SetColor(NodeColor aColor)
|
||||
{
|
||||
|
@ -695,7 +648,6 @@ private:
|
|||
// cause the cached path to become invalid. Don't modify the tree during an
|
||||
// iteration.
|
||||
|
||||
|
||||
// Size the path arrays such that they are always large enough, even if a
|
||||
// tree consumes all of memory. Since each node must contain a minimum of
|
||||
// two pointers, there can never be more nodes than:
|
||||
|
@ -739,7 +691,8 @@ public:
|
|||
Item(Iterator* aIterator, T* aItem)
|
||||
: mIterator(aIterator)
|
||||
, mItem(aItem)
|
||||
{ }
|
||||
{
|
||||
}
|
||||
|
||||
bool operator!=(const Item& aOther) const
|
||||
{
|
||||
|
@ -760,10 +713,7 @@ public:
|
|||
return Item<Iterator>(this, mDepth > 0 ? mPath[mDepth - 1] : nullptr);
|
||||
}
|
||||
|
||||
Item<Iterator> end()
|
||||
{
|
||||
return Item<Iterator>(this, nullptr);
|
||||
}
|
||||
Item<Iterator> end() { return Item<Iterator>(this, nullptr); }
|
||||
|
||||
TreeNode* Next()
|
||||
{
|
||||
|
|
|
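Note: the RedBlackTreeNode changes above keep the node color in the least significant bit of mRightAndColor and mask it off when reading the right child, which works because nodes are at least 2-byte aligned. A generic, self-contained sketch of that pointer-tagging trick (illustrative names, not the rb.h classes):

#include <cassert>
#include <cstdint>

enum class Color : uintptr_t { Black = 0, Red = 1 };

template<typename T>
class TaggedPtr
{
  uintptr_t mBits = 0;

public:
  void Set(T* aPtr, Color aColor)
  {
    // Requires at least 2-byte alignment so bit 0 is unused by the address.
    assert((reinterpret_cast<uintptr_t>(aPtr) & 1) == 0);
    mBits = reinterpret_cast<uintptr_t>(aPtr) | static_cast<uintptr_t>(aColor);
  }
  T* Ptr() const { return reinterpret_cast<T*>(mBits & ~uintptr_t(1)); }
  Color GetColor() const { return static_cast<Color>(mBits & 1); }
};

int main()
{
  int value = 42;
  TaggedPtr<int> p;
  p.Set(&value, Color::Red);
  assert(p.Ptr() == &value);
  assert(p.GetColor() == Color::Red);
  return 0;
}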
@ -79,15 +79,15 @@ MOZ_BEGIN_EXTERN_C
|
|||
// definitions.
|
||||
#ifndef MOZ_NO_REPLACE_FUNC_DECL
|
||||
|
||||
# ifndef MOZ_REPLACE_WEAK
|
||||
# define MOZ_REPLACE_WEAK
|
||||
# endif
|
||||
#ifndef MOZ_REPLACE_WEAK
|
||||
#define MOZ_REPLACE_WEAK
|
||||
#endif
|
||||
|
||||
# define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_EXPORT return_type replace_ ## name(__VA_ARGS__) MOZ_REPLACE_WEAK;
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_EXPORT return_type replace_##name(__VA_ARGS__) MOZ_REPLACE_WEAK;
|
||||
|
||||
# define MALLOC_FUNCS MALLOC_FUNCS_ALL
|
||||
# include "malloc_decls.h"
|
||||
#define MALLOC_FUNCS MALLOC_FUNCS_ALL
|
||||
#include "malloc_decls.h"
|
||||
|
||||
#endif // MOZ_NO_REPLACE_FUNC_DECL
|
||||
|
||||
|
|
|
@ -52,21 +52,22 @@ MOZ_BEGIN_EXTERN_C
|
|||
|
||||
#ifndef REPLACE_MALLOC_IMPL
|
||||
// Returns the replace-malloc bridge if there is one to be returned.
|
||||
MFBT_API ReplaceMallocBridge* get_bridge();
|
||||
MFBT_API ReplaceMallocBridge*
|
||||
get_bridge();
|
||||
#endif
|
||||
|
||||
// Table of malloc functions.
|
||||
// e.g. void* (*malloc)(size_t), etc.
|
||||
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
typedef return_type(name ## _impl_t)(__VA_ARGS__);
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
typedef return_type(name##_impl_t)(__VA_ARGS__);
|
||||
|
||||
#include "malloc_decls.h"
|
||||
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
name ## _impl_t * name;
|
||||
#define MALLOC_DECL(name, return_type, ...) name##_impl_t* name;
|
||||
|
||||
typedef struct {
|
||||
typedef struct
|
||||
{
|
||||
#include "malloc_decls.h"
|
||||
} malloc_table_t;
|
||||
|
||||
|
@ -87,12 +88,13 @@ MOZ_END_EXTERN_C
|
|||
// other allocation functions, like calloc_hook.
|
||||
namespace mozilla {
|
||||
namespace detail {
|
||||
template <typename R, typename... Args>
|
||||
struct AllocHookType {
|
||||
template<typename R, typename... Args>
|
||||
struct AllocHookType
|
||||
{
|
||||
using Type = R (*)(R, Args...);
|
||||
};
|
||||
|
||||
template <typename... Args>
|
||||
template<typename... Args>
|
||||
struct AllocHookType<void, Args...>
|
||||
{
|
||||
using Type = void (*)(Args...);
|
||||
|
@ -101,11 +103,12 @@ struct AllocHookType<void, Args...>
|
|||
} // namespace detail
|
||||
} // namespace mozilla
|
||||
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
typename mozilla::detail::AllocHookType<return_type, ##__VA_ARGS__>::Type \
|
||||
name ## _hook;
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
typename mozilla::detail::AllocHookType<return_type, ##__VA_ARGS__>::Type \
|
||||
name##_hook;
|
||||
|
||||
typedef struct {
|
||||
typedef struct
|
||||
{
|
||||
#include "malloc_decls.h"
|
||||
// Like free_hook, but called before realloc_hook. free_hook is called
|
||||
// instead of not given.
|
||||
|
@ -130,7 +133,10 @@ struct DebugFdRegistry
|
|||
|
||||
struct ReplaceMallocBridge
|
||||
{
|
||||
ReplaceMallocBridge() : mVersion(3) {}
|
||||
ReplaceMallocBridge()
|
||||
: mVersion(3)
|
||||
{
|
||||
}
|
||||
|
||||
// This method was added in version 1 of the bridge.
|
||||
virtual mozilla::dmd::DMDFuncs* GetDMDFuncs() { return nullptr; }
|
||||
|
@ -154,17 +160,22 @@ struct ReplaceMallocBridge
|
|||
// Functions from a previously registered table may still be called for
|
||||
// a brief time after RegisterHook returns.
|
||||
// This method was added in version 3 of the bridge.
|
||||
virtual const malloc_table_t*
|
||||
RegisterHook(const char* aName, const malloc_table_t* aTable,
|
||||
const malloc_hook_table_t* aHookTable) { return nullptr; }
|
||||
virtual const malloc_table_t* RegisterHook(
|
||||
const char* aName,
|
||||
const malloc_table_t* aTable,
|
||||
const malloc_hook_table_t* aHookTable)
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
#ifndef REPLACE_MALLOC_IMPL
|
||||
// Returns the replace-malloc bridge if its version is at least the
|
||||
// requested one.
|
||||
static ReplaceMallocBridge* Get(int aMinimumVersion) {
|
||||
static ReplaceMallocBridge* Get(int aMinimumVersion)
|
||||
{
|
||||
static ReplaceMallocBridge* sSingleton = get_bridge();
|
||||
return (sSingleton && sSingleton->mVersion >= aMinimumVersion)
|
||||
? sSingleton : nullptr;
|
||||
return (sSingleton && sSingleton->mVersion >= aMinimumVersion) ? sSingleton
|
||||
: nullptr;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -197,9 +208,10 @@ struct ReplaceMalloc
|
|||
}
|
||||
}
|
||||
|
||||
static const malloc_table_t*
|
||||
RegisterHook(const char* aName, const malloc_table_t* aTable,
|
||||
const malloc_hook_table_t* aHookTable)
|
||||
static const malloc_table_t* RegisterHook(
|
||||
const char* aName,
|
||||
const malloc_table_t* aTable,
|
||||
const malloc_hook_table_t* aHookTable)
|
||||
{
|
||||
auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 3);
|
||||
return singleton ? singleton->RegisterHook(aName, aTable, aHookTable)
|
||||
|
|
|
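Note: the pair of MALLOC_DECL expansions above first generates a function type per allocator entry point, then a malloc_table_t whose members are pointers to those types; a replace-malloc library receives such a table through the init entry point declared in malloc_decls.h under MALLOC_FUNCS_INIT. A rough, hypothetical sketch of what one generated member looks like and how it can be filled (the real members come from malloc_decls.h):

#include <cstddef>

typedef void*(malloc_impl_t)(size_t); // from: typedef return_type(name##_impl_t)(__VA_ARGS__);

typedef struct
{
  malloc_impl_t* malloc; // from: name##_impl_t* name;
  // ... one pointer per function listed in malloc_decls.h ...
} malloc_table_sketch_t;

// Placeholder implementation standing in for a forwarding allocator.
static void* my_malloc(size_t aSize) { (void)aSize; return nullptr; }
static malloc_table_sketch_t sTable = { my_malloc };

int main() { return sTable.malloc ? 0 : 1; }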
@ -13,13 +13,13 @@
|
|||
// Malloc implementation functions are MOZ_MEMORY_API, and jemalloc
|
||||
// specific functions MOZ_JEMALLOC_API; see mozmemory_wrap.h
|
||||
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_MEMORY_API return_type name##_impl(__VA_ARGS__);
|
||||
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
|
||||
#include "malloc_decls.h"
|
||||
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_JEMALLOC_API return_type name ## _impl(__VA_ARGS__);
|
||||
#define MALLOC_DECL(name, return_type, ...) \
|
||||
MOZ_JEMALLOC_API return_type name##_impl(__VA_ARGS__);
|
||||
#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
|
||||
#include "malloc_decls.h"
|
||||
|
||||
|
@ -27,78 +27,97 @@
|
|||
// for the built binary to run on newer versions of OSX. So use the newest
|
||||
// possible version of those structs.
|
||||
|
||||
typedef struct _malloc_zone_t {
|
||||
void *reserved1;
|
||||
void *reserved2;
|
||||
size_t (*size)(struct _malloc_zone_t *, const void *);
|
||||
void *(*malloc)(struct _malloc_zone_t *, size_t);
|
||||
void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
|
||||
void *(*valloc)(struct _malloc_zone_t *, size_t);
|
||||
void (*free)(struct _malloc_zone_t *, void *);
|
||||
void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
|
||||
void (*destroy)(struct _malloc_zone_t *);
|
||||
const char *zone_name;
|
||||
unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
|
||||
void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
|
||||
struct malloc_introspection_t *introspect;
|
||||
typedef struct _malloc_zone_t
|
||||
{
|
||||
void* reserved1;
|
||||
void* reserved2;
|
||||
size_t (*size)(struct _malloc_zone_t*, const void*);
|
||||
void* (*malloc)(struct _malloc_zone_t*, size_t);
|
||||
void* (*calloc)(struct _malloc_zone_t*, size_t, size_t);
|
||||
void* (*valloc)(struct _malloc_zone_t*, size_t);
|
||||
void (*free)(struct _malloc_zone_t*, void*);
|
||||
void* (*realloc)(struct _malloc_zone_t*, void*, size_t);
|
||||
void (*destroy)(struct _malloc_zone_t*);
|
||||
const char* zone_name;
|
||||
unsigned (*batch_malloc)(struct _malloc_zone_t*, size_t, void**, unsigned);
|
||||
void (*batch_free)(struct _malloc_zone_t*, void**, unsigned);
|
||||
struct malloc_introspection_t* introspect;
|
||||
unsigned version;
|
||||
void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
|
||||
void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
|
||||
size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
|
||||
void* (*memalign)(struct _malloc_zone_t*, size_t, size_t);
|
||||
void (*free_definite_size)(struct _malloc_zone_t*, void*, size_t);
|
||||
size_t (*pressure_relief)(struct _malloc_zone_t*, size_t);
|
||||
} malloc_zone_t;
|
||||
|
||||
typedef struct {
|
||||
typedef struct
|
||||
{
|
||||
vm_address_t address;
|
||||
vm_size_t size;
|
||||
} vm_range_t;
|
||||
|
||||
typedef struct malloc_statistics_t {
|
||||
typedef struct malloc_statistics_t
|
||||
{
|
||||
unsigned blocks_in_use;
|
||||
size_t size_in_use;
|
||||
size_t max_size_in_use;
|
||||
size_t size_allocated;
|
||||
} malloc_statistics_t;
|
||||
|
||||
typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
|
||||
typedef kern_return_t
|
||||
memory_reader_t(task_t, vm_address_t, vm_size_t, void**);
|
||||
|
||||
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
|
||||
typedef void
|
||||
vm_range_recorder_t(task_t, void*, unsigned type, vm_range_t*, unsigned);
|
||||
|
||||
typedef struct malloc_introspection_t {
|
||||
kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
|
||||
size_t (*good_size)(malloc_zone_t *, size_t);
|
||||
boolean_t (*check)(malloc_zone_t *);
|
||||
void (*print)(malloc_zone_t *, boolean_t);
|
||||
void (*log)(malloc_zone_t *, void *);
|
||||
void (*force_lock)(malloc_zone_t *);
|
||||
void (*force_unlock)(malloc_zone_t *);
|
||||
void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
|
||||
boolean_t (*zone_locked)(malloc_zone_t *);
|
||||
boolean_t (*enable_discharge_checking)(malloc_zone_t *);
|
||||
boolean_t (*disable_discharge_checking)(malloc_zone_t *);
|
||||
void (*discharge)(malloc_zone_t *, void *);
|
||||
typedef struct malloc_introspection_t
|
||||
{
|
||||
kern_return_t (*enumerator)(task_t,
|
||||
void*,
|
||||
unsigned,
|
||||
vm_address_t,
|
||||
memory_reader_t,
|
||||
vm_range_recorder_t);
|
||||
size_t (*good_size)(malloc_zone_t*, size_t);
|
||||
boolean_t (*check)(malloc_zone_t*);
|
||||
void (*print)(malloc_zone_t*, boolean_t);
|
||||
void (*log)(malloc_zone_t*, void*);
|
||||
void (*force_lock)(malloc_zone_t*);
|
||||
void (*force_unlock)(malloc_zone_t*);
|
||||
void (*statistics)(malloc_zone_t*, malloc_statistics_t*);
|
||||
boolean_t (*zone_locked)(malloc_zone_t*);
|
||||
boolean_t (*enable_discharge_checking)(malloc_zone_t*);
|
||||
boolean_t (*disable_discharge_checking)(malloc_zone_t*);
|
||||
void (*discharge)(malloc_zone_t*, void*);
|
||||
#ifdef __BLOCKS__
|
||||
void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
|
||||
void (*enumerate_discharged_pointers)(malloc_zone_t*, void (^)(void*, void*));
|
||||
#else
|
||||
void *enumerate_unavailable_without_blocks;
|
||||
void* enumerate_unavailable_without_blocks;
|
||||
#endif
|
||||
void (*reinit_lock)(malloc_zone_t *);
|
||||
void (*reinit_lock)(malloc_zone_t*);
|
||||
} malloc_introspection_t;
|
||||
|
||||
extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
|
||||
extern kern_return_t
|
||||
malloc_get_all_zones(task_t, memory_reader_t, vm_address_t**, unsigned*);
|
||||
|
||||
extern malloc_zone_t *malloc_default_zone(void);
|
||||
extern malloc_zone_t*
|
||||
malloc_default_zone(void);
|
||||
|
||||
extern void malloc_zone_register(malloc_zone_t *zone);
|
||||
extern void
|
||||
malloc_zone_register(malloc_zone_t* zone);
|
||||
|
||||
extern void malloc_zone_unregister(malloc_zone_t *zone);
|
||||
extern void
|
||||
malloc_zone_unregister(malloc_zone_t* zone);
|
||||
|
||||
extern malloc_zone_t *malloc_default_purgeable_zone(void);
|
||||
extern malloc_zone_t*
|
||||
malloc_default_purgeable_zone(void);
|
||||
|
||||
extern malloc_zone_t* malloc_zone_from_ptr(const void* ptr);
|
||||
extern malloc_zone_t*
|
||||
malloc_zone_from_ptr(const void* ptr);
|
||||
|
||||
extern void malloc_zone_free(malloc_zone_t* zone, void* ptr);
|
||||
extern void
|
||||
malloc_zone_free(malloc_zone_t* zone, void* ptr);
|
||||
|
||||
extern void* malloc_zone_realloc(malloc_zone_t* zone, void* ptr, size_t size);
|
||||
extern void*
|
||||
malloc_zone_realloc(malloc_zone_t* zone, void* ptr, size_t size);
|
||||
|
||||
// The following is a OSX zone allocator implementation.
|
||||
// /!\ WARNING. It assumes the underlying malloc implementation's
|
||||
|
@ -107,25 +126,25 @@ extern void* malloc_zone_realloc(malloc_zone_t* zone, void* ptr, size_t size);
|
|||
// owned by the allocator.
|
||||
|
||||
static size_t
|
||||
zone_size(malloc_zone_t *zone, const void *ptr)
|
||||
zone_size(malloc_zone_t* zone, const void* ptr)
|
||||
{
|
||||
return malloc_usable_size_impl(ptr);
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_malloc(malloc_zone_t *zone, size_t size)
|
||||
static void*
|
||||
zone_malloc(malloc_zone_t* zone, size_t size)
|
||||
{
|
||||
return malloc_impl(size);
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
|
||||
static void*
|
||||
zone_calloc(malloc_zone_t* zone, size_t num, size_t size)
|
||||
{
|
||||
return calloc_impl(num, size);
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
|
||||
static void*
|
||||
zone_realloc(malloc_zone_t* zone, void* ptr, size_t size)
|
||||
{
|
||||
if (malloc_usable_size_impl(ptr))
|
||||
return realloc_impl(ptr, size);
|
||||
|
@ -163,7 +182,7 @@ other_zone_free(malloc_zone_t* original_zone, void* ptr)
|
|||
}
|
||||
|
||||
static void
|
||||
zone_free(malloc_zone_t *zone, void *ptr)
|
||||
zone_free(malloc_zone_t* zone, void* ptr)
|
||||
{
|
||||
if (malloc_usable_size_impl(ptr)) {
|
||||
free_impl(ptr);
|
||||
|
@ -173,7 +192,7 @@ zone_free(malloc_zone_t *zone, void *ptr)
|
|||
}
|
||||
|
||||
static void
|
||||
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
|
||||
zone_free_definite_size(malloc_zone_t* zone, void* ptr, size_t size)
|
||||
{
|
||||
size_t current_size = malloc_usable_size_impl(ptr);
|
||||
if (current_size) {
|
||||
|
@ -184,31 +203,33 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
|
|||
other_zone_free(zone, ptr);
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
|
||||
static void*
|
||||
zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size)
|
||||
{
|
||||
void *ptr;
|
||||
void* ptr;
|
||||
if (posix_memalign_impl(&ptr, alignment, size) == 0)
|
||||
return ptr;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *
|
||||
zone_valloc(malloc_zone_t *zone, size_t size)
|
||||
static void*
|
||||
zone_valloc(malloc_zone_t* zone, size_t size)
|
||||
{
|
||||
return valloc_impl(size);
|
||||
}
|
||||
|
||||
static void
|
||||
zone_destroy(malloc_zone_t *zone)
|
||||
zone_destroy(malloc_zone_t* zone)
|
||||
{
|
||||
// This function should never be called.
|
||||
MOZ_CRASH();
|
||||
}
|
||||
|
||||
static unsigned
|
||||
zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
|
||||
unsigned num_requested)
|
||||
zone_batch_malloc(malloc_zone_t* zone,
|
||||
size_t size,
|
||||
void** results,
|
||||
unsigned num_requested)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
|
@ -222,8 +243,9 @@ zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
|
|||
}
|
||||
|
||||
static void
|
||||
zone_batch_free(malloc_zone_t *zone, void **to_be_freed,
|
||||
unsigned num_to_be_freed)
|
||||
zone_batch_free(malloc_zone_t* zone,
|
||||
void** to_be_freed,
|
||||
unsigned num_to_be_freed)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
|
@ -234,46 +256,51 @@ zone_batch_free(malloc_zone_t *zone, void **to_be_freed,
|
|||
}
|
||||
|
||||
static size_t
|
||||
zone_pressure_relief(malloc_zone_t *zone, size_t goal)
|
||||
zone_pressure_relief(malloc_zone_t* zone, size_t goal)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static size_t
|
||||
zone_good_size(malloc_zone_t *zone, size_t size)
|
||||
zone_good_size(malloc_zone_t* zone, size_t size)
|
||||
{
|
||||
return malloc_good_size_impl(size);
|
||||
}
|
||||
|
||||
static kern_return_t
|
||||
zone_enumerator(task_t task, void *data, unsigned type_mask,
|
||||
vm_address_t zone_address, memory_reader_t reader,
|
||||
vm_range_recorder_t recorder)
|
||||
zone_enumerator(task_t task,
|
||||
void* data,
|
||||
unsigned type_mask,
|
||||
vm_address_t zone_address,
|
||||
memory_reader_t reader,
|
||||
vm_range_recorder_t recorder)
|
||||
{
|
||||
return KERN_SUCCESS;
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
zone_check(malloc_zone_t *zone)
|
||||
zone_check(malloc_zone_t* zone)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
zone_print(malloc_zone_t *zone, boolean_t verbose)
|
||||
zone_print(malloc_zone_t* zone, boolean_t verbose)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
zone_log(malloc_zone_t *zone, void *address)
|
||||
zone_log(malloc_zone_t* zone, void* address)
|
||||
{
|
||||
}
|
||||
|
||||
extern void _malloc_prefork(void);
|
||||
extern void _malloc_postfork_child(void);
|
||||
extern void
|
||||
_malloc_prefork(void);
|
||||
extern void
|
||||
_malloc_postfork_child(void);
|
||||
|
||||
static void
|
||||
zone_force_lock(malloc_zone_t *zone)
|
||||
zone_force_lock(malloc_zone_t* zone)
|
||||
{
|
||||
// /!\ This calls into mozjemalloc. It works because we're linked in the
|
||||
// same library.
|
||||
|
@ -281,7 +308,7 @@ zone_force_lock(malloc_zone_t *zone)
|
|||
}
|
||||
|
||||
static void
|
||||
zone_force_unlock(malloc_zone_t *zone)
|
||||
zone_force_unlock(malloc_zone_t* zone)
|
||||
{
|
||||
// /!\ This calls into mozjemalloc. It works because we're linked in the
|
||||
// same library.
|
||||
|
@ -289,7 +316,7 @@ zone_force_unlock(malloc_zone_t *zone)
|
|||
}
|
||||
|
||||
static void
|
||||
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
|
||||
zone_statistics(malloc_zone_t* zone, malloc_statistics_t* stats)
|
||||
{
|
||||
// We make no effort to actually fill the values
|
||||
stats->blocks_in_use = 0;
|
||||
|
@ -299,14 +326,14 @@ zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
|
|||
}
|
||||
|
||||
static boolean_t
|
||||
zone_locked(malloc_zone_t *zone)
|
||||
zone_locked(malloc_zone_t* zone)
|
||||
{
|
||||
// Pretend no lock is being held
|
||||
return false;
|
||||
}
|
||||
|
||||
static void
|
||||
zone_reinit_lock(malloc_zone_t *zone)
|
||||
zone_reinit_lock(malloc_zone_t* zone)
|
||||
{
|
||||
// As of OSX 10.12, this function is only used when force_unlock would
|
||||
// be used if the zone version were < 9. So just use force_unlock.
|
||||
|
@ -316,9 +343,10 @@ zone_reinit_lock(malloc_zone_t *zone)
|
|||
static malloc_zone_t zone;
|
||||
static struct malloc_introspection_t zone_introspect;
|
||||
|
||||
static malloc_zone_t *get_default_zone()
|
||||
static malloc_zone_t*
|
||||
get_default_zone()
|
||||
{
|
||||
malloc_zone_t **zones = NULL;
|
||||
malloc_zone_t** zones = NULL;
|
||||
unsigned int num_zones = 0;
|
||||
|
||||
// On OSX 10.12, malloc_default_zone returns a special zone that is not
|
||||
|
@ -329,8 +357,8 @@ static malloc_zone_t *get_default_zone()
|
|||
// zone is the default.
|
||||
// So get the list of zones to get the first one, instead of relying on
|
||||
// malloc_default_zone.
|
||||
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, (vm_address_t**) &zones,
|
||||
&num_zones)) {
|
||||
if (KERN_SUCCESS !=
|
||||
malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &num_zones)) {
|
||||
// Reset the value in case the failure happened after it was set.
|
||||
num_zones = 0;
|
||||
}
|
||||
|
@ -340,12 +368,10 @@ static malloc_zone_t *get_default_zone()
|
|||
return malloc_default_zone();
|
||||
}
|
||||
|
||||
|
||||
__attribute__((constructor))
|
||||
static void
|
||||
__attribute__((constructor)) static void
|
||||
register_zone(void)
|
||||
{
|
||||
malloc_zone_t *default_zone = get_default_zone();
|
||||
malloc_zone_t* default_zone = get_default_zone();
|
||||
|
||||
zone.size = zone_size;
|
||||
zone.malloc = zone_malloc;
|
||||
|
@ -392,7 +418,7 @@ register_zone(void)
|
|||
// malloc_default_purgeable_zone is called beforehand so that the
|
||||
// default purgeable zone is created when the default zone is still
|
||||
// a scalable_zone.
|
||||
malloc_zone_t *purgeable_zone = malloc_default_purgeable_zone();
|
||||
malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();
|
||||
|
||||
// Register the custom zone. At this point it won't be the default.
|
||||
malloc_zone_register(&zone);
|
||||
|
|