Bug 1389305 - Add jemalloc_ptr_info() and moz_malloc_enclosing_size_of(). r=glandium.

jemalloc_ptr_info() gives info about any pointer, such as whether it's within a
live or freed allocation, and if so, info about that allocation. It's useful for
debugging.
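
As an illustrative sketch (not part of this patch): a caller might combine it
with the jemalloc_ptr_info_t tag helpers added in mozjemalloc_types.h.
DescribePtr is a hypothetical helper name.

    #include "mozmemory.h"  // declares jemalloc_ptr_info()

    static void
    DescribePtr(const void* aPtr)
    {
      jemalloc_ptr_info_t info;
      jemalloc_ptr_info(aPtr, &info);
      if (jemalloc_ptr_is_live(&info)) {
        // aPtr lies within a live allocation of info.size bytes at info.addr.
      } else if (jemalloc_ptr_is_freed(&info)) {
        // aPtr lies within freed memory; info.tag says what kind.
      } else {
        // TagUnknown: stack, code, static data, or otherwise not a heap pointer.
      }
    }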

moz_malloc_enclosing_size_of() uses jemalloc_ptr_info() to measure the size of
an allocation from an interior pointer. It's useful for memory reporting,
especially for Rust code.
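
A hedged usage sketch (illustrative values): unlike moz_malloc_size_of(),
which must be given the pointer originally returned by malloc(),
moz_malloc_enclosing_size_of() accepts any interior pointer, and returns 0
once the allocation is freed (or if the pointer is not a live heap pointer).

    char* base = (char*)malloc(100);
    size_t a = moz_malloc_size_of(base);                 // needs the base pointer
    size_t b = moz_malloc_enclosing_size_of(base + 50);  // interior pointer is fine
    // a == b: both report the usable size of the enclosing allocation.
    free(base);
    size_t c = moz_malloc_enclosing_size_of(base + 50);  // now 0: not live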

--HG--
extra : rebase_source : caa19cccf8c2d1f79cf004fe6a408775de5a7b22
Nicholas Nethercote 2017-08-24 19:37:27 +10:00
Parent 20a39fc515
Commit 365285b831
12 changed files with 460 additions and 17 deletions

View file

@@ -62,6 +62,7 @@ MALLOC_DECL_VOID(jemalloc_stats, jemalloc_stats_t *)
MALLOC_DECL_VOID(jemalloc_purge_freed_pages)
MALLOC_DECL_VOID(jemalloc_free_dirty_pages)
MALLOC_DECL_VOID(jemalloc_thread_local_arena, bool)
MALLOC_DECL_VOID(jemalloc_ptr_info, const void*, jemalloc_ptr_info_t*)
# endif
# undef MALLOC_DECL_VOID

View file

@@ -13,6 +13,7 @@
* - jemalloc_purge_freed_pages
* - jemalloc_free_dirty_pages
* - jemalloc_thread_local_arena
* - jemalloc_ptr_info
*/
#ifndef MOZ_MEMORY
@@ -87,4 +88,10 @@ MOZ_JEMALLOC_API void jemalloc_free_dirty_pages();
MOZ_JEMALLOC_API void jemalloc_thread_local_arena(bool enabled);
/*
* Provide information about any allocation enclosing the given address.
*/
MOZ_JEMALLOC_API void jemalloc_ptr_info(const void* ptr,
                                        jemalloc_ptr_info_t* info);
#endif /* mozmemory_h */

View file

@@ -10,6 +10,7 @@
* argument types. */
#define MALLOC_DECL(name, return_type, ...) \
  MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"
#ifdef MOZ_WRAP_NEW_DELETE

View file

@@ -37,6 +37,7 @@
* - jemalloc_purge_freed_pages
* - jemalloc_free_dirty_pages
* - jemalloc_thread_local_arena
* - jemalloc_ptr_info
* (these functions are native to mozjemalloc)
*
* These functions are all exported as part of libmozglue (see
@@ -207,5 +208,7 @@
#define jemalloc_free_dirty_pages_impl mozmem_jemalloc_impl(jemalloc_free_dirty_pages)
#define jemalloc_thread_local_arena_impl \
  mozmem_jemalloc_impl(jemalloc_thread_local_arena)
#define jemalloc_ptr_info_impl \
  mozmem_jemalloc_impl(jemalloc_ptr_info)
#endif /* mozmemory_wrap_h */

View file

@@ -1,40 +1,45 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/mozalloc.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Vector.h"
#include "mozmemory.h"
#include "gtest/gtest.h"
using namespace mozilla;
static inline void
TestOne(size_t size)
{
  size_t req = size;
  size_t adv = malloc_good_size(req);
  char* p = (char*)malloc(req);
  size_t usable = moz_malloc_usable_size(p);
  // NB: Using EXPECT here so that we still free the memory on failure.
  EXPECT_EQ(adv, usable) <<
    "malloc_good_size(" << req << ") --> " << adv << "; "
    "malloc_usable_size(" << req << ") --> " << usable;
  free(p);
}

static inline void
TestThree(size_t size)
{
  ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
  ASSERT_NO_FATAL_FAILURE(TestOne(size));
  ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
}
#define K * 1024
#define M * 1024 * 1024

TEST(Jemalloc, UsableSizeInAdvance)
{
/*
 * Test every size up to a certain point, then (N-1, N, N+1) triplets for
 * various sizes beyond that.
@@ -49,3 +54,167 @@ TEST(Jemalloc, UsableSizeInAdvance)
  for (size_t n = 1 M; n < 8 M; n += 128 K)
    ASSERT_NO_FATAL_FAILURE(TestThree(n));
}
static int gStaticVar;

bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
            size_t aSize)
{
  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize;
}

bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr, size_t aPageSize)
{
  size_t pageSizeMask = aPageSize - 1;
  return jemalloc_ptr_is_freed_page(&aInfo) &&
         aInfo.addr == (void*)(uintptr_t(aAddr) & ~pageSizeMask) &&
         aInfo.size == aPageSize;
}
TEST(Jemalloc, PtrInfo)
{
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  jemalloc_ptr_info_t info;
  Vector<char*> small, large, huge;

  // For small (<= 2KiB) allocations, test every position within many possible
  // sizes.
  size_t small_max = stats.page_size / 2;
  for (size_t n = 0; n <= small_max; n += 8) {
    auto p = (char*)malloc(n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(small.append(p));
    for (size_t j = 0; j < usable; j++) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveSmall, p, usable));
    }
  }

  // Similar for large (2KiB + 1KiB .. 1MiB - 8KiB) allocations.
  for (size_t n = small_max + 1 K; n <= stats.large_max; n += 1 K) {
    auto p = (char*)malloc(n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(large.append(p));
    for (size_t j = 0; j < usable; j += 347) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveLarge, p, usable));
    }
  }

  // Similar for huge (> 1MiB - 8KiB) allocations.
  for (size_t n = stats.chunksize; n <= 10 M; n += 512 K) {
    auto p = (char*)malloc(n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(huge.append(p));
    for (size_t j = 0; j < usable; j += 567) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveHuge, p, usable));
    }
  }

  // The following loops check freed allocations. We step through the vectors
  // using prime-sized steps, which gives full coverage of the arrays while
  // avoiding deallocating in the same order we allocated.
  size_t len;

  // Free the small allocations and recheck them.
  int isFreedSmall = 0, isFreedPage = 0;
  len = small.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
    char* p = small[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k++) {
      jemalloc_ptr_info(&p[k], &info);
      // There are two valid outcomes here.
      if (InfoEq(info, TagFreedSmall, p, usable)) {
        isFreedSmall++;
      } else if (InfoEqFreedPage(info, &p[k], stats.page_size)) {
        isFreedPage++;
      } else {
        ASSERT_TRUE(false);
      }
    }
  }
  // There should be a lot more FreedSmall results than FreedPage results.
  ASSERT_TRUE(isFreedSmall / isFreedPage > 10);

  // Free the large allocations and recheck them.
  len = large.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) {
    char* p = large[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 357) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size));
    }
  }

  // Free the huge allocations and recheck them.
  len = huge.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) {
    char* p = huge[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 587) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
    }
  }

  // Null ptr.
  jemalloc_ptr_info(nullptr, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));

  // Near-null ptr.
  jemalloc_ptr_info((void*)0x123, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));

  // Maximum address.
  jemalloc_ptr_info((void*)uintptr_t(-1), &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));

  // Stack memory.
  int stackVar;
  jemalloc_ptr_info(&stackVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));

  // Code memory.
  jemalloc_ptr_info((const void*)&jemalloc_ptr_info, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));

  // Static memory.
  jemalloc_ptr_info(&gStaticVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));

  // Chunk header.
  UniquePtr<int> p = MakeUnique<int>();
  size_t chunksizeMask = stats.chunksize - 1;
  char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
  size_t chunkHeaderSize = stats.chunksize - stats.large_max;
  for (size_t i = 0; i < chunkHeaderSize; i += 64) {
    jemalloc_ptr_info(&chunk[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
  }

  // Run header.
  size_t page_sizeMask = stats.page_size - 1;
  char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask);
  for (size_t i = 0; i < 4 * sizeof(void*); i++) {
    jemalloc_ptr_info(&run[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
  }

  // Entire chunk. It's impossible to check what is put into |info| for all of
  // these addresses; this is more about checking that we don't crash.
  for (size_t i = 0; i < stats.chunksize; i += 256) {
    jemalloc_ptr_info(&chunk[i], &info);
  }
}
#undef K
#undef M

View file

@@ -66,6 +66,7 @@ MOZ_MEMORY_API char *strndup_impl(const char *, size_t);
#include <sys/types.h>
#include "mozilla/Assertions.h"
#include "mozilla/mozalloc.h"
#include "mozilla/mozalloc_oom.h" // for mozalloc_handle_oom
@@ -214,8 +215,30 @@ moz_malloc_usable_size(void *ptr)
#endif
}
size_t
moz_malloc_size_of(const void *ptr)
{
    return moz_malloc_usable_size((void *)ptr);
}
#if defined(MOZ_MEMORY)
#include "mozjemalloc_types.h"
// mozmemory.h declares jemalloc_ptr_info(), but including that header in this
// file is complicated. So we just redeclare it here instead, and include
// mozjemalloc_types.h for jemalloc_ptr_info_t.
MOZ_JEMALLOC_API void jemalloc_ptr_info(const void* ptr,
                                        jemalloc_ptr_info_t* info);
#endif
size_t
moz_malloc_enclosing_size_of(const void *ptr)
{
#if defined(MOZ_MEMORY)
    jemalloc_ptr_info_t info;
    jemalloc_ptr_info(ptr, &info);
    return jemalloc_ptr_is_live(&info) ? info.size : 0;
#else
    return 0;
#endif
}
#endif

View file

@@ -98,6 +98,12 @@ MFBT_API size_t moz_malloc_usable_size(void *ptr);
MFBT_API size_t moz_malloc_size_of(const void *ptr);
/*
* Like moz_malloc_size_of(), but works reliably with interior pointers, i.e.
* pointers into the middle of a live allocation.
*/
MFBT_API size_t moz_malloc_enclosing_size_of(const void *ptr);
#if defined(HAVE_STRNDUP)
MFBT_API char* moz_xstrndup(const char* str, size_t strsize)
MOZ_ALLOCATOR;

View file

@@ -1460,6 +1460,31 @@ extent_ad_comp(extent_node_t *a, extent_node_t *b)
rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
    extent_ad_comp)

static inline int
extent_bounds_comp(extent_node_t* aKey, extent_node_t* aNode)
{
  uintptr_t key_addr = (uintptr_t)aKey->addr;
  uintptr_t node_addr = (uintptr_t)aNode->addr;
  size_t node_size = aNode->size;

  // Is aKey within aNode?
  if (node_addr <= key_addr && key_addr < node_addr + node_size) {
    return 0;
  }

  return ((key_addr > node_addr) - (key_addr < node_addr));
}
/*
* This is an expansion of just the search function from the rb_wrap macro.
*/
static extent_node_t *
extent_tree_bounds_search(extent_tree_t *tree, extent_node_t *key) {
  extent_node_t *ret;
  rb_search(extent_node_t, link_ad, extent_bounds_comp, tree, key, ret);
  return ret;
}
/*
* End extent tree code.
*/
@@ -3544,6 +3569,134 @@ isalloc(const void *ptr)
  return (ret);
}
MOZ_JEMALLOC_API void
jemalloc_ptr_info_impl(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
  arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aPtr);

  // Is the pointer null, or within one chunk's size of null?
  if (!chunk) {
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  // Look for huge allocations before looking for |chunk| in chunk_rtree.
  // This is necessary because |chunk| won't be in chunk_rtree if it's
  // the second or subsequent chunk in a huge allocation.
  extent_node_t* node;
  extent_node_t key;
  malloc_mutex_lock(&huge_mtx);
  key.addr = const_cast<void*>(aPtr);
  node = extent_tree_bounds_search(&huge, &key);
  if (node) {
    *aInfo = { TagLiveHuge, node->addr, node->size };
  }
  malloc_mutex_unlock(&huge_mtx);
  if (node) {
    return;
  }

  // It's not a huge allocation. Check if we have a known chunk.
  if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);

  // Get the page number within the chunk.
  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
  if (pageind < arena_chunk_header_npages) {
    // Within the chunk header.
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  size_t mapbits = chunk->map[pageind].bits;

  if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
    PtrInfoTag tag = TagFreedPageDirty;
    if (mapbits & CHUNK_MAP_DIRTY)
      tag = TagFreedPageDirty;
    else if (mapbits & CHUNK_MAP_DECOMMITTED)
      tag = TagFreedPageDecommitted;
    else if (mapbits & CHUNK_MAP_MADVISED)
      tag = TagFreedPageMadvised;
    else if (mapbits & CHUNK_MAP_ZEROED)
      tag = TagFreedPageZeroed;
    else
      MOZ_CRASH();

    void* pageaddr = (void*)(uintptr_t(aPtr) & ~pagesize_mask);
    *aInfo = { tag, pageaddr, pagesize };
    return;
  }

  if (mapbits & CHUNK_MAP_LARGE) {
    // It's a large allocation. Only the first page of a large
    // allocation contains its size, so if the address is not in
    // the first page, scan back to find the allocation size.
    size_t size;
    while (true) {
      size = mapbits & ~pagesize_mask;
      if (size != 0) {
        break;
      }

      // The following two return paths shouldn't occur in
      // practice unless there is heap corruption.

      pageind--;
      MOZ_DIAGNOSTIC_ASSERT(pageind >= arena_chunk_header_npages);
      if (pageind < arena_chunk_header_npages) {
        *aInfo = { TagUnknown, nullptr, 0 };
        return;
      }

      mapbits = chunk->map[pageind].bits;
      MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
      if (!(mapbits & CHUNK_MAP_LARGE)) {
        *aInfo = { TagUnknown, nullptr, 0 };
        return;
      }
    }

    void* addr = ((char*)chunk) + (pageind << pagesize_2pow);
    *aInfo = { TagLiveLarge, addr, size };
    return;
  }

  // It must be a small allocation.
  auto run = (arena_run_t*)(mapbits & ~pagesize_mask);
  MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);

  // The allocation size is stored in the run metadata.
  size_t size = run->bin->reg_size;

  // Address of the first possible pointer in the run after its headers.
  uintptr_t reg0_addr = (uintptr_t)run + run->bin->reg0_offset;
  if (aPtr < (void*)reg0_addr) {
    // In the run header.
    *aInfo = { TagUnknown, nullptr, 0 };
    return;
  }

  // Position in the run.
  unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;

  // Pointer to the allocation's base address.
  void* addr = (void*)(reg0_addr + regind * size);

  // Check if the allocation has been freed.
  unsigned elm = regind >> (SIZEOF_INT_2POW + 3);
  unsigned bit = regind - (elm << (SIZEOF_INT_2POW + 3));
  PtrInfoTag tag = ((run->regs_mask[elm] & (1U << bit)))
                   ? TagFreedSmall : TagLiveSmall;

  *aInfo = { tag, addr, size };
}
static inline void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm)
@@ -4772,6 +4925,7 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
  stats->small_max = small_max;
  stats->large_max = arena_maxclass;
  stats->chunksize = chunksize;
  stats->page_size = pagesize;
  stats->dirty_max = opt_dirty_max;
/*

View file

@@ -60,6 +60,7 @@ typedef struct {
  size_t small_max; /* Max quantum-spaced allocation size. */
  size_t large_max; /* Max sub-chunksize allocation size. */
  size_t chunksize; /* Size of each virtual memory mapping. */
  size_t page_size; /* Size of pages. */
  size_t dirty_max; /* Max dirty pages per arena. */
/*
@@ -77,6 +78,70 @@
  size_t bin_unused; /* Bytes committed to a bin but currently unused. */
} jemalloc_stats_t;

enum PtrInfoTag {
  // The pointer is not currently known to the allocator.
  // 'addr' and 'size' are always 0.
  TagUnknown,

  // The pointer is within a live allocation.
  // 'addr' and 'size' describe the allocation.
  TagLiveSmall,
  TagLiveLarge,
  TagLiveHuge,

  // The pointer is within a small freed allocation.
  // 'addr' and 'size' describe the allocation.
  TagFreedSmall,

  // The pointer is within a freed page. Details about the original
  // allocation, including its size, are not available.
  // 'addr' and 'size' describe the page.
  TagFreedPageDirty,
  TagFreedPageDecommitted,
  TagFreedPageMadvised,
  TagFreedPageZeroed,
};
/*
* The information in jemalloc_ptr_info_t could be represented in a variety of
* ways. The chosen representation has the following properties.
* - The number of fields is minimized.
* - The 'tag' field unambiguously defines the meaning of the subsequent fields.
* Helper functions are used to group together related categories of tags.
*/
typedef struct {
  enum PtrInfoTag tag;
  void* addr;   // meaning depends on tag; see above
  size_t size;  // meaning depends on tag; see above
} jemalloc_ptr_info_t;
static inline jemalloc_bool
jemalloc_ptr_is_live(jemalloc_ptr_info_t* info)
{
  return info->tag == TagLiveSmall ||
         info->tag == TagLiveLarge ||
         info->tag == TagLiveHuge;
}

static inline jemalloc_bool
jemalloc_ptr_is_freed(jemalloc_ptr_info_t* info)
{
  return info->tag == TagFreedSmall ||
         info->tag == TagFreedPageDirty ||
         info->tag == TagFreedPageDecommitted ||
         info->tag == TagFreedPageMadvised ||
         info->tag == TagFreedPageZeroed;
}

static inline jemalloc_bool
jemalloc_ptr_is_freed_page(jemalloc_ptr_info_t* info)
{
  return info->tag == TagFreedPageDirty ||
         info->tag == TagFreedPageDecommitted ||
         info->tag == TagFreedPageMadvised ||
         info->tag == TagFreedPageZeroed;
}
#ifdef __cplusplus
} /* extern "C" */
#endif

View file

@@ -261,3 +261,14 @@ replace_jemalloc_thread_local_arena(bool aEnabled)
    hook_table->jemalloc_thread_local_arena_hook(aEnabled);
  }
}
}
void
replace_jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
  gFuncs->jemalloc_ptr_info(aPtr, aInfo);
  const malloc_hook_table_t* hook_table = gHookTable;
  if (hook_table && hook_table->jemalloc_ptr_info_hook) {
    hook_table->jemalloc_ptr_info_hook(aPtr, aInfo);
  }
}

View file

@@ -33,8 +33,10 @@ EXPORTS
    wcsdup=wrap_wcsdup
    _wcsdup=wrap_wcsdup
    jemalloc_stats
    jemalloc_purge_freed_pages
    jemalloc_free_dirty_pages
    jemalloc_thread_local_arena
    jemalloc_ptr_info
    ; A hack to work around the CRT (see giant comment in Makefile.in)
    frex=dumb_free_thunk
#endif

View file

@@ -20,6 +20,7 @@ OS_LDFLAGS += \
  -Wl,-U,_replace_jemalloc_purge_freed_pages \
  -Wl,-U,_replace_jemalloc_free_dirty_pages \
  -Wl,-U,_replace_jemalloc_thread_local_arena \
  -Wl,-U,_replace_jemalloc_ptr_info \
  $(NULL)
EXTRA_DEPS += $(topsrcdir)/mozglue/build/replace_malloc.mk