/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is nsTraceMalloc.c/bloatblame.c code, released
 * April 19, 2000.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 2000
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Brendan Eich, 14-April-2000
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifdef NS_TRACE_MALLOC
/*
 * TODO:
 * - extend logfile so 'F' record tells free stack
 * - diagnose rusty's SMP realloc oldsize corruption bug
 * - #ifdef __linux__/x86 and port to other platforms
 * - unify calltree with gc/boehm somehow (common utility lib?)
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#ifdef XP_UNIX
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prlock.h"
#include "prmon.h"
#include "prprf.h"
#include "prenv.h"
#include "prnetdb.h"
#include "nsTraceMalloc.h"
#include "nscore.h"
#include "prinit.h"
#include "prthread.h"
#include "nsStackWalk.h"

#ifdef XP_WIN32
#include <sys/timeb.h> /* for timeb */
#include <sys/stat.h>  /* for fstat */
#include <io.h>        /* for write */

#include "nsTraceMallocCallbacks.h"

#define WRITE_FLAGS "w"
#endif /* XP_WIN32 */

#ifdef XP_UNIX
#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void    __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif
#endif /* XP_UNIX */

#ifdef XP_WIN32
/* defined in nsWinTraceMalloc.cpp */
void* dhw_orig_malloc(size_t);
void* dhw_orig_calloc(size_t, size_t);
void* dhw_orig_realloc(void*, size_t);
void  dhw_orig_free(void*);

#define __libc_malloc(x)     dhw_orig_malloc(x)
#define __libc_calloc(x, y)  dhw_orig_calloc(x, y)
#define __libc_realloc(x, y) dhw_orig_realloc(x, y)
#define __libc_free(x)       dhw_orig_free(x)
#endif

typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int      fd;
    int      lfd;       /* logical fd, dense among all logfiles */
    char     *buf;
    int      bufsize;
    int      pos;
    uint32   size;
    uint32   simsize;
    logfile  *next;
    logfile  **prevp;
};

static char    default_buf[STARTUP_TMBUFSIZE];
static logfile default_logfile =
    {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile *logfile_list = NULL;
static logfile **logfile_tail = &logfile_list;
static logfile *logfp = &default_logfile;
static PRLock  *tmlock = NULL;
static char    *sdlogname = NULL; /* filename for shutdown leak log */

/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated PR_GetThreadPrivate calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have).
 */
static int tracing_enabled = 1;

/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK()                                                       \
    PR_BEGIN_MACRO                                                            \
        if (tmlock)                                                           \
            PR_Lock(tmlock);                                                  \
    PR_END_MACRO

#define TM_EXIT_LOCK()                                                        \
    PR_BEGIN_MACRO                                                            \
        if (tmlock)                                                           \
            PR_Unlock(tmlock);                                                \
    PR_END_MACRO

/* Used by backtrace. */
typedef struct stack_buffer_info {
    void **buffer;
    size_t size;
    size_t entries;
} stack_buffer_info;

/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */
#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE           DWORD
#define TM_CREATE_TLS_INDEX(i_)     PR_BEGIN_MACRO                            \
                                        (i_) = TlsAlloc();                    \
                                    PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_)    TlsFree((i_))
#define TM_GET_TLS_DATA(i_)         TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)     TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE           pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)     pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_)    pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)         pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)     pthread_setspecific((i_), (v_))

#endif

typedef struct tm_thread tm_thread;
struct tm_thread {
    /*
     * This counter suppresses tracing, in case any tracing code needs
     * to malloc.
     */
    uint32 suppress_tracing;

    /* buffer for backtrace, below */
    stack_buffer_info backtrace_buf;
};

static TM_TLS_INDEX_TYPE tls_index;
static tm_thread main_thread; /* 0-initialization is correct */

/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
PR_STATIC_CALLBACK(void)
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    t->suppress_tracing = 1;
    if (t->backtrace_buf.buffer)
        __libc_free(t->backtrace_buf.buffer);
    __libc_free(t);
}
#endif

static tm_thread *
get_tm_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tmlock) {
        return &main_thread;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress tracing for
         * the malloc below.
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}
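
/*
 * Illustrative sketch (never compiled; example_traced_section is not a real
 * function in this file): the canonical guard pattern that the allocation
 * hooks below follow. Bumping suppress_tracing before taking tmlock ensures
 * that any malloc done by the tracing code itself takes the untraced fast
 * path instead of re-entering the tracer on the same thread.
 */
#if 0
static void
example_traced_section(void)
{
    tm_thread *t = get_tm_thread();

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    /* ... manipulate the calltree, allocations table, log, or tmstats ... */
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}
#endif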

/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32          lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE    32

#define LFD_ZERO(s)     (*(s) = 0)
#define LFD_BIT(i)      ((uint32)1 << (i))
#define LFD_TEST(i,s)   (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)    (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)    (*(s) &= ~LFD_BIT(i))

static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }
    lfd = 0;
retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}

static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd >= 0) {
        fp->size += len;
        bp = fp->buf;
        do {
            cnt = write(fd, bp, len);
            if (cnt <= 0) {
                printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
                return;
            }
            bp += cnt;
            len -= cnt;
        } while (len > 0);
    }
    fp->simsize += len;
}

static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}

static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str);
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem;
        strncpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    strncpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;

    /* Terminate the string. */
    log_byte(fp, '\0');
}

static void log_filename(logfile* fp, const char* filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }
        filename = bp;
    }
    log_string(fp, filename);
}

static void log_uint32(logfile *fp, uint32 ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}

static void log_event1(logfile *fp, char event, uint32 serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32) serial);
}

static void log_event2(logfile *fp, char event, uint32 serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32) size);
}

static void log_event3(logfile *fp, char event, uint32 serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32) size);
}

static void log_event4(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, ui4);
}
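
/*
 * Reader-side sketch (illustrative only, never compiled; a real decoder
 * belongs in the log-reading tools, not here): inverts the variable-length
 * encoding that log_uint32 emits above. The get_byte callback and cookie
 * are hypothetical stand-ins for whatever input stream the reader uses.
 */
#if 0
static uint32
decode_uint32(int (*get_byte)(void *), void *cookie)
{
    uint32 c = (uint32) get_byte(cookie);
    int extra;

    if (c < 0x80)
        return c;                   /* 0xxx xxxx: value fits in 7 bits */
    if ((c & 0xc0) == 0x80) {
        c &= 0x3f; extra = 1;       /* 10xx xxxx: one extra byte */
    } else if ((c & 0xe0) == 0xc0) {
        c &= 0x1f; extra = 2;       /* 110x xxxx: two extra bytes */
    } else if ((c & 0xf0) == 0xe0) {
        c &= 0x0f; extra = 3;       /* 1110 xxxx: three extra bytes */
    } else {
        c = 0; extra = 4;           /* 1111 0000: four extra bytes */
    }
    while (extra-- > 0)
        c = (c << 8) | (uint32) get_byte(cookie);
    return c;
}
#endif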

static void
log_event5(logfile *fp, char event, uint32 serial, uint32 ui2, uint32 ui3,
           uint32 ui4, uint32 ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, ui5);
}

static void
log_event6(logfile *fp, char event, uint32 serial, uint32 ui2, uint32 ui3,
           uint32 ui4, uint32 ui5, uint32 ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, ui6);
}

static void
log_event7(logfile *fp, char event, uint32 serial, uint32 ui2, uint32 ui3,
           uint32 ui4, uint32 ui5, uint32 ui6, uint32 ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, ui7);
}

static void
log_event8(logfile *fp, char event, uint32 serial, uint32 ui2, uint32 ui3,
           uint32 ui4, uint32 ui5, uint32 ui6, uint32 ui7, uint32 ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, ui8);
}

typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32      serial;
    lfd_set     lfdset;
    const char  *name;      /* pointer to string owned by methods table */
    const char  *library;   /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static uint32 tmstats_serial_generator = 0;
static uint32 filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
    {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;

static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ?
calltree_maxstack_top->serial : 0); } static void *generic_alloctable(void *pool, PRSize size) { return __libc_malloc(size); } static void generic_freetable(void *pool, void *item) { __libc_free(item); } typedef struct lfdset_entry { PLHashEntry base; lfd_set lfdset; } lfdset_entry; static PLHashEntry *lfdset_allocentry(void *pool, const void *key) { lfdset_entry *le = __libc_malloc(sizeof *le); if (le) LFD_ZERO(&le->lfdset); return &le->base; } static void lfdset_freeentry(void *pool, PLHashEntry *he, PRUintn flag) { lfdset_entry *le; if (flag != HT_FREE_ENTRY) return; le = (lfdset_entry*) he; __libc_free((void*) le); } static PLHashAllocOps lfdset_hashallocops = { generic_alloctable, generic_freetable, lfdset_allocentry, lfdset_freeentry }; /* Table of library pathnames mapped to to logged 'L' record serial numbers. */ static PLHashTable *libraries = NULL; /* Table of filename pathnames mapped to logged 'G' record serial numbers. */ static PLHashTable *filenames = NULL; /* Table mapping method names to logged 'N' record serial numbers. */ static PLHashTable *methods = NULL; static callsite *calltree(void **stack, size_t num_stack_entries) { logfile *fp = logfp; void *pc; uint32 nkids; callsite *parent, *site, **csp, *tmp; int maxstack; uint32 library_serial, method_serial, filename_serial; const char *library, *method, *filename; char *slash; PLHashNumber hash; PLHashEntry **hep, *he; lfdset_entry *le; size_t stack_index; nsCodeAddressDetails details; nsresult rv; /* * FIXME bug 391749: We should really lock only the minimum amount * that we need to in this function, because it makes some calls * that could lock in the system's shared library loader. */ TM_ENTER_LOCK(); maxstack = (num_stack_entries > tmstats.calltree_maxstack); if (maxstack) { /* these two are the same, although that used to be less clear */ tmstats.calltree_maxstack = num_stack_entries; tmstats.calltree_maxdepth = num_stack_entries; } /* Reverse the stack again, finding and building a path in the tree. */ parent = &calltree_root; stack_index = num_stack_entries; do { --stack_index; pc = stack[stack_index]; csp = &parent->kids; while ((site = *csp) != NULL) { if (site->pc == pc) { tmstats.calltree_kidhits++; /* Put the most recently used site at the front of siblings. */ *csp = site->siblings; site->siblings = parent->kids; parent->kids = site; /* Check whether we've logged for this site and logfile yet. */ if (!LFD_TEST(fp->lfd, &site->lfdset)) { /* * Some other logfile put this site in the calltree. We * must log an event for site, and possibly first for its * method and/or library. Note the code after the while * loop that tests if (!site). */ break; } /* Site already built and logged to fp -- go up the stack. */ goto upward; } tmstats.calltree_kidsteps++; csp = &site->siblings; } if (!site) { tmstats.calltree_kidmisses++; /* Check for recursion: see if pc is on our ancestor line. */ for (site = parent; site; site = site->parent) { if (site->pc == pc) { tmstats.callsite_recurrences++; last_callsite_recurrence = site; goto upward; } } } /* * Not in tree at all, or not logged to fp: let's find our symbolic * callsite info. */ /* * NS_DescribeCodeAddress can (on Linux) acquire a lock inside * the shared library loader. Another thread might call malloc * while holding that lock (when loading a shared library). So * we have to exit tmlock around this call. 
For details, see * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3 * * We could be more efficient by building the nodes in the * calltree, exiting the monitor once to describe all of them, * and then filling in the descriptions for any that hadn't been * described already. But this is easier for now. */ TM_EXIT_LOCK(); rv = NS_DescribeCodeAddress(pc, &details); TM_ENTER_LOCK(); if (NS_FAILED(rv)) { tmstats.dladdr_failures++; goto fail; } /* Check whether we need to emit a library trace record. */ library_serial = 0; library = NULL; if (details.library[0]) { if (!libraries) { libraries = PL_NewHashTable(100, PL_HashString, PL_CompareStrings, PL_CompareValues, &lfdset_hashallocops, NULL); if (!libraries) { tmstats.btmalloc_failures++; goto fail; } } hash = PL_HashString(details.library); hep = PL_HashTableRawLookup(libraries, hash, details.library); he = *hep; if (he) { library = (char*) he->key; library_serial = (uint32) NS_PTR_TO_INT32(he->value); le = (lfdset_entry *) he; if (LFD_TEST(fp->lfd, &le->lfdset)) { /* We already logged an event on fp for this library. */ le = NULL; } } else { library = strdup(details.library); if (library) { library_serial = ++library_serial_generator; he = PL_HashTableRawAdd(libraries, hep, hash, library, (void*) library_serial); } if (!he) { tmstats.btmalloc_failures++; goto fail; } le = (lfdset_entry *) he; } if (le) { /* Need to log an event to fp for this lib. */ slash = strrchr(library, '/'); log_event1(fp, TM_EVENT_LIBRARY, library_serial); log_string(fp, slash ? slash + 1 : library); LFD_SET(fp->lfd, &le->lfdset); } } /* For compatibility with current log format, always emit a * filename trace record, using "noname" / 0 when no file name * is available. */ filename_serial = 0; filename = details.filename[0] ? details.filename : "noname"; if (!filenames) { filenames = PL_NewHashTable(100, PL_HashString, PL_CompareStrings, PL_CompareValues, &lfdset_hashallocops, NULL); if (!filenames) { tmstats.btmalloc_failures++; return NULL; } } hash = PL_HashString(filename); hep = PL_HashTableRawLookup(filenames, hash, filename); he = *hep; if (he) { filename = (char*) he->key; filename_serial = (uint32) NS_PTR_TO_INT32(he->value); le = (lfdset_entry *) he; if (LFD_TEST(fp->lfd, &le->lfdset)) { /* We already logged an event on fp for this filename. */ le = NULL; } } else { filename = strdup(filename); if (filename) { filename_serial = ++filename_serial_generator; he = PL_HashTableRawAdd(filenames, hep, hash, filename, (void*) filename_serial); } if (!he) { tmstats.btmalloc_failures++; return NULL; } le = (lfdset_entry *) he; } if (le) { /* Need to log an event to fp for this filename. */ log_event1(fp, TM_EVENT_FILENAME, filename_serial); log_filename(fp, filename); LFD_SET(fp->lfd, &le->lfdset); } if (!details.function[0]) { PR_snprintf(details.function, sizeof(details.function), "%s+%X", library ? library : "main", details.loffset); } /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */ method_serial = 0; if (!methods) { methods = PL_NewHashTable(10000, PL_HashString, PL_CompareStrings, PL_CompareValues, &lfdset_hashallocops, NULL); if (!methods) { tmstats.btmalloc_failures++; goto fail; } } hash = PL_HashString(details.function); hep = PL_HashTableRawLookup(methods, hash, details.function); he = *hep; if (he) { method = (char*) he->key; method_serial = (uint32) NS_PTR_TO_INT32(he->value); le = (lfdset_entry *) he; if (LFD_TEST(fp->lfd, &le->lfdset)) { /* We already logged an event on fp for this method. 
*/ le = NULL; } } else { method = strdup(details.function); if (method) { method_serial = ++method_serial_generator; he = PL_HashTableRawAdd(methods, hep, hash, method, (void*) method_serial); } if (!he) { tmstats.btmalloc_failures++; return NULL; } le = (lfdset_entry *) he; } if (le) { log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial, filename_serial, details.lineno); log_string(fp, method); LFD_SET(fp->lfd, &le->lfdset); } /* Create a new callsite record. */ if (!site) { site = __libc_malloc(sizeof(callsite)); if (!site) { tmstats.btmalloc_failures++; goto fail; } /* Update parent and max-kids-per-parent stats. */ if (!parent->kids) tmstats.calltree_parents++; nkids = 1; for (tmp = parent->kids; tmp; tmp = tmp->siblings) nkids++; if (nkids > tmstats.calltree_maxkids) { tmstats.calltree_maxkids = nkids; calltree_maxkids_parent = parent; } /* Insert the new site into the tree. */ site->pc = pc; site->serial = ++callsite_serial_generator; LFD_ZERO(&site->lfdset); site->name = method; site->library = library; site->offset = details.loffset; site->parent = parent; site->siblings = parent->kids; parent->kids = site; site->kids = NULL; } /* Log the site with its parent, method, and offset. */ log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial, method_serial, details.foffset); LFD_SET(fp->lfd, &site->lfdset); upward: parent = site; } while (stack_index > 0); if (maxstack) calltree_maxstack_top = site; TM_EXIT_LOCK(); return site; fail: TM_EXIT_LOCK(); return NULL; } /* buffer the stack so that we can reverse it */ PR_STATIC_CALLBACK(void) stack_callback(void *pc, void *closure) { stack_buffer_info *info = (stack_buffer_info*) closure; /* * If we run out of buffer, keep incrementing entries so that * backtrace can call us again with a bigger buffer. */ if (info->entries < info->size) info->buffer[info->entries] = pc; ++info->entries; } /* * The caller MUST NOT be holding tmlock when calling backtrace. */ callsite * backtrace(tm_thread *t, int skip) { callsite *site; stack_buffer_info *info = &t->backtrace_buf; void ** new_stack_buffer; size_t new_stack_buffer_size; t->suppress_tracing++; /* * NS_StackWalk can (on Windows) acquire a lock the shared library * loader. Another thread might call malloc while holding that lock * (when loading a shared library). So we can't be in tmlock during * this call. For details, see * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8 */ /* skip == 0 means |backtrace| should show up, so don't use skip + 1 */ /* NB: this call is repeated below if the buffer is too small */ info->entries = 0; NS_StackWalk(stack_callback, skip, info); /* * To avoid allocating in stack_callback (which, on Windows, is * called on a different thread from the one we're running on here), * reallocate here if it didn't have a big enough buffer (which * includes the first call on any thread), and call it again. 
*/ if (info->entries > info->size) { new_stack_buffer_size = 2 * info->entries; new_stack_buffer = __libc_realloc(info->buffer, new_stack_buffer_size * sizeof(void*)); if (!new_stack_buffer) return NULL; info->buffer = new_stack_buffer; info->size = new_stack_buffer_size; /* and call NS_StackWalk again */ info->entries = 0; NS_StackWalk(stack_callback, skip, info); PR_ASSERT(info->entries * 2 == new_stack_buffer_size); /* same stack */ } if (info->entries == 0) { t->suppress_tracing--; return NULL; } site = calltree(info->buffer, info->entries); TM_ENTER_LOCK(); tmstats.backtrace_calls++; if (!site) { tmstats.backtrace_failures++; PR_ASSERT(tmstats.backtrace_failures < 100); } TM_EXIT_LOCK(); t->suppress_tracing--; return site; } typedef struct allocation { PLHashEntry entry; size_t size; FILE *trackfp; /* for allocation tracking */ } allocation; #define ALLOC_HEAP_SIZE 150000 static allocation alloc_heap[ALLOC_HEAP_SIZE]; static allocation *alloc_freelist = NULL; static int alloc_heap_initialized = 0; static PLHashEntry *alloc_allocentry(void *pool, const void *key) { allocation **listp, *alloc; int n; if (!alloc_heap_initialized) { n = ALLOC_HEAP_SIZE; listp = &alloc_freelist; for (alloc = alloc_heap; --n >= 0; alloc++) { *listp = alloc; listp = (allocation**) &alloc->entry.next; } *listp = NULL; alloc_heap_initialized = 1; } listp = &alloc_freelist; alloc = *listp; if (!alloc) return __libc_malloc(sizeof(allocation)); *listp = (allocation*) alloc->entry.next; return &alloc->entry; } static void alloc_freeentry(void *pool, PLHashEntry *he, PRUintn flag) { allocation *alloc; if (flag != HT_FREE_ENTRY) return; alloc = (allocation*) he; if ((PRUptrdiff)(alloc - alloc_heap) < (PRUptrdiff)ALLOC_HEAP_SIZE) { alloc->entry.next = &alloc_freelist->entry; alloc_freelist = alloc; } else { __libc_free((void*) alloc); } } static PLHashAllocOps alloc_hashallocops = { generic_alloctable, generic_freetable, alloc_allocentry, alloc_freeentry }; static PLHashNumber hash_pointer(const void *key) { return (PLHashNumber) key; } static PLHashTable *allocations = NULL; static PLHashTable *new_allocations(void) { allocations = PL_NewHashTable(200000, hash_pointer, PL_CompareValues, PL_CompareValues, &alloc_hashallocops, NULL); return allocations; } #define get_allocations() (allocations ? allocations : new_allocations()) #ifdef XP_UNIX NS_EXTERNAL_VIS_(__ptr_t) malloc(size_t size) { PRUint32 start, end; __ptr_t ptr; callsite *site; PLHashEntry *he; allocation *alloc; tm_thread *t; if (!tracing_enabled || !PR_Initialized() || (t = get_tm_thread())->suppress_tracing != 0) { return __libc_malloc(size); } start = PR_IntervalNow(); ptr = __libc_malloc(size); end = PR_IntervalNow(); site = backtrace(t, 1); t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.malloc_calls++; if (!ptr) { tmstats.malloc_failures++; } else { if (site) log_event5(logfp, TM_EVENT_MALLOC, site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size); if (get_allocations()) { he = PL_HashTableAdd(allocations, ptr, site); if (he) { alloc = (allocation*) he; alloc->size = size; alloc->trackfp = NULL; } } } TM_EXIT_LOCK(); t->suppress_tracing--; return ptr; } NS_EXTERNAL_VIS_(__ptr_t) calloc(size_t count, size_t size) { PRUint32 start, end; __ptr_t ptr; callsite *site; PLHashEntry *he; allocation *alloc; tm_thread *t; /** * During the initialization of the glibc/libpthread, and * before main() is running, ld-linux.so.2 tries to allocate memory * using calloc (call from _dl_tls_setup). 
* * Thus, our calloc replacement is invoked too early, tries to * initialize NSPR, which calls dlopen, which calls into the dl * -> crash. * * Delaying NSPR calls until NSPR is initialized helps. */ if (!tracing_enabled || !PR_Initialized() || (t = get_tm_thread())->suppress_tracing != 0) { return __libc_calloc(count, size); } start = PR_IntervalNow(); ptr = __libc_calloc(count, size); end = PR_IntervalNow(); site = backtrace(t, 1); t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.calloc_calls++; if (!ptr) { tmstats.calloc_failures++; } else { size *= count; if (site) { log_event5(logfp, TM_EVENT_CALLOC, site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size); } if (get_allocations()) { he = PL_HashTableAdd(allocations, ptr, site); if (he) { alloc = (allocation*) he; alloc->size = size; alloc->trackfp = NULL; } } } TM_EXIT_LOCK(); t->suppress_tracing--; return ptr; } NS_EXTERNAL_VIS_(__ptr_t) realloc(__ptr_t ptr, size_t size) { PRUint32 start, end; __ptr_t oldptr; callsite *oldsite, *site; size_t oldsize; PLHashNumber hash; PLHashEntry **hep, *he; allocation *alloc; FILE *trackfp = NULL; tm_thread *t; if (!tracing_enabled || !PR_Initialized() || (t = get_tm_thread())->suppress_tracing != 0) { return __libc_realloc(ptr, size); } t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.realloc_calls++; if (PR_TRUE) { oldptr = ptr; oldsite = NULL; oldsize = 0; he = NULL; if (oldptr && get_allocations()) { hash = hash_pointer(oldptr); hep = PL_HashTableRawLookup(allocations, hash, oldptr); he = *hep; if (he) { oldsite = (callsite*) he->value; alloc = (allocation*) he; oldsize = alloc->size; trackfp = alloc->trackfp; if (trackfp) { fprintf(alloc->trackfp, "\nrealloc(%p, %lu), oldsize %lu, alloc site %p\n", (void*) ptr, (unsigned long) size, (unsigned long) oldsize, (void*) oldsite); NS_TraceStack(1, trackfp); } } } } TM_EXIT_LOCK(); t->suppress_tracing--; start = PR_IntervalNow(); ptr = __libc_realloc(ptr, size); end = PR_IntervalNow(); site = backtrace(t, 1); t->suppress_tracing++; TM_ENTER_LOCK(); if (!ptr && size) { /* * When realloc() fails, the original block is not freed or moved, so * we'll leave the allocation entry untouched. */ tmstats.realloc_failures++; } else { if (site) { log_event8(logfp, TM_EVENT_REALLOC, site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size, oldsite ? oldsite->serial : 0, (uint32)NS_PTR_TO_INT32(oldptr), oldsize); } if (ptr && allocations) { if (ptr != oldptr) { /* * If we're reallocating (not merely allocating new space by * passing null to realloc) and realloc has moved the block, * free oldptr. */ if (he) PL_HashTableRemove(allocations, oldptr); /* Record the new allocation now, setting he. */ he = PL_HashTableAdd(allocations, ptr, site); } else { /* * If we haven't yet recorded an allocation (possibly due to * a temporary memory shortage), do it now. */ if (!he) he = PL_HashTableAdd(allocations, ptr, site); } if (he) { alloc = (allocation*) he; alloc->size = size; alloc->trackfp = trackfp; } } } TM_EXIT_LOCK(); t->suppress_tracing--; return ptr; } NS_EXTERNAL_VIS_(void*) valloc(size_t size) { PRUint32 start, end; __ptr_t ptr; callsite *site; PLHashEntry *he; allocation *alloc; tm_thread *t; if (!tracing_enabled || !PR_Initialized() || (t = get_tm_thread())->suppress_tracing != 0) { return __libc_valloc(size); } start = PR_IntervalNow(); ptr = __libc_valloc(size); end = PR_IntervalNow(); site = backtrace(t, 1); t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.malloc_calls++; /* XXX valloc_calls ? 
*/ if (!ptr) { tmstats.malloc_failures++; /* XXX valloc_failures ? */ } else { if (site) log_event5(logfp, TM_EVENT_MALLOC, /* XXX TM_EVENT_VALLOC? */ site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size); if (get_allocations()) { he = PL_HashTableAdd(allocations, ptr, site); if (he) { alloc = (allocation*) he; alloc->size = size; alloc->trackfp = NULL; } } } TM_EXIT_LOCK(); t->suppress_tracing--; return ptr; } NS_EXTERNAL_VIS_(void*) memalign(size_t boundary, size_t size) { PRUint32 start, end; __ptr_t ptr; callsite *site; PLHashEntry *he; allocation *alloc; tm_thread *t; if (!tracing_enabled || !PR_Initialized() || (t = get_tm_thread())->suppress_tracing != 0) { return __libc_memalign(boundary, size); } start = PR_IntervalNow(); ptr = __libc_memalign(boundary, size); end = PR_IntervalNow(); site = backtrace(t, 1); t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.malloc_calls++; /* XXX memalign_calls ? */ if (!ptr) { tmstats.malloc_failures++; /* XXX memalign_failures ? */ } else { if (site) { log_event5(logfp, TM_EVENT_MALLOC, /* XXX TM_EVENT_MEMALIGN? */ site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size); } if (get_allocations()) { he = PL_HashTableAdd(allocations, ptr, site); if (he) { alloc = (allocation*) he; alloc->size = size; alloc->trackfp = NULL; } } } TM_EXIT_LOCK(); t->suppress_tracing--; return ptr; } NS_EXTERNAL_VIS_(int) posix_memalign(void **memptr, size_t alignment, size_t size) { __ptr_t ptr = memalign(alignment, size); if (!ptr) return ENOMEM; *memptr = ptr; return 0; } NS_EXTERNAL_VIS_(void) free(__ptr_t ptr) { PLHashEntry **hep, *he; callsite *site; allocation *alloc; uint32 serial = 0, size = 0; PRUint32 start, end; tm_thread *t; if (!tracing_enabled || !PR_Initialized() || (t = get_tm_thread())->suppress_tracing != 0) { __libc_free(ptr); return; } t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.free_calls++; if (!ptr) { tmstats.null_free_calls++; } else { if (get_allocations()) { hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr); he = *hep; if (he) { site = (callsite*) he->value; if (site) { alloc = (allocation*) he; serial = site->serial; size = alloc->size; if (alloc->trackfp) { fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n", (void*) ptr, (void*) site); NS_TraceStack(1, alloc->trackfp); } } PL_HashTableRawRemove(allocations, hep, he); } } } TM_EXIT_LOCK(); t->suppress_tracing--; start = PR_IntervalNow(); __libc_free(ptr); end = PR_IntervalNow(); if (size != 0) { t->suppress_tracing++; TM_ENTER_LOCK(); log_event5(logfp, TM_EVENT_FREE, serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size); TM_EXIT_LOCK(); t->suppress_tracing--; } } NS_EXTERNAL_VIS_(void) cfree(void *ptr) { free(ptr); } #endif /* XP_UNIX */ static const char magic[] = NS_TRACE_MALLOC_MAGIC; static void log_header(int logfd) { uint32 ticksPerSec = PR_htonl(PR_TicksPerSecond()); (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE); (void) write(logfd, &ticksPerSec, sizeof ticksPerSec); } PR_IMPLEMENT(void) NS_TraceMallocStartup(int logfd) { /* We must be running on the primordial thread. */ PR_ASSERT(tracing_enabled == 1); PR_ASSERT(logfp == &default_logfile); tracing_enabled = (logfd >= 0); if (tracing_enabled) { PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */ /* Log everything in logfp (aka default_logfile)'s buffer to logfd. 
*/ logfp->fd = logfd; logfile_list = &default_logfile; logfp->prevp = &logfile_list; logfile_tail = &logfp->next; log_header(logfd); } atexit(NS_TraceMallocShutdown); /* * We only allow one thread until NS_TraceMallocStartup is called. * When it is, we have to initialize tls_index before allocating tmlock * since get_tm_index uses NULL-tmlock to detect tls_index being * uninitialized. */ main_thread.suppress_tracing++; TM_CREATE_TLS_INDEX(tls_index); TM_SET_TLS_DATA(tls_index, &main_thread); tmlock = PR_NewLock(); main_thread.suppress_tracing--; #ifdef XP_WIN32 /* Register listeners for win32. */ if (tracing_enabled) { StartupHooker(); } #endif } /* * Options for log files, with the log file name either as the next option * or separated by '=' (e.g. "./mozilla --trace-malloc * malloc.log" or * "./mozilla --trace-malloc=malloc.log"). */ static const char TMLOG_OPTION[] = "--trace-malloc"; static const char SDLOG_OPTION[] = "--shutdown-leaks"; #define SHOULD_PARSE_ARG(name_, log_, arg_) \ (0 == strncmp(arg_, name_, sizeof(name_) - 1)) #define PARSE_ARG(name_, log_, argv_, i_, consumed_) \ PR_BEGIN_MACRO \ char _nextchar = argv_[i_][sizeof(name_) - 1]; \ if (_nextchar == '=') { \ log_ = argv_[i_] + sizeof(name_); \ consumed_ = 1; \ } else if (_nextchar == '\0') { \ log_ = argv_[i_+1]; \ consumed_ = 2; \ } \ PR_END_MACRO PR_IMPLEMENT(int) NS_TraceMallocStartupArgs(int argc, char* argv[]) { int i, logfd = -1, consumed, logflags; char *tmlogname = NULL; /* note global |sdlogname| */ /* * Look for the --trace-malloc option early, to avoid missing * early mallocs (we miss static constructors whose output overflows the * log file's static 16K output buffer). */ for (i = 1; i < argc; i += consumed) { consumed = 0; if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i])) PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed); else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname, argv[i])) PARSE_ARG(SDLOG_OPTION, sdlogname, argv, i, consumed); if (consumed) { #ifndef XP_WIN32 /* If we don't comment this out, it will crash Windows. */ int j; /* Now remove --trace-malloc and its argument from argv. */ argc -= consumed; for (j = i; j < argc; ++j) argv[j] = argv[j+consumed]; argv[argc] = NULL; consumed = 0; /* don't advance next iteration */ #endif } else { consumed = 1; } } if (tmlogname) { #ifdef XP_UNIX int pipefds[2]; #endif switch (*tmlogname) { #ifdef XP_UNIX case '|': if (pipe(pipefds) == 0) { pid_t pid = fork(); if (pid == 0) { /* In child: set up stdin, parse args, and exec. */ int maxargc, nargc; char **nargv, *token; if (pipefds[0] != 0) { dup2(pipefds[0], 0); close(pipefds[0]); } close(pipefds[1]); tmlogname = strtok(tmlogname + 1, " \t"); maxargc = 3; nargv = (char **) malloc((maxargc+1) * sizeof(char *)); if (!nargv) exit(1); nargc = 0; nargv[nargc++] = tmlogname; while ((token = strtok(NULL, " \t")) != NULL) { if (nargc == maxargc) { maxargc *= 2; nargv = (char**) realloc(nargv, (maxargc+1) * sizeof(char*)); if (!nargv) exit(1); } nargv[nargc++] = token; } nargv[nargc] = NULL; (void) setsid(); execvp(tmlogname, nargv); exit(127); } if (pid > 0) { /* In parent: set logfd to the pipe's write side. */ close(pipefds[0]); logfd = pipefds[1]; } } if (logfd < 0) { fprintf(stderr, "%s: can't pipe to trace-malloc child process %s: %s\n", argv[0], tmlogname, strerror(errno)); exit(1); } break; #endif /*XP_UNIX*/ case '-': /* Don't log from startup, but do prepare to log later. 
*/ /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */ if (tmlogname[1] == '\0') break; /* FALL THROUGH */ default: logflags = O_CREAT | O_WRONLY | O_TRUNC; #if defined(XP_WIN32) /* * Avoid translations on WIN32. */ logflags |= O_BINARY; #endif logfd = open(tmlogname, logflags, 0644); if (logfd < 0) { fprintf(stderr, "%s: can't create trace-malloc log named %s: %s\n", argv[0], tmlogname, strerror(errno)); exit(1); } break; } } NS_TraceMallocStartup(logfd); return argc; } PR_IMPLEMENT(void) NS_TraceMallocShutdown() { logfile *fp; if (sdlogname) NS_TraceMallocDumpAllocations(sdlogname); if (tmstats.backtrace_failures) { fprintf(stderr, "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n", (unsigned long) tmstats.backtrace_failures, (unsigned long) tmstats.btmalloc_failures, (unsigned long) tmstats.dladdr_failures); } while ((fp = logfile_list) != NULL) { logfile_list = fp->next; log_tmstats(fp); flush_logfile(fp); if (fp->fd >= 0) { close(fp->fd); fp->fd = -1; } if (fp != &default_logfile) { if (fp == logfp) logfp = &default_logfile; free((void*) fp); } } if (tmlock) { PRLock *lock = tmlock; tmlock = NULL; PR_DestroyLock(lock); } #ifdef XP_WIN32 if (tracing_enabled) { ShutdownHooker(); } #endif } PR_IMPLEMENT(void) NS_TraceMallocDisable() { logfile *fp; tm_thread *t = get_tm_thread(); t->suppress_tracing++; TM_ENTER_LOCK(); for (fp = logfile_list; fp; fp = fp->next) flush_logfile(fp); tracing_enabled = 0; TM_EXIT_LOCK(); t->suppress_tracing--; } PR_IMPLEMENT(void) NS_TraceMallocEnable() { tm_thread *t = get_tm_thread(); t->suppress_tracing++; TM_ENTER_LOCK(); tracing_enabled = 1; TM_EXIT_LOCK(); t->suppress_tracing--; } PR_IMPLEMENT(int) NS_TraceMallocChangeLogFD(int fd) { logfile *oldfp, *fp; struct stat sb; tm_thread *t = get_tm_thread(); t->suppress_tracing++; TM_ENTER_LOCK(); oldfp = logfp; if (oldfp->fd != fd) { flush_logfile(oldfp); fp = get_logfile(fd); if (!fp) { TM_EXIT_LOCK(); t->suppress_tracing--; return -2; } if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0) log_header(fd); logfp = fp; } TM_EXIT_LOCK(); t->suppress_tracing--; return oldfp->fd; } static PRIntn lfd_clr_enumerator(PLHashEntry *he, PRIntn i, void *arg) { lfdset_entry *le = (lfdset_entry*) he; logfile *fp = (logfile*) arg; LFD_CLR(fp->lfd, &le->lfdset); return HT_ENUMERATE_NEXT; } static void lfd_clr_walk(callsite *site, logfile *fp) { callsite *kid; LFD_CLR(fp->lfd, &site->lfdset); for (kid = site->kids; kid; kid = kid->siblings) lfd_clr_walk(kid, fp); } PR_IMPLEMENT(void) NS_TraceMallocCloseLogFD(int fd) { logfile *fp; tm_thread *t = get_tm_thread(); t->suppress_tracing++; TM_ENTER_LOCK(); fp = get_logfile(fd); if (fp) { flush_logfile(fp); if (fp == &default_logfile) { /* Leave default_logfile in logfile_list with an fd of -1. */ fp->fd = -1; /* NB: we can never free lfd 0, it belongs to default_logfile. */ PR_ASSERT(fp->lfd == 0); } else { /* Clear fp->lfd in all possible lfdsets. */ PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp); PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp); lfd_clr_walk(&calltree_root, fp); /* Unlink fp from logfile_list, freeing lfd for reallocation. */ *fp->prevp = fp->next; if (!fp->next) { PR_ASSERT(logfile_tail == &fp->next); logfile_tail = fp->prevp; } /* Reset logfp if we must, then free fp. 
*/ if (fp == logfp) logfp = &default_logfile; free((void*) fp); } } TM_EXIT_LOCK(); t->suppress_tracing--; close(fd); } PR_IMPLEMENT(void) NS_TraceMallocLogTimestamp(const char *caption) { logfile *fp; #ifdef XP_UNIX struct timeval tv; #endif #ifdef XP_WIN32 struct _timeb tb; #endif tm_thread *t = get_tm_thread(); t->suppress_tracing++; TM_ENTER_LOCK(); fp = logfp; log_byte(fp, TM_EVENT_TIMESTAMP); #ifdef XP_UNIX gettimeofday(&tv, NULL); log_uint32(fp, (uint32) tv.tv_sec); log_uint32(fp, (uint32) tv.tv_usec); #endif #ifdef XP_WIN32 _ftime(&tb); log_uint32(fp, (uint32) tb.time); log_uint32(fp, (uint32) tb.millitm); #endif log_string(fp, caption); TM_EXIT_LOCK(); t->suppress_tracing--; } static PRIntn allocation_enumerator(PLHashEntry *he, PRIntn i, void *arg) { allocation *alloc = (allocation*) he; FILE *ofp = (FILE*) arg; callsite *site = (callsite*) he->value; extern const char* nsGetTypeName(const void* ptr); unsigned long *p, *end; fprintf(ofp, "%p <%s> (%lu)\n", he->key, nsGetTypeName(he->key), (unsigned long) alloc->size); for (p = (unsigned long*) he->key, end = (unsigned long*) ((char*)he->key + alloc->size); p < end; ++p) fprintf(ofp, "\t0x%08lX\n", *p); while (site) { if (site->name || site->parent) { fprintf(ofp, "%s[%s +0x%X]\n", site->name, site->library, site->offset); } site = site->parent; } fputc('\n', ofp); return HT_ENUMERATE_NEXT; } PR_IMPLEMENT(void) NS_TraceStack(int skip, FILE *ofp) { callsite *site; tm_thread *t = get_tm_thread(); site = backtrace(t, skip + 1); while (site) { if (site->name || site->parent) { fprintf(ofp, "%s[%s +0x%X]\n", site->name, site->library, site->offset); } site = site->parent; } } PR_IMPLEMENT(int) NS_TraceMallocDumpAllocations(const char *pathname) { FILE *ofp; int rv; ofp = fopen(pathname, WRITE_FLAGS); if (!ofp) return -1; if (allocations) PL_HashTableEnumerateEntries(allocations, allocation_enumerator, ofp); rv = ferror(ofp) ? 
-1 : 0; fclose(ofp); return rv; } PR_IMPLEMENT(void) NS_TraceMallocFlushLogfiles() { logfile *fp; tm_thread *t = get_tm_thread(); t->suppress_tracing++; TM_ENTER_LOCK(); for (fp = logfile_list; fp; fp = fp->next) flush_logfile(fp); TM_EXIT_LOCK(); t->suppress_tracing--; } PR_IMPLEMENT(void) NS_TrackAllocation(void* ptr, FILE *ofp) { PLHashEntry **hep; allocation *alloc; tm_thread *t = get_tm_thread(); fprintf(ofp, "Trying to track %p\n", (void*) ptr); setlinebuf(ofp); t->suppress_tracing++; TM_ENTER_LOCK(); if (get_allocations()) { hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr); alloc = (allocation*) *hep; if (alloc) { fprintf(ofp, "Tracking %p\n", (void*) ptr); alloc->trackfp = ofp; } else { fprintf(ofp, "Not tracking %p\n", (void*) ptr); } } TM_EXIT_LOCK(); t->suppress_tracing--; } #ifdef XP_WIN32 PR_IMPLEMENT(void) MallocCallback(void *ptr, size_t size, PRUint32 start, PRUint32 end) { callsite *site; PLHashEntry *he; allocation *alloc; tm_thread *t; if (!tracing_enabled || (t = get_tm_thread())->suppress_tracing != 0) return; site = backtrace(t, 2); t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.malloc_calls++; if (!ptr) { tmstats.malloc_failures++; } else { if (site) log_event5(logfp, TM_EVENT_MALLOC, site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size); if (get_allocations()) { he = PL_HashTableAdd(allocations, ptr, site); if (he) { alloc = (allocation*) he; alloc->size = size; } } } TM_EXIT_LOCK(); t->suppress_tracing--; } PR_IMPLEMENT(void) CallocCallback(void *ptr, size_t count, size_t size, PRUint32 start, PRUint32 end) { callsite *site; PLHashEntry *he; allocation *alloc; tm_thread *t; if (!tracing_enabled || (t = get_tm_thread())->suppress_tracing != 0) return; site = backtrace(t, 2); t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.calloc_calls++; if (!ptr) { tmstats.calloc_failures++; } else { size *= count; if (site) log_event5(logfp, TM_EVENT_CALLOC, site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size); if (get_allocations()) { he = PL_HashTableAdd(allocations, ptr, site); if (he) { alloc = (allocation*) he; alloc->size = size; } } } TM_EXIT_LOCK(); t->suppress_tracing--; } PR_IMPLEMENT(void) ReallocCallback(void * oldptr, void *ptr, size_t size, PRUint32 start, PRUint32 end) { callsite *oldsite, *site; size_t oldsize; PLHashNumber hash; PLHashEntry **hep, *he; allocation *alloc; tm_thread *t; if (!tracing_enabled || (t = get_tm_thread())->suppress_tracing != 0) return; site = backtrace(t, 2); t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.realloc_calls++; if (PR_TRUE) { oldsite = NULL; oldsize = 0; he = NULL; if (oldptr && get_allocations()) { hash = hash_pointer(oldptr); hep = PL_HashTableRawLookup(allocations, hash, oldptr); he = *hep; if (he) { oldsite = (callsite*) he->value; alloc = (allocation*) he; oldsize = alloc->size; } } } if (!ptr && size) { tmstats.realloc_failures++; /* * When realloc() fails, the original block is not freed or moved, so * we'll leave the allocation entry untouched. */ } else { if (site) { log_event8(logfp, TM_EVENT_REALLOC, site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), size, oldsite ? oldsite->serial : 0, (uint32)NS_PTR_TO_INT32(oldptr), oldsize); } if (ptr && allocations) { if (ptr != oldptr) { /* * If we're reallocating (not allocating new space by passing * null to realloc) and realloc moved the block, free oldptr. */ if (he) PL_HashTableRawRemove(allocations, hep, he); /* Record the new allocation now, setting he. 
*/ he = PL_HashTableAdd(allocations, ptr, site); } else { /* * If we haven't yet recorded an allocation (possibly due to a * temporary memory shortage), do it now. */ if (!he) he = PL_HashTableAdd(allocations, ptr, site); } if (he) { alloc = (allocation*) he; alloc->size = size; } } } TM_EXIT_LOCK(); t->suppress_tracing--; } PR_IMPLEMENT(void) FreeCallback(void * ptr, PRUint32 start, PRUint32 end) { PLHashEntry **hep, *he; callsite *site; allocation *alloc; tm_thread *t; if (!tracing_enabled || (t = get_tm_thread())->suppress_tracing != 0) return; t->suppress_tracing++; TM_ENTER_LOCK(); tmstats.free_calls++; if (!ptr) { tmstats.null_free_calls++; } else { if (get_allocations()) { hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr); he = *hep; if (he) { site = (callsite*) he->value; if (site) { alloc = (allocation*) he; log_event5(logfp, TM_EVENT_FREE, site->serial, start, end - start, (uint32)NS_PTR_TO_INT32(ptr), alloc->size); } PL_HashTableRawRemove(allocations, hep, he); } } } TM_EXIT_LOCK(); t->suppress_tracing--; } #endif /*XP_WIN32*/ #endif /* NS_TRACE_MALLOC */
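
/*
 * Usage sketch (illustrative only, never compiled into this file): how an
 * embedding application typically drives trace-malloc. The log file names
 * below are hypothetical.
 */
#if 0
int main(int argc, char **argv)
{
    /*
     * Consumes --trace-malloc=<file> and --shutdown-leaks=<file> (and, on
     * non-Windows platforms, removes them from argv), then starts logging.
     */
    argc = NS_TraceMallocStartupArgs(argc, argv);

    /* ... application runs; malloc/calloc/realloc/free are traced ... */

    NS_TraceMallocLogTimestamp("after startup");
    NS_TraceMallocDumpAllocations("live-allocations.txt");

    /* NS_TraceMallocShutdown is registered via atexit() during startup. */
    return 0;
}
#endif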