/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is nsTraceMalloc.c/bloatblame.c code, released
 * April 19, 2000.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 2000
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Brendan Eich, 14-April-2000
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifdef NS_TRACE_MALLOC

/*
 * TODO:
 * - extend logfile so 'F' record tells free stack
 * - diagnose rusty's SMP realloc oldsize corruption bug
 * - #ifdef __linux__/x86 and port to other platforms
 * - unify calltree with gc/boehm somehow (common utility lib?)
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#ifdef XP_UNIX
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prlock.h"
#include "prmon.h"
#include "prprf.h"
#include "prenv.h"
#include "prnetdb.h"
#include "nsTraceMalloc.h"
#include "nscore.h"
#include "prinit.h"
#include "prthread.h"
#include "nsStackWalk.h"
#include "nsTraceMallocCallbacks.h"

#ifdef XP_WIN32
#include <sys/timeb.h>          /* for timeb */
#include <sys/stat.h>           /* for fstat */

#include <io.h>                 /* for write */

#define WRITE_FLAGS "w"

#endif /* WIN32 */

#ifdef XP_UNIX
#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void    __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif

#endif /* XP_UNIX */

#ifdef XP_WIN32

#define __libc_malloc(x)        dhw_orig_malloc(x)
#define __libc_calloc(x, y)     dhw_orig_calloc(x,y)
#define __libc_realloc(x, y)    dhw_orig_realloc(x,y)
#define __libc_free(x)          dhw_orig_free(x)

#endif

typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int         fd;
    int         lfd;            /* logical fd, dense among all logfiles */
    char        *buf;
    int         bufsize;
    int         pos;
    uint32      size;
    uint32      simsize;
    logfile     *next;
    logfile     **prevp;
};

static char      default_buf[STARTUP_TMBUFSIZE];
static logfile   default_logfile =
                   {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile   *logfile_list = NULL;
static logfile   **logfile_tail = &logfile_list;
static logfile   *logfp = &default_logfile;
static PRLock    *tmlock = NULL;
static char      *sdlogname = NULL; /* filename for shutdown leak log */

/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated PR_GetThreadPrivate calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have).
 */
static int tracing_enabled = 1;

/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK()                                                       \
    PR_BEGIN_MACRO                                                            \
        if (tmlock)                                                           \
            PR_Lock(tmlock);                                                  \
    PR_END_MACRO

#define TM_EXIT_LOCK()                                                        \
    PR_BEGIN_MACRO                                                            \
        if (tmlock)                                                           \
            PR_Unlock(tmlock);                                                \
    PR_END_MACRO
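
/*
 * Throughout this file the allocation wrappers follow the same discipline:
 * bump the per-thread suppress_tracing count before taking tmlock and drop
 * it only after releasing the lock, so that any malloc done by our own
 * bookkeeping is not traced and cannot try to re-enter the lock.
 */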

/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */

#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE               DWORD
#define TM_CREATE_TLS_INDEX(i_)         PR_BEGIN_MACRO                        \
                                          (i_) = TlsAlloc();                  \
                                        PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_)        TlsFree((i_))
#define TM_GET_TLS_DATA(i_)             TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)         TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE               pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)         pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_)        pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)             pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)         pthread_setspecific((i_), (v_))

#endif

static TM_TLS_INDEX_TYPE tls_index;
static tm_thread main_thread; /* 0-initialization is correct */

/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
PR_STATIC_CALLBACK(void)
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    if (t->in_heap) {
        t->suppress_tracing = 1;
        if (t->backtrace_buf.buffer)
            __libc_free(t->backtrace_buf.buffer);

        __libc_free(t);
    }
}
#endif

tm_thread *
tm_get_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tmlock) {
        return &main_thread;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress for the
         * malloc below
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}

/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32          lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE    32

#define LFD_ZERO(s)     (*(s) = 0)
#define LFD_BIT(i)      ((uint32)1 << (i))
#define LFD_TEST(i,s)   (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)    (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)    (*(s) &= ~LFD_BIT(i))
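
/*
 * An lfd_set is a 32-bit bitmap indexed by a logfile's logical fd (lfd).
 * Callsite, library, filename, and method records each carry one so we can
 * tell which logfiles have already had the corresponding event written.
 */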

static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }
    lfd = 0;
retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}

static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd >= 0) {
        fp->size += len;
        bp = fp->buf;
        do {
            cnt = write(fd, bp, len);
            if (cnt <= 0) {
                printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
                return;
            }
            bp += cnt;
            len -= cnt;
        } while (len > 0);
    }
    fp->simsize += len;
}

static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}

static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str) + 1;      /* include null terminator */
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem;
        memcpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    memcpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;
}

static void log_filename(logfile* fp, const char* filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }

        filename = bp;
    }
    log_string(fp, filename);
}

static void log_uint32(logfile *fp, uint32 ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}
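
/*
 * Worked example of the variable-length encoding above: 0x05 is one byte
 * (0x05); 0x1234 takes two bytes (0x92 0x34); 0x054321 takes three bytes
 * (0xC5 0x43 0x21); values of 0x10000000 and up take the full five-byte
 * form, a 0xF0 marker followed by the four raw bytes.
 */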

static void log_event1(logfile *fp, char event, uint32 serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32) serial);
}

static void log_event2(logfile *fp, char event, uint32 serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32) size);
}

static void log_event3(logfile *fp, char event, uint32 serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32) size);
}

static void log_event4(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, ui4);
}

static void log_event5(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, ui5);
}

static void log_event6(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, ui6);
}

static void log_event7(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6,
                       uint32 ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, ui7);
}

static void log_event8(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6,
                       uint32 ui7, uint32 ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, ui8);
}
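
/*
 * Each log record is therefore a single event byte (e.g. 'M' for malloc,
 * 'N' for a new method, 'L' for a library) followed by that event's
 * operands, each written with the compressed encoding of log_uint32 above.
 */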

typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32      serial;
    lfd_set     lfdset;
    const char  *name;      /* pointer to string owned by methods table */
    const char  *library;   /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};
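
/*
 * Callsites form a tree in the usual first-child/next-sibling way: kids
 * points at a node's first child and siblings links the children of a
 * common parent, so each distinct backtrace is one root-to-leaf path.
 */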

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static uint32 tmstats_serial_generator = 0;
static uint32 filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
  {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;

static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ? calltree_maxstack_top->serial : 0);
}

static void *generic_alloctable(void *pool, PRSize size)
{
    return __libc_malloc(size);
}

static void generic_freetable(void *pool, void *item)
{
    __libc_free(item);
}

typedef struct lfdset_entry {
    PLHashEntry base;
    lfd_set     lfdset;
} lfdset_entry;

static PLHashEntry *lfdset_allocentry(void *pool, const void *key)
{
    lfdset_entry *le = __libc_malloc(sizeof *le);
    if (le)
        LFD_ZERO(&le->lfdset);
    return &le->base;
}

static void lfdset_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
    lfdset_entry *le;

    if (flag != HT_FREE_ENTRY)
        return;
    le = (lfdset_entry*) he;
    __libc_free((void*) le);
}

static PLHashAllocOps lfdset_hashallocops = {
    generic_alloctable, generic_freetable,
    lfdset_allocentry,  lfdset_freeentry
};

/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;

/* Table of filename pathnames mapped to logged 'G' record serial numbers. */
static PLHashTable *filenames = NULL;

/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;

static callsite *calltree(void **stack, size_t num_stack_entries)
{
    logfile *fp = logfp;
    void *pc;
    uint32 nkids;
    callsite *parent, *site, **csp, *tmp;
    int maxstack;
    uint32 library_serial, method_serial, filename_serial;
    const char *library, *method, *filename;
    char *slash;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    lfdset_entry *le;
    size_t stack_index;
    nsCodeAddressDetails details;
    nsresult rv;

    /*
     * FIXME bug 391749: We should really lock only the minimum amount
     * that we need to in this function, because it makes some calls
     * that could lock in the system's shared library loader.
     */
    TM_ENTER_LOCK();

    maxstack = (num_stack_entries > tmstats.calltree_maxstack);
    if (maxstack) {
        /* these two are the same, although that used to be less clear */
        tmstats.calltree_maxstack = num_stack_entries;
        tmstats.calltree_maxdepth = num_stack_entries;
    }

    /* Reverse the stack again, finding and building a path in the tree. */
    parent = &calltree_root;
    stack_index = num_stack_entries;
    do {
        --stack_index;
        pc = stack[stack_index];

        csp = &parent->kids;
        while ((site = *csp) != NULL) {
            if (site->pc == pc) {
                tmstats.calltree_kidhits++;

                /* Put the most recently used site at the front of siblings. */
                *csp = site->siblings;
                site->siblings = parent->kids;
                parent->kids = site;

                /* Check whether we've logged for this site and logfile yet. */
                if (!LFD_TEST(fp->lfd, &site->lfdset)) {
                    /*
                     * Some other logfile put this site in the calltree.  We
                     * must log an event for site, and possibly first for its
                     * method and/or library.  Note the code after the while
                     * loop that tests if (!site).
                     */
                    break;
                }

                /* Site already built and logged to fp -- go up the stack. */
                goto upward;
            }
            tmstats.calltree_kidsteps++;
            csp = &site->siblings;
        }

        if (!site) {
            tmstats.calltree_kidmisses++;

            /* Check for recursion: see if pc is on our ancestor line. */
            for (site = parent; site; site = site->parent) {
                if (site->pc == pc) {
                    tmstats.callsite_recurrences++;
                    last_callsite_recurrence = site;
                    goto upward;
                }
            }
        }

        /*
         * Not in tree at all, or not logged to fp: let's find our symbolic
         * callsite info.
         */

        /*
         * NS_DescribeCodeAddress can (on Linux) acquire a lock inside
         * the shared library loader.  Another thread might call malloc
         * while holding that lock (when loading a shared library).  So
         * we have to exit tmlock around this call.  For details, see
         * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3
         *
         * We could be more efficient by building the nodes in the
         * calltree, exiting the monitor once to describe all of them,
         * and then filling in the descriptions for any that hadn't been
         * described already.  But this is easier for now.
         */
        TM_EXIT_LOCK();
        rv = NS_DescribeCodeAddress(pc, &details);
        TM_ENTER_LOCK();
        if (NS_FAILED(rv)) {
            tmstats.dladdr_failures++;
            goto fail;
        }

        /* Check whether we need to emit a library trace record. */
        library_serial = 0;
        library = NULL;
        if (details.library[0]) {
            if (!libraries) {
                libraries = PL_NewHashTable(100, PL_HashString,
                                            PL_CompareStrings, PL_CompareValues,
                                            &lfdset_hashallocops, NULL);
                if (!libraries) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
            }
            hash = PL_HashString(details.library);
            hep = PL_HashTableRawLookup(libraries, hash, details.library);
            he = *hep;
            if (he) {
                library = (char*) he->key;
                library_serial = (uint32) NS_PTR_TO_INT32(he->value);
                le = (lfdset_entry *) he;
                if (LFD_TEST(fp->lfd, &le->lfdset)) {
                    /* We already logged an event on fp for this library. */
                    le = NULL;
                }
            } else {
                library = strdup(details.library);
                if (library) {
                    library_serial = ++library_serial_generator;
                    he = PL_HashTableRawAdd(libraries, hep, hash, library,
                                            (void*) library_serial);
                }
                if (!he) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
                le = (lfdset_entry *) he;
            }
            if (le) {
                /* Need to log an event to fp for this lib. */
                slash = strrchr(library, '/');
                log_event1(fp, TM_EVENT_LIBRARY, library_serial);
                log_string(fp, slash ? slash + 1 : library);
                LFD_SET(fp->lfd, &le->lfdset);
            }
        }

        /* For compatibility with current log format, always emit a
         * filename trace record, using "noname" / 0 when no file name
         * is available. */
        filename_serial = 0;
        filename = details.filename[0] ? details.filename : "noname";
        if (!filenames) {
            filenames = PL_NewHashTable(100, PL_HashString,
                                        PL_CompareStrings, PL_CompareValues,
                                        &lfdset_hashallocops, NULL);
            if (!filenames) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(filename);
        hep = PL_HashTableRawLookup(filenames, hash, filename);
        he = *hep;
        if (he) {
            filename = (char*) he->key;
            filename_serial = (uint32) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this filename. */
                le = NULL;
            }
        } else {
            filename = strdup(filename);
            if (filename) {
                filename_serial = ++filename_serial_generator;
                he = PL_HashTableRawAdd(filenames, hep, hash, filename,
                                        (void*) filename_serial);
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            /* Need to log an event to fp for this filename. */
            log_event1(fp, TM_EVENT_FILENAME, filename_serial);
            log_filename(fp, filename);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        if (!details.function[0]) {
            PR_snprintf(details.function, sizeof(details.function),
                        "%s+%X", library ? library : "main", details.loffset);
        }

        /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
        method_serial = 0;
        if (!methods) {
            methods = PL_NewHashTable(10000, PL_HashString,
                                      PL_CompareStrings, PL_CompareValues,
                                      &lfdset_hashallocops, NULL);
            if (!methods) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(details.function);
        hep = PL_HashTableRawLookup(methods, hash, details.function);
        he = *hep;
        if (he) {
            method = (char*) he->key;
            method_serial = (uint32) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this method. */
                le = NULL;
            }
        } else {
            method = strdup(details.function);
            if (method) {
                method_serial = ++method_serial_generator;
                he = PL_HashTableRawAdd(methods, hep, hash, method,
                                        (void*) method_serial);
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial,
                       filename_serial, details.lineno);
            log_string(fp, method);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        /* Create a new callsite record. */
        if (!site) {
            site = __libc_malloc(sizeof(callsite));
            if (!site) {
                tmstats.btmalloc_failures++;
                goto fail;
            }

            /* Update parent and max-kids-per-parent stats. */
            if (!parent->kids)
                tmstats.calltree_parents++;
            nkids = 1;
            for (tmp = parent->kids; tmp; tmp = tmp->siblings)
                nkids++;
            if (nkids > tmstats.calltree_maxkids) {
                tmstats.calltree_maxkids = nkids;
                calltree_maxkids_parent = parent;
            }

            /* Insert the new site into the tree. */
            site->pc = pc;
            site->serial = ++callsite_serial_generator;
            LFD_ZERO(&site->lfdset);
            site->name = method;
            site->library = library;
            site->offset = details.loffset;
            site->parent = parent;
            site->siblings = parent->kids;
            parent->kids = site;
            site->kids = NULL;
        }

        /* Log the site with its parent, method, and offset. */
        log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial,
                   method_serial, details.foffset);
        LFD_SET(fp->lfd, &site->lfdset);

      upward:
        parent = site;
    } while (stack_index > 0);

    if (maxstack)
        calltree_maxstack_top = site;

    TM_EXIT_LOCK();

    return site;

  fail:
    TM_EXIT_LOCK();
    return NULL;
}

/* buffer the stack so that we can reverse it */

PR_STATIC_CALLBACK(void)
stack_callback(void *pc, void *closure)
{
    stack_buffer_info *info = (stack_buffer_info*) closure;

    /*
     * If we run out of buffer, keep incrementing entries so that
     * backtrace can call us again with a bigger buffer.
     */
    if (info->entries < info->size)
        info->buffer[info->entries] = pc;
    ++info->entries;
}

/*
 * The caller MUST NOT be holding tmlock when calling backtrace.
 */
callsite *
backtrace(tm_thread *t, int skip)
{
    callsite *site;
    stack_buffer_info *info = &t->backtrace_buf;
    void ** new_stack_buffer;
    size_t new_stack_buffer_size;

    t->suppress_tracing++;

    /*
     * NS_StackWalk can (on Windows) acquire a lock in the shared library
     * loader.  Another thread might call malloc while holding that lock
     * (when loading a shared library).  So we can't be in tmlock during
     * this call.  For details, see
     * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
     */

    /* skip == 0 means |backtrace| should show up, so don't use skip + 1 */
    /* NB: this call is repeated below if the buffer is too small */
    info->entries = 0;
    NS_StackWalk(stack_callback, skip, info);

    /*
     * To avoid allocating in stack_callback (which, on Windows, is
     * called on a different thread from the one we're running on here),
     * reallocate here if it didn't have a big enough buffer (which
     * includes the first call on any thread), and call it again.
     */
    if (info->entries > info->size) {
        new_stack_buffer_size = 2 * info->entries;
        new_stack_buffer = __libc_realloc(info->buffer,
                               new_stack_buffer_size * sizeof(void*));
        if (!new_stack_buffer)
            return NULL;
        info->buffer = new_stack_buffer;
        info->size = new_stack_buffer_size;

        /* and call NS_StackWalk again */
        info->entries = 0;
        NS_StackWalk(stack_callback, skip, info);

        PR_ASSERT(info->entries * 2 == new_stack_buffer_size); /* same stack */
    }

    if (info->entries == 0) {
        t->suppress_tracing--;
        return NULL;
    }

    site = calltree(info->buffer, info->entries);

    TM_ENTER_LOCK();
    tmstats.backtrace_calls++;
    if (!site) {
        tmstats.backtrace_failures++;
        PR_ASSERT(tmstats.backtrace_failures < 100);
    }
    TM_EXIT_LOCK();

    t->suppress_tracing--;
    return site;
}

typedef struct allocation {
    PLHashEntry entry;
    size_t      size;
    FILE        *trackfp;       /* for allocation tracking */
} allocation;

#define ALLOC_HEAP_SIZE 150000

static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;

static PLHashEntry *alloc_allocentry(void *pool, const void *key)
{
    allocation **listp, *alloc;
    int n;

    if (!alloc_heap_initialized) {
        n = ALLOC_HEAP_SIZE;
        listp = &alloc_freelist;
        for (alloc = alloc_heap; --n >= 0; alloc++) {
            *listp = alloc;
            listp = (allocation**) &alloc->entry.next;
        }
        *listp = NULL;
        alloc_heap_initialized = 1;
    }

    listp = &alloc_freelist;
    alloc = *listp;
    if (!alloc)
        return __libc_malloc(sizeof(allocation));
    *listp = (allocation*) alloc->entry.next;
    return &alloc->entry;
}

static void alloc_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
    allocation *alloc;

    if (flag != HT_FREE_ENTRY)
        return;
    alloc = (allocation*) he;
    if ((PRUptrdiff)(alloc - alloc_heap) < (PRUptrdiff)ALLOC_HEAP_SIZE) {
        alloc->entry.next = &alloc_freelist->entry;
        alloc_freelist = alloc;
    } else {
        __libc_free((void*) alloc);
    }
}

static PLHashAllocOps alloc_hashallocops = {
    generic_alloctable, generic_freetable,
    alloc_allocentry,   alloc_freeentry
};

static PLHashNumber hash_pointer(const void *key)
{
    return (PLHashNumber) key;
}

static PLHashTable *allocations = NULL;

static PLHashTable *new_allocations(void)
{
    allocations = PL_NewHashTable(200000, hash_pointer,
                                  PL_CompareValues, PL_CompareValues,
                                  &alloc_hashallocops, NULL);
    return allocations;
}

#define get_allocations() (allocations ? allocations : new_allocations())
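
/*
 * The allocations table is created lazily on first use; it maps each live
 * pointer to the callsite that allocated it, with the hash entry itself
 * extended (struct allocation) to carry the block's size and an optional
 * tracking FILE.
 */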

#ifdef XP_UNIX

NS_EXTERNAL_VIS_(__ptr_t)
malloc(size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_malloc(size);
    }

    start = PR_IntervalNow();
    ptr = __libc_malloc(size);
    end = PR_IntervalNow();

    site = backtrace(t, 1);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.malloc_calls++;
    if (!ptr) {
        tmstats.malloc_failures++;
    } else {
        if (site)
            log_event5(logfp, TM_EVENT_MALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;

    return ptr;
}
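
/*
 * The wrappers that follow (calloc, realloc, valloc, memalign, free) repeat
 * the same pattern as malloc above: bail out to the underlying __libc_*
 * call while tracing is off or suppressed, otherwise time the real call,
 * take a backtrace, and record the event and the allocations-table entry
 * under tmlock.
 */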

NS_EXTERNAL_VIS_(__ptr_t)
calloc(size_t count, size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    tm_thread *t;

    /**
     * During the initialization of the glibc/libpthread, and
     * before main() is running, ld-linux.so.2 tries to allocate memory
     * using calloc (call from _dl_tls_setup).
     *
     * Thus, our calloc replacement is invoked too early, tries to
     * initialize NSPR, which calls dlopen, which calls into the dl
     * -> crash.
     *
     * Delaying NSPR calls until NSPR is initialized helps.
     */
    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_calloc(count, size);
    }

    start = PR_IntervalNow();
    ptr = __libc_calloc(count, size);
    end = PR_IntervalNow();

    site = backtrace(t, 1);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.calloc_calls++;
    if (!ptr) {
        tmstats.calloc_failures++;
    } else {
        size *= count;
        if (site) {
            log_event5(logfp, TM_EVENT_CALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
realloc(__ptr_t ptr, size_t size)
{
    PRUint32 start, end;
    __ptr_t oldptr;
    callsite *oldsite, *site;
    size_t oldsize;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    allocation *alloc;
    FILE *trackfp = NULL;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_realloc(ptr, size);
    }

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.realloc_calls++;
    if (PR_TRUE) {
        oldptr = ptr;
        oldsite = NULL;
        oldsize = 0;
        he = NULL;
        if (oldptr && get_allocations()) {
            hash = hash_pointer(oldptr);
            hep = PL_HashTableRawLookup(allocations, hash, oldptr);
            he = *hep;
            if (he) {
                oldsite = (callsite*) he->value;
                alloc = (allocation*) he;
                oldsize = alloc->size;
                trackfp = alloc->trackfp;
                if (trackfp) {
                    fprintf(alloc->trackfp,
                            "\nrealloc(%p, %lu), oldsize %lu, alloc site %p\n",
                            (void*) ptr, (unsigned long) size,
                            (unsigned long) oldsize, (void*) oldsite);
                    NS_TraceStack(1, trackfp);
                }
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;

    start = PR_IntervalNow();
    ptr = __libc_realloc(ptr, size);
    end = PR_IntervalNow();

    site = backtrace(t, 1);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    if (!ptr && size) {
        /*
         * When realloc() fails, the original block is not freed or moved, so
         * we'll leave the allocation entry untouched.
         */
        tmstats.realloc_failures++;
    } else {
        if (site) {
            log_event8(logfp, TM_EVENT_REALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size,
                       oldsite ? oldsite->serial : 0,
                       (uint32)NS_PTR_TO_INT32(oldptr), oldsize);
        }
        if (ptr && allocations) {
            if (ptr != oldptr) {
                /*
                 * If we're reallocating (not merely allocating new space by
                 * passing null to realloc) and realloc has moved the block,
                 * free oldptr.
                 */
                if (he)
                    PL_HashTableRemove(allocations, oldptr);

                /* Record the new allocation now, setting he. */
                he = PL_HashTableAdd(allocations, ptr, site);
            } else {
                /*
                 * If we haven't yet recorded an allocation (possibly due to
                 * a temporary memory shortage), do it now.
                 */
                if (!he)
                    he = PL_HashTableAdd(allocations, ptr, site);
            }
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = trackfp;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
    return ptr;
}

NS_EXTERNAL_VIS_(void*)
valloc(size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_valloc(size);
    }

    start = PR_IntervalNow();
    ptr = __libc_valloc(size);
    end = PR_IntervalNow();

    site = backtrace(t, 1);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.malloc_calls++; /* XXX valloc_calls ? */
    if (!ptr) {
        tmstats.malloc_failures++; /* XXX valloc_failures ? */
    } else {
        if (site)
            log_event5(logfp, TM_EVENT_MALLOC, /* XXX TM_EVENT_VALLOC? */
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
    return ptr;
}

NS_EXTERNAL_VIS_(void*)
memalign(size_t boundary, size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_memalign(boundary, size);
    }

    start = PR_IntervalNow();
    ptr = __libc_memalign(boundary, size);
    end = PR_IntervalNow();

    site = backtrace(t, 1);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.malloc_calls++; /* XXX memalign_calls ? */
    if (!ptr) {
        tmstats.malloc_failures++; /* XXX memalign_failures ? */
    } else {
        if (site) {
            log_event5(logfp, TM_EVENT_MALLOC, /* XXX TM_EVENT_MEMALIGN? */
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
    return ptr;
}

NS_EXTERNAL_VIS_(int)
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    __ptr_t ptr = memalign(alignment, size);
    if (!ptr)
        return ENOMEM;
    *memptr = ptr;
    return 0;
}
|
2007-02-14 10:06:35 +03:00
|
|
|
|
2007-09-29 02:39:59 +04:00
|
|
|
NS_EXTERNAL_VIS_(void)
free(__ptr_t ptr)
{
    PLHashEntry **hep, *he;
    callsite *site;
    allocation *alloc;
    uint32 serial = 0, size = 0;
    PRUint32 start, end;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        __libc_free(ptr);
        return;
    }

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.free_calls++;
    if (!ptr) {
        tmstats.null_free_calls++;
    } else {
        if (get_allocations()) {
            hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
            he = *hep;
            if (he) {
                site = (callsite*) he->value;
                if (site) {
                    alloc = (allocation*) he;
                    serial = site->serial;
                    size = alloc->size;
                    if (alloc->trackfp) {
                        fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n",
                                (void*) ptr, (void*) site);
                        NS_TraceStack(1, alloc->trackfp);
                    }
                }
                PL_HashTableRawRemove(allocations, hep, he);
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;

    start = PR_IntervalNow();
    __libc_free(ptr);
    end = PR_IntervalNow();

    if (size != 0) {
        t->suppress_tracing++;
        TM_ENTER_LOCK();
        log_event5(logfp, TM_EVENT_FREE,
                   serial, start, end - start,
                   (uint32)NS_PTR_TO_INT32(ptr), size);
        TM_EXIT_LOCK();
        t->suppress_tracing--;
    }
}

NS_EXTERNAL_VIS_(void)
cfree(void *ptr)
{
    free(ptr);
}

#endif /* XP_UNIX */

static const char magic[] = NS_TRACE_MALLOC_MAGIC;

static void
log_header(int logfd)
{
    uint32 ticksPerSec = PR_htonl(PR_TicksPerSecond());
    (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE);
    (void) write(logfd, &ticksPerSec, sizeof ticksPerSec);
}
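
/*
 * Illustrative sketch (not part of the original file): how a log consumer
 * might read back the header that log_header() writes above.  The function
 * name read_tm_header and its fd argument are hypothetical; the layout
 * (NS_TRACE_MALLOC_MAGIC_SIZE magic bytes followed by a 4-byte ticks-per-
 * second value in network byte order) is taken directly from the two
 * write() calls above.
 */
#if 0
static int
read_tm_header(int fd, uint32 *ticksPerSecOut)
{
    char buf[NS_TRACE_MALLOC_MAGIC_SIZE];
    uint32 ticksPerSec;

    if (read(fd, buf, sizeof buf) != sizeof buf ||
        memcmp(buf, magic, sizeof buf) != 0)
        return -1;                                /* not a trace-malloc log */
    if (read(fd, &ticksPerSec, sizeof ticksPerSec) != sizeof ticksPerSec)
        return -1;
    *ticksPerSecOut = PR_ntohl(ticksPerSec);      /* undo the PR_htonl above */
    return 0;
}
#endif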

PR_IMPLEMENT(void) NS_TraceMallocStartup(int logfd)
{
    /* We must be running on the primordial thread. */
    PR_ASSERT(tracing_enabled == 1);
    PR_ASSERT(logfp == &default_logfile);
    tracing_enabled = (logfd >= 0);

    if (tracing_enabled) {
        PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */

        /* Log everything in logfp (aka default_logfile)'s buffer to logfd. */
        logfp->fd = logfd;
        logfile_list = &default_logfile;
        logfp->prevp = &logfile_list;
        logfile_tail = &logfp->next;
        log_header(logfd);
    }

    atexit(NS_TraceMallocShutdown);

    /*
     * We only allow one thread until NS_TraceMallocStartup is called.
     * When it is, we have to initialize tls_index before allocating tmlock
     * since get_tm_index uses NULL-tmlock to detect tls_index being
     * uninitialized.
     */
    main_thread.suppress_tracing++;
    TM_CREATE_TLS_INDEX(tls_index);
    TM_SET_TLS_DATA(tls_index, &main_thread);
    tmlock = PR_NewLock();
    main_thread.suppress_tracing--;

#ifdef XP_WIN32
    /* Register listeners for win32. */
    if (tracing_enabled) {
        StartupHooker();
    }
#endif
}

/*
 * Options for log files, with the log file name either as the next option
 * or separated by '=' (e.g. "./mozilla --trace-malloc malloc.log" or
 * "./mozilla --trace-malloc=malloc.log").
 */
static const char TMLOG_OPTION[] = "--trace-malloc";
static const char SDLOG_OPTION[] = "--shutdown-leaks";

#define SHOULD_PARSE_ARG(name_, log_, arg_) \
    (0 == strncmp(arg_, name_, sizeof(name_) - 1))

#define PARSE_ARG(name_, log_, argv_, i_, consumed_)                  \
    PR_BEGIN_MACRO                                                    \
        char _nextchar = argv_[i_][sizeof(name_) - 1];                \
        if (_nextchar == '=') {                                       \
            log_ = argv_[i_] + sizeof(name_);                         \
            consumed_ = 1;                                            \
        } else if (_nextchar == '\0') {                               \
            log_ = argv_[i_+1];                                       \
            consumed_ = 2;                                            \
        }                                                             \
    PR_END_MACRO
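
/*
 * Illustrative sketch (not part of the original file): how the two accepted
 * spellings are consumed by SHOULD_PARSE_ARG/PARSE_ARG above.  The argv
 * contents and the demo function name are hypothetical examples only.
 */
#if 0
static void
parse_arg_demo(void)
{
    char *args1[] = { "mozilla", "--trace-malloc=malloc.log", NULL };
    char *args2[] = { "mozilla", "--trace-malloc", "malloc.log", NULL };
    char *log = NULL;
    int consumed = 0;

    /* '=' form: log ends up pointing at "malloc.log", consumed == 1. */
    if (SHOULD_PARSE_ARG(TMLOG_OPTION, log, args1[1]))
        PARSE_ARG(TMLOG_OPTION, log, args1, 1, consumed);

    /* Separate-argument form: log = args2[2], consumed == 2 (option plus
       its argument); any other suffix leaves consumed at 0. */
    consumed = 0;
    if (SHOULD_PARSE_ARG(TMLOG_OPTION, log, args2[1]))
        PARSE_ARG(TMLOG_OPTION, log, args2, 1, consumed);
}
#endif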

PR_IMPLEMENT(int) NS_TraceMallocStartupArgs(int argc, char* argv[])
{
    int i, logfd = -1, consumed, logflags;
    char *tmlogname = NULL; /* note global |sdlogname| */

    /*
     * Look for the --trace-malloc <logfile> option early, to avoid missing
     * early mallocs (we miss static constructors whose output overflows the
     * log file's static 16K output buffer).
     */
    for (i = 1; i < argc; i += consumed) {
        consumed = 0;
        if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i]))
            PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed);
        else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname, argv[i]))
            PARSE_ARG(SDLOG_OPTION, sdlogname, argv, i, consumed);

        if (consumed) {
#ifndef XP_WIN32 /* If we don't comment this out, it will crash Windows. */
            int j;
            /* Now remove --trace-malloc and its argument from argv. */
            argc -= consumed;
            for (j = i; j < argc; ++j)
                argv[j] = argv[j+consumed];
            argv[argc] = NULL;
            consumed = 0; /* don't advance next iteration */
#endif
        } else {
            consumed = 1;
        }
    }

    if (tmlogname) {
#ifdef XP_UNIX
        int pipefds[2];
#endif

        switch (*tmlogname) {
#ifdef XP_UNIX
          case '|':
            if (pipe(pipefds) == 0) {
                pid_t pid = fork();
                if (pid == 0) {
                    /* In child: set up stdin, parse args, and exec. */
                    int maxargc, nargc;
                    char **nargv, *token;

                    if (pipefds[0] != 0) {
                        dup2(pipefds[0], 0);
                        close(pipefds[0]);
                    }
                    close(pipefds[1]);

                    tmlogname = strtok(tmlogname + 1, " \t");
                    maxargc = 3;
                    nargv = (char **) malloc((maxargc+1) * sizeof(char *));
                    if (!nargv) exit(1);
                    nargc = 0;
                    nargv[nargc++] = tmlogname;
                    while ((token = strtok(NULL, " \t")) != NULL) {
                        if (nargc == maxargc) {
                            maxargc *= 2;
                            nargv = (char**)
                                realloc(nargv, (maxargc+1) * sizeof(char*));
                            if (!nargv) exit(1);
                        }
                        nargv[nargc++] = token;
                    }
                    nargv[nargc] = NULL;

                    (void) setsid();
                    execvp(tmlogname, nargv);
                    exit(127);
                }

                if (pid > 0) {
                    /* In parent: set logfd to the pipe's write side. */
                    close(pipefds[0]);
                    logfd = pipefds[1];
                }
            }
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't pipe to trace-malloc child process %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
#endif /*XP_UNIX*/

          case '-':
            /* Don't log from startup, but do prepare to log later. */
            /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */
            if (tmlogname[1] == '\0')
                break;
            /* FALL THROUGH */

          default:
            logflags = O_CREAT | O_WRONLY | O_TRUNC;
#if defined(XP_WIN32)
            /*
             * Avoid translations on WIN32.
             */
            logflags |= O_BINARY;
#endif
            logfd = open(tmlogname, logflags, 0644);
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't create trace-malloc log named %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
        }
    }

    NS_TraceMallocStartup(logfd);
    return argc;
}
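
/*
 * Illustrative sketch (not part of the original file): how an embedding
 * application might wire the two entry points above into its own startup.
 * The main() below is hypothetical; note that NS_TraceMallocStartup()
 * already registers NS_TraceMallocShutdown() via atexit(), so the explicit
 * shutdown call matters only on exit paths that bypass atexit handlers.
 */
#if 0
int
main(int argc, char *argv[])
{
    /* Consumes any --trace-malloc/--shutdown-leaks arguments and returns
       the adjusted argc. */
    argc = NS_TraceMallocStartupArgs(argc, argv);

    /* ... application runs; allocations are logged ... */

    NS_TraceMallocShutdown();
    return 0;
}
#endif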

PR_IMPLEMENT(void) NS_TraceMallocShutdown()
{
    logfile *fp;

    if (sdlogname)
        NS_TraceMallocDumpAllocations(sdlogname);

    if (tmstats.backtrace_failures) {
        fprintf(stderr,
                "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
                (unsigned long) tmstats.backtrace_failures,
                (unsigned long) tmstats.btmalloc_failures,
                (unsigned long) tmstats.dladdr_failures);
    }
    while ((fp = logfile_list) != NULL) {
        logfile_list = fp->next;
        log_tmstats(fp);
        flush_logfile(fp);
        if (fp->fd >= 0) {
            close(fp->fd);
            fp->fd = -1;
        }
        if (fp != &default_logfile) {
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }
    if (tmlock) {
        PRLock *lock = tmlock;
        tmlock = NULL;
        PR_DestroyLock(lock);
    }
#ifdef XP_WIN32
    if (tracing_enabled) {
        ShutdownHooker();
    }
#endif
}

PR_IMPLEMENT(void) NS_TraceMallocDisable()
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);
    tracing_enabled = 0;
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

PR_IMPLEMENT(void) NS_TraceMallocEnable()
{
    tm_thread *t = tm_get_thread();

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tracing_enabled = 1;
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

PR_IMPLEMENT(int) NS_TraceMallocChangeLogFD(int fd)
{
    logfile *oldfp, *fp;
    struct stat sb;
    tm_thread *t = tm_get_thread();

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    oldfp = logfp;
    if (oldfp->fd != fd) {
        flush_logfile(oldfp);
        fp = get_logfile(fd);
        if (!fp) {
            TM_EXIT_LOCK();
            t->suppress_tracing--;
            return -2;
        }
        if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0)
            log_header(fd);
        logfp = fp;
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
    return oldfp->fd;
}

static PRIntn
lfd_clr_enumerator(PLHashEntry *he, PRIntn i, void *arg)
{
    lfdset_entry *le = (lfdset_entry*) he;
    logfile *fp = (logfile*) arg;

    LFD_CLR(fp->lfd, &le->lfdset);
    return HT_ENUMERATE_NEXT;
}

static void
lfd_clr_walk(callsite *site, logfile *fp)
{
    callsite *kid;

    LFD_CLR(fp->lfd, &site->lfdset);
    for (kid = site->kids; kid; kid = kid->siblings)
        lfd_clr_walk(kid, fp);
}

PR_IMPLEMENT(void)
NS_TraceMallocCloseLogFD(int fd)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    t->suppress_tracing++;
    TM_ENTER_LOCK();

    fp = get_logfile(fd);
    if (fp) {
        flush_logfile(fp);
        if (fp == &default_logfile) {
            /* Leave default_logfile in logfile_list with an fd of -1. */
            fp->fd = -1;

            /* NB: we can never free lfd 0, it belongs to default_logfile. */
            PR_ASSERT(fp->lfd == 0);
        } else {
            /* Clear fp->lfd in all possible lfdsets. */
            PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp);
            PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp);
            lfd_clr_walk(&calltree_root, fp);

            /* Unlink fp from logfile_list, freeing lfd for reallocation. */
            *fp->prevp = fp->next;
            if (!fp->next) {
                PR_ASSERT(logfile_tail == &fp->next);
                logfile_tail = fp->prevp;
            }

            /* Reset logfp if we must, then free fp. */
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }

    TM_EXIT_LOCK();
    t->suppress_tracing--;
    close(fd);
}

PR_IMPLEMENT(void)
NS_TraceMallocLogTimestamp(const char *caption)
{
    logfile *fp;
#ifdef XP_UNIX
    struct timeval tv;
#endif
#ifdef XP_WIN32
    struct _timeb tb;
#endif
    tm_thread *t = tm_get_thread();

    t->suppress_tracing++;
    TM_ENTER_LOCK();

    fp = logfp;
    log_byte(fp, TM_EVENT_TIMESTAMP);

#ifdef XP_UNIX
    gettimeofday(&tv, NULL);
    log_uint32(fp, (uint32) tv.tv_sec);
    log_uint32(fp, (uint32) tv.tv_usec);
#endif
#ifdef XP_WIN32
    _ftime(&tb);
    log_uint32(fp, (uint32) tb.time);
    log_uint32(fp, (uint32) tb.millitm);
#endif
    log_string(fp, caption);

    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

static PRIntn
allocation_enumerator(PLHashEntry *he, PRIntn i, void *arg)
{
    allocation *alloc = (allocation*) he;
    FILE *ofp = (FILE*) arg;
    callsite *site = (callsite*) he->value;

    extern const char* nsGetTypeName(const void* ptr);
    unsigned long *p, *end;

    fprintf(ofp, "%p <%s> (%lu)\n",
            he->key,
            nsGetTypeName(he->key),
            (unsigned long) alloc->size);

    for (p   = (unsigned long*) he->key,
         end = (unsigned long*) ((char*)he->key + alloc->size);
         p < end; ++p)
        fprintf(ofp, "\t0x%08lX\n", *p);

    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
    fputc('\n', ofp);
    return HT_ENUMERATE_NEXT;
}

PR_IMPLEMENT(void)
NS_TraceStack(int skip, FILE *ofp)
{
    callsite *site;
    tm_thread *t = tm_get_thread();

    site = backtrace(t, skip + 1);
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}

PR_IMPLEMENT(int)
NS_TraceMallocDumpAllocations(const char *pathname)
{
    FILE *ofp;
    int rv;

    ofp = fopen(pathname, WRITE_FLAGS);
    if (!ofp)
        return -1;
    if (allocations)
        PL_HashTableEnumerateEntries(allocations, allocation_enumerator, ofp);
    rv = ferror(ofp) ? -1 : 0;
    fclose(ofp);
    return rv;
}
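
/*
 * Illustrative sketch (not part of the original file): dumping the live
 * allocation table on demand.  The output path and calling code are
 * hypothetical; NS_TraceMallocDumpAllocations() returns 0 on success and
 * -1 if the file could not be opened or written, as implemented above.
 */
#if 0
static void
dump_live_allocations_example(void)
{
    if (NS_TraceMallocDumpAllocations("allocations.dump") != 0)
        fprintf(stderr, "trace-malloc: failed to dump allocations\n");
}
#endif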

PR_IMPLEMENT(void)
NS_TraceMallocFlushLogfiles()
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    t->suppress_tracing++;
    TM_ENTER_LOCK();

    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);

    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

PR_IMPLEMENT(void)
NS_TrackAllocation(void* ptr, FILE *ofp)
{
    PLHashEntry **hep;
    allocation *alloc;
    tm_thread *t = tm_get_thread();

    fprintf(ofp, "Trying to track %p\n", (void*) ptr);
    setlinebuf(ofp);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    if (get_allocations()) {
        hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
        alloc = (allocation*) *hep;
        if (alloc) {
            fprintf(ofp, "Tracking %p\n", (void*) ptr);
            alloc->trackfp = ofp;
        } else {
            fprintf(ofp, "Not tracking %p\n", (void*) ptr);
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}
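
/*
 * Illustrative sketch (not part of the original file): asking trace-malloc
 * to narrate the lifetime of one heap block.  The pointer and the choice of
 * stderr are hypothetical; once a block is tracked, free() of that block
 * prints a "free(...)" line and a stack trace to the given FILE* (see the
 * trackfp handling in free() above).
 */
#if 0
static void
track_one_block_example(void *suspect)
{
    /* Subsequent events on |suspect| are reported to stderr. */
    NS_TrackAllocation(suspect, stderr);
}
#endif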

#ifdef XP_WIN32

PR_IMPLEMENT(void)
MallocCallback(void *ptr, size_t size, PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.malloc_calls++;
    if (!ptr) {
        tmstats.malloc_failures++;
    } else {
        if (site)
            log_event5(logfp, TM_EVENT_MALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

PR_IMPLEMENT(void)
CallocCallback(void *ptr, size_t count, size_t size, PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.calloc_calls++;
    if (!ptr) {
        tmstats.calloc_failures++;
    } else {
        size *= count;
        if (site)
            log_event5(logfp, TM_EVENT_CALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

PR_IMPLEMENT(void)
ReallocCallback(void * oldptr, void *ptr, size_t size, PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *oldsite, *site;
    size_t oldsize;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.realloc_calls++;
    if (PR_TRUE) {
        oldsite = NULL;
        oldsize = 0;
        he = NULL;
        if (oldptr && get_allocations()) {
            hash = hash_pointer(oldptr);
            hep = PL_HashTableRawLookup(allocations, hash, oldptr);
            he = *hep;
            if (he) {
                oldsite = (callsite*) he->value;
                alloc = (allocation*) he;
                oldsize = alloc->size;
            }
        }
    }
    if (!ptr && size) {
        tmstats.realloc_failures++;

        /*
         * When realloc() fails, the original block is not freed or moved, so
         * we'll leave the allocation entry untouched.
         */
    } else {
        if (site) {
            log_event8(logfp, TM_EVENT_REALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size,
                       oldsite ? oldsite->serial : 0,
                       (uint32)NS_PTR_TO_INT32(oldptr), oldsize);
        }
        if (ptr && allocations) {
            if (ptr != oldptr) {
                /*
                 * If we're reallocating (not allocating new space by passing
                 * null to realloc) and realloc moved the block, free oldptr.
                 */
                if (he)
                    PL_HashTableRawRemove(allocations, hep, he);

                /* Record the new allocation now, setting he. */
                he = PL_HashTableAdd(allocations, ptr, site);
            } else {
                /*
                 * If we haven't yet recorded an allocation (possibly due to a
                 * temporary memory shortage), do it now.
                 */
                if (!he)
                    he = PL_HashTableAdd(allocations, ptr, site);
            }
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

PR_IMPLEMENT(void)
FreeCallback(void * ptr, PRUint32 start, PRUint32 end, tm_thread *t)
{
    PLHashEntry **hep, *he;
    callsite *site;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    t->suppress_tracing++;
    TM_ENTER_LOCK();
    tmstats.free_calls++;
    if (!ptr) {
        tmstats.null_free_calls++;
    } else {
        if (get_allocations()) {
            hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
            he = *hep;
            if (he) {
                site = (callsite*) he->value;
                if (site) {
                    alloc = (allocation*) he;
                    log_event5(logfp, TM_EVENT_FREE,
                               site->serial, start, end - start,
                               (uint32)NS_PTR_TO_INT32(ptr), alloc->size);
                }
                PL_HashTableRawRemove(allocations, hep, he);
            }
        }
    }
    TM_EXIT_LOCK();
    t->suppress_tracing--;
}

#endif /*XP_WIN32*/

#endif /* NS_TRACE_MALLOC */