Add nsTraceMalloc.[ch] and bloatblame.c, not part of build (setenv NS_TRACE_MALLOC to enable here and in xpfe/bootstrap).

brendan%mozilla.org 2000-04-20 04:55:26 +00:00
Parent 208ce8c320
Commit 43bfe2d5e8
7 changed files with 3299 additions and 0 deletions

836
xpcom/base/bloatblame.c Normal file
View file

@@ -0,0 +1,836 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is nsTraceMalloc.c/bloatblame.c code, released
* April 19, 2000.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 2000 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Brendan Eich, 14-April-2000
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License (the "GPL"), in which case the
* provisions of the GPL are applicable instead of those above.
* If you wish to allow use of your version of this file only
* under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <time.h>
#include <unistd.h>
#include "prtypes.h"
#include "prlog.h"
#include "prprf.h"
#include "plhash.h"
#include "nsTraceMalloc.h"
static char *program;
static int sort_by_direct = 0;
static int do_tree_dump = 0;
static uint32 min_subtotal = 0;
static int accum_byte(uint32 *uip)
{
int c = getchar();
if (c == EOF)
return 0;
*uip = (*uip << 8) | c;
return 1;
}
static int get_uint32(uint32 *uip)
{
int c;
uint32 ui;
c = getchar();
if (c == EOF)
return 0;
ui = 0;
if (c & 0x80) {
c &= 0x7f;
if (c & 0x40) {
c &= 0x3f;
if (c & 0x20) {
c &= 0x1f;
if (c & 0x10) {
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
*uip = ui;
return 1;
}
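/*
 * Illustrative note (not part of the committed file): get_uint32() undoes the
 * variable-length integer encoding written by log_uint32() in nsTraceMalloc.c.
 * The run of leading 1 bits in the first byte selects the width; for example
 * the single byte 0x05 decodes to 5, while the pair 0x81 0x2c decodes to
 * (0x01 << 8) | 0x2c = 300.
 */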
static char *get_string(void)
{
char *cp;
int c;
static char buf[256];
static char *bp = buf, *ep = buf + sizeof buf;
static size_t bsize = sizeof buf;
cp = bp;
do {
c = getchar();
if (c == EOF)
return 0;
if (cp == ep) {
if (bp == buf) {
bp = malloc(2 * bsize);
if (bp)
memcpy(bp, buf, bsize);
} else {
bp = realloc(bp, 2 * bsize);
}
if (!bp)
return 0;
cp = bp + bsize;
bsize *= 2;
ep = bp + bsize;
}
*cp++ = c;
} while (c != '\0');
return strdup(bp);
}
typedef struct logevent {
char type;
uint32 serial;
union {
char *libname;
struct {
uint32 library;
char *name;
} method;
struct {
uint32 parent;
uint32 method;
uint32 offset;
} site;
struct {
uint32 oldsize;
uint32 size;
} alloc;
} u;
} logevent;
static int get_logevent(logevent *event)
{
int c;
char *s;
c = getchar();
if (c == EOF)
return 0;
event->type = (char) c;
if (!get_uint32(&event->serial))
return 0;
switch (c) {
case 'L':
s = get_string();
if (!s)
return 0;
event->u.libname = s;
break;
case 'N':
if (!get_uint32(&event->u.method.library))
return 0;
s = get_string();
if (!s)
return 0;
event->u.method.name = s;
break;
case 'S':
if (!get_uint32(&event->u.site.parent))
return 0;
if (!get_uint32(&event->u.site.method))
return 0;
if (!get_uint32(&event->u.site.offset))
return 0;
break;
case 'M':
case 'C':
case 'F':
event->u.alloc.oldsize = 0;
if (!get_uint32(&event->u.alloc.size))
return 0;
break;
case 'R':
if (!get_uint32(&event->u.alloc.oldsize))
return 0;
if (!get_uint32(&event->u.alloc.size))
return 0;
break;
}
return 1;
}
typedef struct graphedge graphedge;
typedef struct graphnode graphnode;
typedef struct callsite callsite;
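/*
 * Overview (illustrative summary, not in the committed file): bloatblame
 * rebuilds three hashed graphs from the log: libraries keyed by 'L' record
 * serials, methods keyed by 'N' serials, and components keyed by the prefix
 * split off each method name. It also rebuilds a callsite tree mirroring the
 * 'S' records. Each graphnode accumulates direct and total byte counts and
 * keeps fan-in/fan-out edge lists; each callsite points at its method node.
 */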
struct graphnode {
PLHashEntry entry; /* key is serial or name, value must be name */
graphedge *in;
graphedge *out;
graphnode *up;
uint32 direct; /* bytes allocated by this node's code */
uint32 total; /* direct + bytes from all descendants */
int visited; /* flag used during walk_callsite_tree */
};
#define graphnode_name(node) ((char*) (node)->entry.value)
#define library_serial(lib) ((uint32) (lib)->entry.key)
#define component_name(comp) ((const char*) (comp)->entry.key)
struct graphedge {
graphedge *next;
graphnode *node;
uint32 direct;
uint32 total;
};
struct callsite {
PLHashEntry entry;
callsite *parent;
callsite *siblings;
callsite *kids;
graphnode *method;
uint32 offset;
uint32 direct;
uint32 total;
};
#define callsite_serial(site) ((uint32) (site)->entry.key)
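/*
 * connect_nodes() records an edge between caller and callee nodes in both
 * directions. The two half-edges come from one malloc of two graphedge
 * structs: edge[0] is linked onto from->out, edge[1] onto to->in, and both
 * accumulate the direct/total byte counts contributed by the given callsite.
 */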
static void connect_nodes(graphnode *from, graphnode *to, callsite *site)
{
graphedge *edge;
for (edge = from->out; edge; edge = edge->next) {
if (edge->node == to) {
edge[0].direct += site->direct;
edge[1].direct += site->direct;
edge[0].total += site->total;
edge[1].total += site->total;
return;
}
}
edge = (graphedge*) malloc(2 * sizeof(graphedge));
if (!edge) {
perror(program);
exit(1);
}
edge[0].node = to;
edge[0].next = from->out;
from->out = &edge[0];
edge[1].node = from;
edge[1].next = to->in;
to->in = &edge[1];
edge[0].direct = edge[1].direct = site->direct;
edge[0].total = edge[1].total = site->total;
}
static void *generic_alloctable(void *pool, PRSize size)
{
return malloc(size);
}
static void generic_freetable(void *pool, void *item)
{
free(item);
}
static PLHashEntry *callsite_allocentry(void *pool, const void *key)
{
return malloc(sizeof(callsite));
}
static PLHashEntry *graphnode_allocentry(void *pool, const void *key)
{
graphnode *node = (graphnode*) malloc(sizeof(graphnode));
if (node) {
node->in = node->out = NULL;
node->up = NULL;
node->direct = node->total = 0;
node->visited = 0;
}
return &node->entry;
}
static void graphnode_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
/* Always free the value, which points to a strdup'd string. */
free(he->value);
/* Free the whole thing if we're told to. */
if (flag == HT_FREE_ENTRY)
free((void*) he);
}
static void component_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
if (flag == HT_FREE_ENTRY) {
graphnode *comp = (graphnode*) he;
/* Free the key, which was strdup'd (N.B. value also points to it). */
free((void*) component_name(comp));
free((void*) comp);
}
}
static PLHashAllocOps callsite_hashallocops = {
generic_alloctable, generic_freetable,
callsite_allocentry, graphnode_freeentry
};
static PLHashAllocOps graphnode_hashallocops = {
generic_alloctable, generic_freetable,
graphnode_allocentry, graphnode_freeentry
};
static PLHashAllocOps component_hashallocops = {
generic_alloctable, generic_freetable,
graphnode_allocentry, component_freeentry
};
static PLHashNumber hash_serial(const void *key)
{
return (PLHashNumber) key;
}
static PLHashTable *libraries;
static PLHashTable *components;
static PLHashTable *methods;
static PLHashTable *callsites;
static callsite calltree_root;
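/*
 * walk_callsite_tree() walks the callsite tree depth first. When a site's
 * method, component, or library differs from its parent's, it adds the
 * site's total to that node (unless the node is already on the current path;
 * the visited flags, set before recursing into kids and cleared afterward,
 * prevent double counting along recursive chains) and wires a caller->callee
 * edge between the two nodes. With -t it also dumps the raw tree to fp.
 */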
static void walk_callsite_tree(callsite *site, int level, int kidnum, FILE *fp)
{
callsite *parent;
graphnode *meth, *pmeth, *comp, *pcomp, *lib, *plib;
callsite *kid;
int nkids;
parent = site->parent;
meth = comp = lib = NULL;
if (parent) {
meth = site->method;
if (meth) {
pmeth = parent->method;
if (pmeth && pmeth != meth) {
if (!meth->visited)
meth->total += site->total;
connect_nodes(pmeth, meth, site);
comp = meth->up;
if (comp) {
pcomp = pmeth->up;
if (pcomp && pcomp != comp) {
if (!comp->visited)
comp->total += site->total;
connect_nodes(pcomp, comp, site);
lib = comp->up;
if (lib) {
plib = pcomp->up;
if (plib && plib != lib) {
if (!lib->visited)
lib->total += site->total;
connect_nodes(plib, lib, site);
}
lib->visited = 1;
}
}
comp->visited = 1;
}
}
meth->visited = 1;
}
}
if (do_tree_dump) {
fprintf(fp, "%c%*s%3d %3d %s %lu %lu\n",
site->kids ? '+' : '-', level, "", level, kidnum,
meth ? graphnode_name(meth) : "???",
(unsigned long)site->direct, (unsigned long)site->total);
}
nkids = 0;
for (kid = site->kids; kid; kid = kid->siblings) {
walk_callsite_tree(kid, level + 1, nkids, fp);
nkids++;
}
if (meth) {
meth->visited = 0;
if (comp) {
comp->visited = 0;
if (lib)
lib->visited = 0;
}
}
}
static PRIntn tabulate_node(PLHashEntry *he, PRIntn i, void *arg)
{
graphnode **table = (graphnode**) arg;
table[i] = (graphnode*) he;
return HT_ENUMERATE_NEXT;
}
/* Sort in reverse size order, so biggest node comes first. */
static int node_table_compare(const void *p1, const void *p2)
{
const graphnode *node1, *node2;
uint32 key1, key2;
node1 = *(const graphnode**) p1;
node2 = *(const graphnode**) p2;
if (sort_by_direct) {
key1 = node1->direct;
key2 = node2->direct;
} else {
key1 = node1->total;
key2 = node2->total;
}
if (key1 < key2)
return 1;
if (key1 > key2)
return -1;
return 0;
}
static const char *prettybig(uint32 num, char *buf, size_t limit)
{
if (num > 1000000)
PR_snprintf(buf, limit, "%1.2fM", (double) num / 1e6);
else if (num > 1000)
PR_snprintf(buf, limit, "%1.2fK", (double) num / 1e3);
else
PR_snprintf(buf, limit, "%lu", (unsigned long) num);
return buf;
}
static double percent(uint32 num, uint32 total)
{
if (num == 0)
return 0.0;
return ((double) num * 100) / (double) total;
}
/* Linked list bubble-sort (waterson and brendan went bald hacking this). */
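/* Sorts the singly linked edge list in place into descending 'total' order,
   swapping adjacent elements by re-linking through the currp/nextp
   pointer-to-pointer chase. */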
static void sort_graphedge_list(graphedge **currp)
{
graphedge *curr, *next, **nextp, *tmp;
while ((curr = *currp) != NULL && curr->next) {
nextp = &curr->next;
while ((next = *nextp) != NULL) {
if (curr->total < next->total) {
tmp = curr->next;
*currp = tmp;
if (tmp == next) {
PR_ASSERT(nextp == &curr->next);
curr->next = next->next;
next->next = curr;
} else {
*nextp = next->next;
curr->next = next->next;
next->next = tmp;
*currp = next;
*nextp = curr;
nextp = &curr->next;
}
curr = next;
continue;
}
nextp = &next->next;
}
currp = &curr->next;
}
}
static void dump_graphedge_list(graphedge *list, FILE *fp)
{
uint32 total;
graphedge *edge;
char buf[32];
fputs("<td valign=top>", fp);
total = 0;
for (edge = list; edge; edge = edge->next)
total += edge->total;
for (edge = list; edge; edge = edge->next) {
fprintf(fp, "<a href='#%s'>%s&nbsp;(%%%1.2f)</a>\n",
graphnode_name(edge->node),
prettybig(edge->total, buf, sizeof buf),
percent(edge->total, total));
}
fputs("</td>", fp);
}
static void dump_graph(PLHashTable *hashtbl, const char *title, FILE *fp)
{
uint32 i, count;
graphnode **table;
char buf1[32], buf2[32];
count = hashtbl->nentries;
table = (graphnode**) malloc(count * sizeof(graphnode*));
if (!table) {
perror(program);
exit(1);
}
PL_HashTableEnumerateEntries(hashtbl, tabulate_node, table);
qsort(table, count, sizeof(graphnode*), node_table_compare);
fprintf(fp,
"<table border=1>\n"
"<tr><th>%s</th>"
"<th>Total/Direct (percents)</th>"
"<th>Fan-in</th>"
"<th>Fan-out</th>"
"</tr>\n",
title);
for (i = 0; i < count; i++) {
graphnode *node;
node = table[i];
/* XXX cast out bogusly large components (threading confusion?) */
if (node->total > calltree_root.total)
continue;
/* Don't bother with truly puny nodes. */
if (node->total < min_subtotal)
break;
fprintf(fp,
"<tr>"
"<td valign=top><a name='%s'>%s</td>"
"<td valign=top>%s/%s (%%%1.2f/%%%1.2f)</td>",
graphnode_name(node),
graphnode_name(node),
prettybig(node->total, buf1, sizeof buf1),
prettybig(node->direct, buf2, sizeof buf2),
percent(node->total, calltree_root.total),
percent(node->direct, calltree_root.total));
sort_graphedge_list(&node->in);
dump_graphedge_list(node->in, fp);
sort_graphedge_list(&node->out);
dump_graphedge_list(node->out, fp);
fputs("</tr>\n", fp);
}
fputs("</table>\n", fp);
free((void*) table);
}
static const char magic[] = NS_TRACE_MALLOC_LOGFILE_MAGIC;
int main(int argc, char **argv)
{
int c;
FILE *fp;
char buf[16];
time_t start;
logevent event;
program = *argv;
while ((c = getopt(argc, argv, "dtm:")) != EOF) {
switch (c) {
case 'd':
sort_by_direct = 1;
break;
case 't':
do_tree_dump = 1;
break;
case 'm':
min_subtotal = atoi(optarg);
break;
default:
fprintf(stderr, "usage: %s [-dt] [-m min] [output.html]\n",
program);
return 2;
}
}
argc -= optind;
argv += optind;
if (argc == 0) {
fp = stdout;
} else {
fp = fopen(*argv, "w");
if (!fp) {
fprintf(stderr, "%s: can't open %s: %s\n",
program, *argv, strerror(errno));
return 1;
}
}
if (read(0, buf, 16) != 16 || strncmp(buf, magic, 16) != 0) {
fprintf(stderr, "%s: bad magic string %.16s at start of standard input.\n",
program, buf);
return 1;
}
start = time(NULL);
fprintf(fp, "%s starting at %s", program, ctime(&start));
fflush(fp);
libraries = PL_NewHashTable(100, hash_serial, PL_CompareValues,
PL_CompareStrings, &graphnode_hashallocops,
NULL);
components = PL_NewHashTable(10000, PL_HashString, PL_CompareStrings,
PL_CompareValues, &component_hashallocops,
NULL);
methods = PL_NewHashTable(10000, hash_serial, PL_CompareValues,
PL_CompareStrings, &graphnode_hashallocops,
NULL);
callsites = PL_NewHashTable(200000, hash_serial, PL_CompareValues,
PL_CompareValues, &callsite_hashallocops,
NULL);
calltree_root.entry.value = (void*) strdup("root");
if (!libraries || !components || !methods || !callsites ||
!calltree_root.entry.value) {
perror(program);
return 1;
}
while (get_logevent(&event)) {
switch (event.type) {
case 'L': {
const void *key;
PLHashNumber hash;
PLHashEntry **hep, *he;
key = (const void*) event.serial;
hash = hash_serial(key);
hep = PL_HashTableRawLookup(libraries, hash, key);
he = *hep;
PR_ASSERT(!he);
if (he) return 2;
he = PL_HashTableRawAdd(libraries, hep, hash, key, event.u.libname);
if (!he) {
perror(program);
return 1;
}
break;
}
case 'N': {
const void *key;
PLHashNumber hash;
PLHashEntry **hep, *he;
char *name, *head, *mark, save;
graphnode *meth, *comp, *lib;
key = (const void*) event.serial;
hash = hash_serial(key);
hep = PL_HashTableRawLookup(methods, hash, key);
he = *hep;
PR_ASSERT(!he);
if (he) return 2;
name = event.u.method.name;
he = PL_HashTableRawAdd(methods, hep, hash, key, name);
if (!he) {
perror(program);
return 1;
}
meth = (graphnode*) he;
head = name;
mark = strchr(name, ':');
if (!mark) {
mark = name;
while (*mark != '\0' && *mark == '_')
mark++;
head = mark;
mark = strchr(head, '_');
if (!mark) {
mark = strchr(head, '+');
if (!mark)
mark = head + strlen(head);
}
}
save = *mark;
*mark = '\0';
hash = PL_HashString(head);
hep = PL_HashTableRawLookup(components, hash, head);
he = *hep;
if (he) {
comp = (graphnode*) he;
} else {
head = strdup(head);
if (head)
he = PL_HashTableRawAdd(components, hep, hash, head, head);
if (!he) {
perror(program);
exit(1);
}
comp = (graphnode*) he;
key = (const void*) event.u.method.library;
hash = hash_serial(key);
lib = (graphnode*) *PL_HashTableRawLookup(libraries, hash, key);
comp->up = lib;
}
*mark = save;
meth->up = comp;
break;
}
case 'S': {
const void *key, *pkey, *mkey;
PLHashNumber hash, phash, mhash;
PLHashEntry **hep, *he;
callsite *site, *parent;
graphnode *meth;
key = (const void*) event.serial;
hash = hash_serial(key);
hep = PL_HashTableRawLookup(callsites, hash, key);
he = *hep;
PR_ASSERT(!he);
if (he) return 2;
if (event.u.site.parent == 0) {
parent = &calltree_root;
} else {
pkey = (const void*) event.u.site.parent;
phash = hash_serial(pkey);
parent = (callsite*)
*PL_HashTableRawLookup(callsites, phash, pkey);
if (!parent) {
fprintf(fp, "### no parent for %lu (%lu)!\n",
(unsigned long) event.serial,
(unsigned long) event.u.site.parent);
continue;
}
}
he = PL_HashTableRawAdd(callsites, hep, hash, key, NULL);
if (!he) {
perror(program);
return 1;
}
site = (callsite*) he;
site->parent = parent;
site->siblings = parent->kids;
parent->kids = site;
site->kids = NULL;
mkey = (const void*) event.u.site.method;
mhash = hash_serial(mkey);
meth = (graphnode*) *PL_HashTableRawLookup(methods, mhash, mkey);
site->method = meth;
site->offset = event.u.site.offset;
site->direct = site->total = 0;
break;
}
case 'M':
case 'C':
case 'R': {
const void *key;
PLHashNumber hash;
callsite *site, *tmp;
int32 delta;
graphnode *meth, *comp, *lib;
key = (const void*) event.serial;
hash = hash_serial(key);
site = (callsite*) *PL_HashTableRawLookup(callsites, hash, key);
if (!site) {
fprintf(fp, "### no callsite for '%c' (%lu)!\n",
event.type, (unsigned long) event.serial);
continue;
}
delta = (int32)event.u.alloc.size - (int32)event.u.alloc.oldsize;
site->direct += delta;
for (tmp = site; tmp; tmp = tmp->parent)
tmp->total += delta;
meth = site->method;
if (meth) {
meth->direct += delta;
comp = meth->up;
if (comp) {
comp->direct += delta;
lib = comp->up;
if (lib)
lib->direct += delta;
}
}
break;
}
case 'F':
break;
}
}
walk_callsite_tree(&calltree_root, 0, 0, fp);
dump_graph(libraries, "Library", fp);
fputs("<hr>\n", fp);
dump_graph(components, "Component", fp);
#if 0
fputs("<hr>\n", fp);
dump_graph(methods, "Method", fp);
#endif
fclose(fp);
return 0;
}
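In short, bloatblame reads the binary trace log on standard input and writes an HTML blame report to the named output file or to standard output; for example (the log file name here is hypothetical), bloatblame -m 1000 blame.html < malloc-trace.log omits graph rows whose byte totals fall below 1000.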

739
xpcom/base/nsTraceMalloc.c Normal file
View file

@@ -0,0 +1,739 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is nsTraceMalloc.c/bloatblame.c code, released
* April 19, 2000.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 2000 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Brendan Eich, 14-April-2000
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License (the "GPL"), in which case the
* provisions of the GPL are applicable instead of those above.
* If you wish to allow use of your version of this file only
* under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*/
#if defined NS_TRACE_MALLOC
/*
* TODO:
* - #ifdef __linux__/x86 and port to other platforms
* - unify calltree with gc/boehm somehow (common utility libs)
* - provide NS_DumpTraceMallocStats() and hook up to some xul kbd event
* - provide NS_TraceMallocTimestamp() or do it internally
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>
#include <unistd.h>
#include "plhash.h"
#include "prlog.h"
#include "prmon.h"
#include "prprf.h"
#include "nsTraceMalloc.h"
#define __USE_GNU 1
#include <dlfcn.h>
/* From libiberty, why isn't this in <libiberty.h> ? */
extern char *cplus_demangle(const char *, int);
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void __libc_free(__ptr_t);
static int logfd = -1;
static uint32 logsize = 0;
static uint32 simlogsize = 0;
static char buffer[16*1024];
static int bufpos = 0;
static PRMonitor *tmmon = NULL;
static void flush_log_buffer()
{
int len, cnt;
char *bp;
len = bufpos;
if (len == 0)
return;
if (logfd >= 0) {
bp = buffer;
do {
cnt = write(logfd, bp, len);
if (cnt <= 0) {
printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
return;
}
bp += cnt;
len -= cnt;
} while (len > 0);
logsize += bufpos;
}
simlogsize += bufpos;
bufpos = 0;
}
static void log_byte(char byte)
{
if (bufpos == sizeof buffer)
flush_log_buffer();
buffer[bufpos++] = byte;
}
static void log_string(const char *str)
{
int len, rem, cnt;
len = strlen(str);
while ((rem = bufpos + len - sizeof buffer) > 0) {
cnt = len - rem;
strncpy(&buffer[bufpos], str, cnt);
str += cnt;
bufpos += cnt;
flush_log_buffer();
len = rem;
}
strncpy(&buffer[bufpos], str, len);
bufpos += len;
/* Terminate the string. */
log_byte('\0');
}
static void log_uint32(uint32 ival)
{
if (ival < 0x80) {
/* 0xxx xxxx */
log_byte((char) ival);
} else if (ival < 0x4000) {
/* 10xx xxxx xxxx xxxx */
log_byte((char) ((ival >> 8) | 0x80));
log_byte((char) (ival & 0xff));
} else if (ival < 0x200000) {
/* 110x xxxx xxxx xxxx xxxx xxxx */
log_byte((char) ((ival >> 16) | 0xc0));
log_byte((char) ((ival >> 8) & 0xff));
log_byte((char) (ival & 0xff));
} else if (ival < 0x10000000) {
/* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
log_byte((char) ((ival >> 24) | 0xe0));
log_byte((char) ((ival >> 16) & 0xff));
log_byte((char) ((ival >> 8) & 0xff));
log_byte((char) (ival & 0xff));
} else {
/* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
log_byte((char) 0xf0);
log_byte((char) ((ival >> 24) & 0xff));
log_byte((char) ((ival >> 16) & 0xff));
log_byte((char) ((ival >> 8) & 0xff));
log_byte((char) (ival & 0xff));
}
}
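/*
 * Worked examples (illustrative, not part of the committed file) of the
 * encoding above, matching the table documented in nsTraceMalloc.h:
 *          5  ->  05
 *        300  ->  81 2c            ((0x01 << 8) | 0x2c)
 *     100000  ->  c1 86 a0         (0x0186a0)
 * 0x12345678  ->  f0 12 34 56 78   (five-byte escape form)
 */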
static void log_event1(char event, uint32 serial)
{
log_byte(event);
log_uint32((uint32) serial);
}
static void log_event2(char event, uint32 serial, size_t size)
{
log_event1(event, serial);
log_uint32((uint32) size);
}
static void log_event3(char event, uint32 serial, size_t oldsize, size_t size)
{
log_event2(event, serial, oldsize);
log_uint32((uint32) size);
}
static void log_event4(char event, uint32 serial, uint32 ui2, uint32 ui3,
uint32 ui4)
{
log_event3(event, serial, ui2, ui3);
log_uint32(ui4);
}
typedef struct callsite callsite;
struct callsite {
uint32 pc;
uint32 serial;
char *name;
callsite *parent;
callsite *siblings;
callsite *kids;
};
static uint32 suppress_tracing = 0;
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static callsite calltree_root = {0, 0, NULL, NULL, NULL, NULL};
static struct tmstats {
uint32 calltree_maxstack;
uint32 calltree_maxdepth;
uint32 calltree_parents;
uint32 calltree_maxkids;
uint32 calltree_kidhits;
uint32 calltree_kidmisses;
uint32 calltree_kidsteps;
uint32 callsite_recurrences;
uint32 backtrace_calls;
uint32 backtrace_failures;
uint32 btmalloc_failures;
uint32 dladdr_failures;
uint32 malloc_calls;
uint32 malloc_failures;
uint32 calloc_calls;
uint32 calloc_failures;
uint32 realloc_calls;
uint32 realloc_failures;
uint32 free_calls;
uint32 null_free_calls;
} tmstats;
/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;
/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;
/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;
/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;
/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;
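/*
 * calltree() turns a frame-pointer chain into a path of callsite nodes.
 * It first reverses the saved-bp links in place so the walk can start from
 * the outermost frame without recursing, then walks back down: for each
 * return pc it reuses a matching child of the current node, detects
 * recursion by scanning the ancestor line, or creates a new callsite via
 * dladdr/cplus_demangle, emitting 'L', 'N', and 'S' records the first time
 * a library, method, or site is seen. The new callsite record itself comes
 * from __libc_malloc, and backtrace() bumps suppress_tracing so bookkeeping
 * allocations are not themselves traced.
 */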
static callsite *calltree(uint32 *bp)
{
uint32 depth, nkids;
uint32 *bpup, *bpdown, pc;
callsite *parent, *site, **csp, *tmp;
Dl_info info;
int ok, len, maxstack, offset;
uint32 library_serial, method_serial;
const char *library, *symbol;
char *method;
PLHashNumber hash;
PLHashEntry **hep, *he;
/* Reverse the stack frame list to avoid recursion. */
bpup = NULL;
for (depth = 0; ; depth++) {
bpdown = (uint32*) bp[0];
bp[0] = (uint32) bpup;
pc = bp[1];
if (pc < 0x08000000 || pc > 0x7fffffff || bpdown < bp)
break;
bpup = bp;
bp = bpdown;
}
maxstack = (depth > tmstats.calltree_maxstack);
if (maxstack)
tmstats.calltree_maxstack = depth;
/* Reverse the stack again, finding and building a path in the tree. */
parent = &calltree_root;
do {
bpup = (uint32*) bp[0];
bp[0] = (uint32) bpdown;
pc = bp[1];
csp = &parent->kids;
while ((site = *csp) != NULL) {
if (site->pc == pc) {
tmstats.calltree_kidhits++;
/* Put the most recently used site at the front of siblings. */
*csp = site->siblings;
site->siblings = parent->kids;
parent->kids = site;
goto upward;
}
tmstats.calltree_kidsteps++;
csp = &site->siblings;
}
tmstats.calltree_kidmisses++;
/* Check for recursion: see if pc is on our ancestor line. */
for (site = parent; site; site = site->parent) {
if (site->pc == pc) {
tmstats.callsite_recurrences++;
last_callsite_recurrence = site;
goto upward;
}
}
/* Not in tree, let's find our symbolic callsite info. */
info.dli_fname = info.dli_sname = NULL;
ok = dladdr((void*) pc, &info);
if (ok < 0) {
tmstats.dladdr_failures++;
return NULL;
}
/* Check whether we need to emit a library trace record. */
library_serial = 0;
library = info.dli_fname;
if (library) {
if (!libraries) {
libraries = PL_NewHashTable(100, PL_HashString,
PL_CompareStrings, PL_CompareValues,
NULL, NULL);
if (!libraries) {
printf("OINK 1\n");
tmstats.btmalloc_failures++;
return NULL;
}
}
hash = PL_HashString(library);
hep = PL_HashTableRawLookup(libraries, hash, library);
he = *hep;
if (he) {
library_serial = (uint32) he->value;
} else {
library = strdup(library);
if (library) {
library_serial = ++library_serial_generator;
he = PL_HashTableRawAdd(libraries, hep, hash, library,
(void*) library_serial);
if (he) {
char *slash = strrchr(library, '/');
if (slash)
library = slash + 1;
log_event1('L', library_serial);
log_string(library);
}
}
if (!he) {
printf("OINK 2\n");
tmstats.btmalloc_failures++;
return NULL;
}
}
}
/* Now find the demangled method name and pc offset in it. */
symbol = info.dli_sname;
offset = (char*)pc - (char*)info.dli_saddr;
method = NULL;
if (symbol && (len = strlen(symbol)) != 0) {
/*
* Attempt to demangle symbol in case it's a C++ mangled name.
* The magic 3 passed here specifies DMGL_PARAMS | DMGL_ANSI.
*/
method = cplus_demangle(symbol, 3);
}
if (! method) {
method = symbol
? strdup(symbol)
: PR_smprintf("%s+%X",
info.dli_fname ? info.dli_fname : "main",
offset);
}
if (! method) {
printf("OINK 3\n");
tmstats.btmalloc_failures++;
return NULL;
}
/* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
method_serial = 0;
if (!methods) {
methods = PL_NewHashTable(10000, PL_HashString,
PL_CompareStrings, PL_CompareValues,
NULL, NULL);
if (!methods) {
printf("OINK 4\n");
tmstats.btmalloc_failures++;
free((void*) method);
return NULL;
}
}
hash = PL_HashString(method);
hep = PL_HashTableRawLookup(methods, hash, method);
he = *hep;
if (he) {
method_serial = (uint32) he->value;
free((void*) method);
method = (char *) he->key;
} else {
method_serial = ++method_serial_generator;
he = PL_HashTableRawAdd(methods, hep, hash, method,
(void*) method_serial);
if (!he) {
printf("OINK 5\n");
tmstats.btmalloc_failures++;
free((void*) method);
return NULL;
}
log_event2('N', method_serial, library_serial);
log_string(method);
}
/* Create a new callsite record. */
site = __libc_malloc(sizeof(callsite));
if (!site) {
printf("OINK 6\n");
tmstats.btmalloc_failures++;
return NULL;
}
/* Update parent and max-kids-per-parent stats. */
if (!parent->kids)
tmstats.calltree_parents++;
nkids = 1;
for (tmp = parent->kids; tmp; tmp = tmp->siblings)
nkids++;
if (nkids > tmstats.calltree_maxkids) {
tmstats.calltree_maxkids = nkids;
calltree_maxkids_parent = parent;
}
/* Insert the new site into the tree. */
site->pc = pc;
site->serial = ++callsite_serial_generator;
site->name = method;
site->parent = parent;
site->siblings = parent->kids;
parent->kids = site;
site->kids = NULL;
/* Log the site with its parent, method, and offset. */
log_event4('S', site->serial, parent->serial, method_serial, offset);
upward:
parent = site;
bpdown = bp;
bp = bpup;
} while (bp);
if (maxstack)
calltree_maxstack_top = site;
depth = 0;
for (tmp = site; tmp; tmp = tmp->parent)
depth++;
if (depth > tmstats.calltree_maxdepth)
tmstats.calltree_maxdepth = depth;
return site;
}
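/*
 * backtrace() captures the current frame pointer via setjmp (using the
 * x86/Linux glibc jmp_buf layout, hence JB_BP), skips the requested number
 * of frames so the allocator wrappers are not charged, and hands the
 * remaining frame chain to calltree() under the suppress_tracing guard.
 */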
callsite *
backtrace(int skip)
{
jmp_buf jb;
uint32 *bp, *bpdown, pc;
callsite *site, **key;
PLHashNumber hash;
PLHashEntry **hep, *he;
int i, n;
tmstats.backtrace_calls++;
suppress_tracing++;
setjmp(jb);
/* Stack walking code adapted from Kipp's "leaky". */
bp = (uint32*) jb[0].__jmpbuf[JB_BP];
while (--skip >= 0) {
bpdown = (uint32*) *bp++;
pc = *bp;
if (pc < 0x08000000 || pc > 0x7fffffff || bpdown < bp)
break;
bp = bpdown;
}
site = calltree(bp);
if (!site) {
tmstats.backtrace_failures++;
PR_ASSERT(tmstats.backtrace_failures < 100);
}
suppress_tracing--;
return site;
}
typedef struct allocation {
PLHashEntry entry;
size_t size;
} allocation;
#define ALLOC_HEAP_SIZE 150000
static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;
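/*
 * The 'allocations' table maps live pointers to their callsite and size.
 * Its entries are drawn from a statically reserved freelist (alloc_heap),
 * falling back to __libc_malloc on overflow, so bookkeeping for traced
 * allocations does not have to go back through the traced malloc below.
 */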
static void *a2f_alloctable(void *pool, PRSize size)
{
return __libc_malloc(size);
}
static void a2f_freetable(void *pool, void *item)
{
__libc_free(item);
}
static PLHashEntry *a2f_allocentry(void *pool, const void *key)
{
allocation **afp, *alloc;
int n;
if (!alloc_heap_initialized) {
n = ALLOC_HEAP_SIZE;
afp = &alloc_freelist;
for (alloc = alloc_heap; --n >= 0; alloc++) {
*afp = alloc;
afp = (allocation**) &alloc->entry.next;
}
*afp = NULL;
alloc_heap_initialized = 1;
}
afp = &alloc_freelist;
alloc = *afp;
if (!alloc)
return __libc_malloc(sizeof(allocation));
*afp = (allocation*) alloc->entry.next;
return &alloc->entry;
}
static void a2f_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
allocation *alloc;
if (flag != HT_FREE_ENTRY)
return;
alloc = (allocation*) he;
if (&alloc_heap[0] <= alloc && alloc < &alloc_heap[ALLOC_HEAP_SIZE]) {
alloc->entry.next = &alloc_freelist->entry;
alloc_freelist = alloc;
} else {
__libc_free((void*) alloc);
}
}
static PLHashAllocOps a2f_hashallocops = {
a2f_alloctable, a2f_freetable,
a2f_allocentry, a2f_freeentry
};
static PLHashNumber hash_pointer(const void *key)
{
return (PLHashNumber) key;
}
static PLHashTable *allocations = NULL;
static PLHashTable *new_allocations()
{
allocations = PL_NewHashTable(200000, hash_pointer,
PL_CompareValues, PL_CompareValues,
&a2f_hashallocops, NULL);
return allocations;
}
#define get_allocations() (allocations ? allocations : new_allocations())
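/*
 * The wrappers below shadow the standard allocator entry points at link
 * time. Each one calls the underlying __libc_* routine, then, unless tracing
 * is suppressed or the call failed, takes a backtrace, logs an 'M', 'C',
 * 'R', or 'F' event with the callsite serial and size, and updates the live
 * allocation table, all under the tmmon monitor once it exists.
 */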
__ptr_t malloc(size_t size)
{
__ptr_t *ptr;
callsite *site;
PLHashEntry *he;
allocation *alloc;
ptr = __libc_malloc(size);
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.malloc_calls++;
if (!ptr) {
tmstats.malloc_failures++;
} else if (suppress_tracing == 0) {
site = backtrace(2);
if (site)
log_event2('M', site->serial, size);
if (get_allocations()) {
suppress_tracing++;
he = PL_HashTableAdd(allocations, ptr, site);
suppress_tracing--;
if (he) {
alloc = (allocation*) he;
alloc->size = size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
return ptr;
}
__ptr_t calloc(size_t count, size_t size)
{
__ptr_t *ptr;
callsite *site;
PLHashEntry *he;
allocation *alloc;
ptr = __libc_calloc(count, size);
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.calloc_calls++;
if (!ptr) {
tmstats.calloc_failures++;
} else if (suppress_tracing == 0) {
site = backtrace(2);
size *= count;
if (site)
log_event2('C', site->serial, size);
if (get_allocations()) {
suppress_tracing++;
he = PL_HashTableAdd(allocations, ptr, site);
suppress_tracing--;
if (he) {
alloc = (allocation*) he;
alloc->size = size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
return ptr;
}
__ptr_t realloc(__ptr_t ptr, size_t size)
{
size_t oldsize;
PLHashNumber hash;
PLHashEntry *he;
allocation *alloc;
callsite *site;
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.realloc_calls++;
if (suppress_tracing == 0) {
oldsize = 0;
if (ptr && get_allocations()) {
hash = hash_pointer(ptr);
he = *PL_HashTableRawLookup(allocations, hash, ptr);
if (he) {
alloc = (allocation*) he;
oldsize = alloc->size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
ptr = __libc_realloc(ptr, size);
if (tmmon)
PR_EnterMonitor(tmmon);
if (!ptr && size) {
tmstats.realloc_failures++;
} else if (suppress_tracing == 0) {
site = backtrace(2);
if (site)
log_event3('R', site->serial, oldsize, size);
if (ptr && allocations) {
suppress_tracing++;
he = PL_HashTableAdd(allocations, ptr, site);
suppress_tracing--;
if (he) {
alloc = (allocation*) he;
alloc->size = size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
return ptr;
}
void free(__ptr_t ptr)
{
PLHashEntry **hep, *he;
callsite *site;
allocation *alloc;
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.free_calls++;
if (!ptr) {
tmstats.null_free_calls++;
} else if (suppress_tracing == 0 && get_allocations()) {
hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
he = *hep;
if (he) {
site = (callsite*) he->value;
if (site) {
alloc = (allocation*) he;
log_event2('F', site->serial, alloc->size);
}
PL_HashTableRawRemove(allocations, hep, he);
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
__libc_free(ptr);
}
static void cleanup(void)
{
if (tmstats.backtrace_failures) {
fprintf(stderr,
"TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
(unsigned long) tmstats.backtrace_failures,
(unsigned long) tmstats.btmalloc_failures,
(unsigned long) tmstats.dladdr_failures);
}
if (bufpos != 0)
flush_log_buffer();
if (logfd > 0)
close(logfd);
if (tmmon)
PR_DestroyMonitor(tmmon);
}
static const char magic[] = NS_TRACE_MALLOC_LOGFILE_MAGIC;
PR_IMPLEMENT(void) NS_TraceMalloc(int fd)
{
/* We must be running on the primordial thread. */
PR_ASSERT(suppress_tracing == 0);
suppress_tracing = 1;
logfd = fd;
(void) write(fd, magic, 16);
atexit(cleanup);
tmmon = PR_NewMonitor();
if (bufpos != 0)
flush_log_buffer();
suppress_tracing = 0;
}
#endif /* defined NS_TRACE_MALLOC */

70
xpcom/base/nsTraceMalloc.h Normal file
View file

@@ -0,0 +1,70 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is nsTraceMalloc.c/bloatblame.c code, released
* April 19, 2000.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 2000 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Brendan Eich, 14-April-2000
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License (the "GPL"), in which case the
* provisions of the GPL are applicable instead of those above.
* If you wish to allow use of your version of this file only
* under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*/
#ifndef nsTraceMalloc_h___
#define nsTraceMalloc_h___
#include "prtypes.h"
PR_BEGIN_EXTERN_C
#define NS_TRACE_MALLOC_LOGFILE_MAGIC "XPCOM\nTMLog01\r\n\032"
/**
* Call NS_TraceMalloc with a valid log file descriptor to enable logging
* of compressed malloc traces, including callsite chains. Integers are
* unsigned, at most 32 bits, and encoded as follows:
* 0-127 0xxxxxxx (binary, one byte)
* 128-16383 10xxxxxx xxxxxxxx
* 16384-0x1fffff 110xxxxx xxxxxxxx xxxxxxxx
* 0x200000-0xfffffff 1110xxxx xxxxxxxx xxxxxxxx xxxxxxxx
* 0x10000000-0xffffffff 11110000 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* Strings are NUL-terminated ASCII.
*
* Event Operands
* 'L' library_serial shared_object_filename_string
* 'N' method_serial library_serial demangled_name_string
* 'S' site_serial parent_serial method_serial calling_pc_offset
* 'M' site_serial malloc_size
* 'C' site_serial calloc_size
* 'R' site_serial realloc_oldsize realloc_size
* 'F' site_serial free_size
*
* See xpcom/base/bloatblame.c for an example log-file reader.
*/
PR_EXTERN(void) NS_TraceMalloc(int fd);
PR_END_EXTERN_C
#endif /* nsTraceMalloc_h___ */
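For orientation, a caller hands NS_TraceMalloc an already-open, writable file descriptor and tracing proceeds from there. The sketch below shows one plausible way to wire that up; the file name, open flags, and wrapper function are illustrative assumptions, not part of this commit.

#include <fcntl.h>
#include "nsTraceMalloc.h"

/* Hypothetical caller: open a trace log and start logging into it. */
static void StartTraceMalloc(void)
{
    int fd = open("malloc-trace.log", O_CREAT | O_WRONLY | O_TRUNC, 0644);
    if (fd >= 0)
        NS_TraceMalloc(fd); /* writes the 16-byte magic, then appends log events */
}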

View file

@@ -68,6 +68,15 @@ EXPORTS = \
nscore.h \
$(NULL)
ifdef NS_TRACE_MALLOC
CSRCS += nsTraceMalloc.c
EXPORTS += nsTraceMalloc.h
DEFINES += -DNS_TRACE_MALLOC
SIMPLE_PROGRAMS = bloatblame
LIBS += $(NSPR_LIBS)
endif
XPIDLSRCS = \
nsrootidl.idl \
nsISupports.idl \

836
xpcom/base/bloatblame.c Normal file
Просмотреть файл

@ -0,0 +1,836 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express oqr
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is nsTraceMalloc.c/bloatblame.c code, released
* April 19, 2000.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 2000 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Brendan Eich, 14-April-2000
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License (the "GPL"), in which case the
* provisions of the GPL are applicable instead of those above.
* If you wish to allow use of your version of this file only
* under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <time.h>
#include <unistd.h>
#include "prtypes.h"
#include "prlog.h"
#include "prprf.h"
#include "plhash.h"
#include "nsTraceMalloc.h"
static char *program;
static int sort_by_direct = 0;
static int do_tree_dump = 0;
static uint32 min_subtotal = 0;
static int accum_byte(uint32 *uip)
{
int c = getchar();
if (c == EOF)
return 0;
*uip = (*uip << 8) | c;
return 1;
}
static int get_uint32(uint32 *uip)
{
int c;
uint32 ui;
c = getchar();
if (c == EOF)
return 0;
ui = 0;
if (c & 0x80) {
c &= 0x7f;
if (c & 0x40) {
c &= 0x3f;
if (c & 0x20) {
c &= 0x1f;
if (c & 0x10) {
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
if (!accum_byte(&ui))
return 0;
} else {
ui = (uint32) c;
}
*uip = ui;
return 1;
}
static char *get_string(void)
{
char *cp;
int c;
static char buf[256];
static char *bp = buf, *ep = buf + sizeof buf;
static size_t bsize = sizeof buf;
cp = bp;
do {
c = getchar();
if (c == EOF)
return 0;
if (cp == ep) {
if (bp == buf) {
bp = malloc(2 * bsize);
memcpy(bp, buf, bsize);
} else {
bp = realloc(bp, 2 * bsize);
}
if (!bp)
return 0;
cp = bp + bsize;
bsize *= 2;
ep = bp + bsize;
}
*cp++ = c;
} while (c != '\0');
return strdup(bp);
}
typedef struct logevent {
char type;
uint32 serial;
union {
char *libname;
struct {
uint32 library;
char *name;
} method;
struct {
uint32 parent;
uint32 method;
uint32 offset;
} site;
struct {
uint32 oldsize;
uint32 size;
} alloc;
} u;
} logevent;
static int get_logevent(logevent *event)
{
int c;
char *s;
c = getchar();
if (c == EOF)
return 0;
event->type = (char) c;
if (!get_uint32(&event->serial))
return 0;
switch (c) {
case 'L':
s = get_string();
if (!s)
return 0;
event->u.libname = s;
break;
case 'N':
if (!get_uint32(&event->u.method.library))
return 0;
s = get_string();
if (!s)
return 0;
event->u.method.name = s;
break;
case 'S':
if (!get_uint32(&event->u.site.parent))
return 0;
if (!get_uint32(&event->u.site.method))
return 0;
if (!get_uint32(&event->u.site.offset))
return 0;
break;
case 'M':
case 'C':
case 'F':
event->u.alloc.oldsize = 0;
if (!get_uint32(&event->u.alloc.size))
return 0;
break;
case 'R':
if (!get_uint32(&event->u.alloc.oldsize))
return 0;
if (!get_uint32(&event->u.alloc.size))
return 0;
break;
}
return 1;
}
typedef struct graphedge graphedge;
typedef struct graphnode graphnode;
typedef struct callsite callsite;
struct graphnode {
PLHashEntry entry; /* key is serial or name, value must be name */
graphedge *in;
graphedge *out;
graphnode *up;
uint32 direct; /* bytes allocated by this node's code */
uint32 total; /* direct + bytes from all descendents */
int visited; /* flag used during walk_callsite_tree */
};
#define graphnode_name(node) ((char*) (node)->entry.value)
#define library_serial(lib) ((uint32) (lib)->entry.key)
#define component_name(comp) ((const char*) (comp)->entry.key)
struct graphedge {
graphedge *next;
graphnode *node;
uint32 direct;
uint32 total;
};
struct callsite {
PLHashEntry entry;
callsite *parent;
callsite *siblings;
callsite *kids;
graphnode *method;
uint32 offset;
uint32 direct;
uint32 total;
};
#define callsite_serial(site) ((uint32) (site)->entry.key)
static void connect_nodes(graphnode *from, graphnode *to, callsite *site)
{
graphedge *edge;
for (edge = from->out; edge; edge = edge->next) {
if (edge->node == to) {
edge[0].direct += site->direct;
edge[1].direct += site->direct;
edge[0].total += site->total;
edge[1].total += site->total;
return;
}
}
edge = (graphedge*) malloc(2 * sizeof(graphedge));
if (!edge) {
perror(program);
exit(1);
}
edge[0].node = to;
edge[0].next = from->out;
from->out = &edge[0];
edge[1].node = from;
edge[1].next = to->in;
to->in = &edge[1];
edge[0].direct = edge[1].direct = site->direct;
edge[0].total = edge[1].total = site->total;
}
static void *generic_alloctable(void *pool, PRSize size)
{
return malloc(size);
}
static void generic_freetable(void *pool, void *item)
{
free(item);
}
static PLHashEntry *callsite_allocentry(void *pool, const void *key)
{
return malloc(sizeof(callsite));
}
static PLHashEntry *graphnode_allocentry(void *pool, const void *key)
{
graphnode *node = (graphnode*) malloc(sizeof(graphnode));
if (node) {
node->in = node->out = NULL;
node->up = NULL;
node->direct = node->total = 0;
node->visited = 0;
}
return &node->entry;
}
static void graphnode_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
/* Always free the value, which points to a strdup'd string. */
free(he->value);
/* Free the whole thing if we're told to. */
if (flag == HT_FREE_ENTRY)
free((void*) he);
}
static void component_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
if (flag == HT_FREE_ENTRY) {
graphnode *comp = (graphnode*) he;
/* Free the key, which was strdup'd (N.B. value also points to it). */
free((void*) component_name(comp));
free((void*) comp);
}
}
static PLHashAllocOps callsite_hashallocops = {
generic_alloctable, generic_freetable,
callsite_allocentry, graphnode_freeentry
};
static PLHashAllocOps graphnode_hashallocops = {
generic_alloctable, generic_freetable,
graphnode_allocentry, graphnode_freeentry
};
static PLHashAllocOps component_hashallocops = {
generic_alloctable, generic_freetable,
graphnode_allocentry, component_freeentry
};
static PLHashNumber hash_serial(const void *key)
{
return (PLHashNumber) key;
}
static PLHashTable *libraries;
static PLHashTable *components;
static PLHashTable *methods;
static PLHashTable *callsites;
static callsite calltree_root;
static void walk_callsite_tree(callsite *site, int level, int kidnum, FILE *fp)
{
callsite *parent;
graphnode *meth, *pmeth, *comp, *pcomp, *lib, *plib;
callsite *kid;
int nkids;
parent = site->parent;
meth = comp = lib = NULL;
if (parent) {
meth = site->method;
if (meth) {
pmeth = parent->method;
if (pmeth && pmeth != meth) {
if (!meth->visited)
meth->total += site->total;
connect_nodes(pmeth, meth, site);
comp = meth->up;
if (comp) {
pcomp = pmeth->up;
if (pcomp && pcomp != comp) {
if (!comp->visited)
comp->total += site->total;
connect_nodes(pcomp, comp, site);
lib = comp->up;
if (lib) {
plib = pcomp->up;
if (plib && plib != lib) {
if (!lib->visited)
plib->total += site->total;
connect_nodes(plib, lib, site);
}
lib->visited = 1;
}
}
comp->visited = 1;
}
}
meth->visited = 1;
}
}
if (do_tree_dump) {
fprintf(fp, "%c%*s%3d %3d %s %lu %lu\n",
site->kids ? '+' : '-', level, "", level, kidnum,
meth ? graphnode_name(meth) : "???",
(unsigned long)site->direct, (unsigned long)site->total);
}
nkids = 0;
for (kid = site->kids; kid; kid = kid->siblings) {
walk_callsite_tree(kid, level + 1, nkids, fp);
nkids++;
}
if (meth) {
meth->visited = 0;
if (comp) {
comp->visited = 0;
if (lib)
lib->visited = 0;
}
}
}
static PRIntn tabulate_node(PLHashEntry *he, PRIntn i, void *arg)
{
graphnode **table = (graphnode**) arg;
table[i] = (graphnode*) he;
return HT_ENUMERATE_NEXT;
}
/* Sort in reverse size order, so biggest node comes first. */
static int node_table_compare(const void *p1, const void *p2)
{
const graphnode *node1, *node2;
uint32 key1, key2;
node1 = *(const graphnode**) p1;
node2 = *(const graphnode**) p2;
if (sort_by_direct) {
key1 = node1->direct;
key2 = node2->direct;
} else {
key1 = node1->total;
key2 = node2->total;
}
if (key1 < key2)
return 1;
if (key1 > key2)
return -1;
return 0;
}
static const char *prettybig(uint32 num, char *buf, size_t limit)
{
if (num > 1000000)
PR_snprintf(buf, limit, "%1.2fM", (double) num / 1e6);
else if (num > 1000)
PR_snprintf(buf, limit, "%1.2fK", (double) num / 1e3);
else
PR_snprintf(buf, limit, "%lu", (unsigned long) num);
return buf;
}
static double percent(uint32 num, uint32 total)
{
if (num == 0)
return 0.0;
return ((double) num * 100) / (double) total;
}
/* Linked list bubble-sort (waterson and brendan went bald hacking this). */
static void sort_graphedge_list(graphedge **currp)
{
graphedge *curr, *next, **nextp, *tmp;
while ((curr = *currp) != NULL && curr->next) {
nextp = &curr->next;
while ((next = *nextp) != NULL) {
if (curr->total < next->total) {
tmp = curr->next;
*currp = tmp;
if (tmp == next) {
PR_ASSERT(nextp == &curr->next);
curr->next = next->next;
next->next = curr;
} else {
*nextp = next->next;
curr->next = next->next;
next->next = tmp;
*currp = next;
*nextp = curr;
nextp = &curr->next;
}
curr = next;
continue;
}
nextp = &next->next;
}
currp = &curr->next;
}
}
static void dump_graphedge_list(graphedge *list, FILE *fp)
{
uint32 total;
graphedge *edge;
char buf[32];
fputs("<td valign=top>", fp);
total = 0;
for (edge = list; edge; edge = edge->next)
total += edge->total;
for (edge = list; edge; edge = edge->next) {
fprintf(fp, "<a href='#%s'>%s&nbsp;(%%%1.2f)</a>\n",
graphnode_name(edge->node),
prettybig(edge->total, buf, sizeof buf),
percent(edge->total, total));
}
fputs("</td>", fp);
}
static void dump_graph(PLHashTable *hashtbl, const char *title, FILE *fp)
{
uint32 i, count;
graphnode **table;
char buf1[32], buf2[32];
count = hashtbl->nentries;
table = (graphnode**) malloc(count * sizeof(graphnode*));
if (!table) {
perror(program);
exit(1);
}
PL_HashTableEnumerateEntries(hashtbl, tabulate_node, table);
qsort(table, count, sizeof(graphnode*), node_table_compare);
fprintf(fp,
"<table border=1>\n"
"<tr><th>%s</th>"
"<th>Total/Direct (percents)</th>"
"<th>Fan-in</th>"
"<th>Fan-out</th>"
"</tr>\n",
title);
for (i = 0; i < count; i++) {
graphnode *node;
node = table[i];
/* XXX cast out bogusly large components (threading confusion? */
if (node->total > calltree_root.total)
continue;
/* Don't bother with truly puny nodes. */
if (node->total < min_subtotal)
break;
fprintf(fp,
"<tr>"
"<td valign=top><a name='%s'>%s</td>"
"<td valign=top>%s/%s (%%%1.2f/%%%1.2f)</td>",
graphnode_name(node),
graphnode_name(node),
prettybig(node->total, buf1, sizeof buf1),
prettybig(node->direct, buf2, sizeof buf2),
percent(node->total, calltree_root.total),
percent(node->direct, calltree_root.total));
sort_graphedge_list(&node->in);
dump_graphedge_list(node->in, fp);
sort_graphedge_list(&node->out);
dump_graphedge_list(node->out, fp);
fputs("</tr>\n", fp);
}
fputs("</table>\n", fp);
free((void*) table);
}
static const char magic[] = NS_TRACE_MALLOC_LOGFILE_MAGIC;
int main(int argc, char **argv)
{
int c;
FILE *fp;
char buf[16];
time_t start;
logevent event;
program = *argv;
while ((c = getopt(argc, argv, "dtm:")) != EOF) {
switch (c) {
case 'd':
sort_by_direct = 1;
break;
case 't':
do_tree_dump = 1;
break;
case 'm':
min_subtotal = atoi(optarg);
break;
default:
fprintf(stderr, "usage: %s [-dt] [-m min] [output.html]\n",
program);
return 2;
}
}
argc -= optind;
argv += optind;
if (argc == 0) {
fp = stdout;
} else {
fp = fopen(*argv, "w");
if (!fp) {
fprintf(stderr, "%s: can't open %s: %s\n",
program, *argv, strerror(errno));
return 1;
}
}
if (read(0, buf, 16) != 16 || strncmp(buf, magic, 16) != 0) {
fprintf(stderr, "%s: bad magic string %s at start of standard input.\n",
program, buf);
return 1;
}
start = time(NULL);
fprintf(fp, "%s starting at %s", program, ctime(&start));
fflush(fp);
libraries = PL_NewHashTable(100, hash_serial, PL_CompareValues,
PL_CompareStrings, &graphnode_hashallocops,
NULL);
components = PL_NewHashTable(10000, PL_HashString, PL_CompareStrings,
PL_CompareValues, &component_hashallocops,
NULL);
methods = PL_NewHashTable(10000, hash_serial, PL_CompareValues,
PL_CompareStrings, &graphnode_hashallocops,
NULL);
callsites = PL_NewHashTable(200000, hash_serial, PL_CompareValues,
PL_CompareValues, &callsite_hashallocops,
NULL);
calltree_root.entry.value = (void*) strdup("root");
if (!libraries || !components || !methods || !callsites ||
!calltree_root.entry.value) {
perror(program);
return 1;
}
while (get_logevent(&event)) {
switch (event.type) {
case 'L': {
const void *key;
PLHashNumber hash;
PLHashEntry **hep, *he;
key = (const void*) event.serial;
hash = hash_serial(key);
hep = PL_HashTableRawLookup(libraries, hash, key);
he = *hep;
PR_ASSERT(!he);
if (he) return 2;
he = PL_HashTableRawAdd(libraries, hep, hash, key, event.u.libname);
if (!he) {
perror(program);
return 1;
}
break;
}
case 'N': {
const void *key;
PLHashNumber hash;
PLHashEntry **hep, *he;
char *name, *head, *mark, save;
graphnode *meth, *comp, *lib;
key = (const void*) event.serial;
hash = hash_serial(key);
hep = PL_HashTableRawLookup(methods, hash, key);
he = *hep;
PR_ASSERT(!he);
if (he) return 2;
name = event.u.method.name;
he = PL_HashTableRawAdd(methods, hep, hash, key, name);
if (!he) {
perror(program);
return 1;
}
meth = (graphnode*) he;
head = name;
mark = strchr(name, ':');
if (!mark) {
mark = name;
while (*mark != '\0' && *mark == '_')
mark++;
head = mark;
mark = strchr(head, '_');
if (!mark) {
mark = strchr(head, '+');
if (!mark)
mark = head + strlen(head);
}
}
save = *mark;
*mark = '\0';
hash = PL_HashString(head);
hep = PL_HashTableRawLookup(components, hash, head);
he = *hep;
if (he) {
comp = (graphnode*) he;
} else {
head = strdup(head);
if (head)
he = PL_HashTableRawAdd(components, hep, hash, head, head);
if (!he) {
perror(program);
exit(1);
}
comp = (graphnode*) he;
key = (const void*) event.u.method.library;
hash = hash_serial(key);
lib = (graphnode*) *PL_HashTableRawLookup(libraries, hash, key);
comp->up = lib;
}
*mark = save;
meth->up = comp;
break;
}
case 'S': {
const void *key, *pkey, *mkey;
PLHashNumber hash, phash, mhash;
PLHashEntry **hep, *he;
callsite *site, *parent;
graphnode *meth;
key = (const void*) event.serial;
hash = hash_serial(key);
hep = PL_HashTableRawLookup(callsites, hash, key);
he = *hep;
PR_ASSERT(!he);
if (he) return 2;
if (event.u.site.parent == 0) {
parent = &calltree_root;
} else {
pkey = (const void*) event.u.site.parent;
phash = hash_serial(pkey);
parent = (callsite*)
*PL_HashTableRawLookup(callsites, phash, pkey);
if (!parent) {
fprintf(fp, "### no parent for %lu (%lu)!\n",
(unsigned long) event.serial,
(unsigned long) event.u.site.parent);
continue;
}
}
he = PL_HashTableRawAdd(callsites, hep, hash, key, NULL);
if (!he) {
perror(program);
return 1;
}
site = (callsite*) he;
site->parent = parent;
site->siblings = parent->kids;
parent->kids = site;
site->kids = NULL;
mkey = (const void*) event.u.site.method;
mhash = hash_serial(mkey);
meth = (graphnode*) *PL_HashTableRawLookup(methods, mhash, mkey);
site->method = meth;
site->offset = event.u.site.offset;
site->direct = site->total = 0;
break;
}
case 'M':
case 'C':
case 'R': {
const void *key;
PLHashNumber hash;
callsite *site, *tmp;
int32 delta;
graphnode *meth, *comp, *lib;
key = (const void*) event.serial;
hash = hash_serial(key);
site = (callsite*) *PL_HashTableRawLookup(callsites, hash, key);
if (!site) {
fprintf(fp, "### no callsite for '%c' (%lu)!\n",
event.type, (unsigned long) event.serial);
continue;
}
delta = (int32)event.u.alloc.size - (int32)event.u.alloc.oldsize;
site->direct += delta;
for (tmp = site; tmp; tmp = tmp->parent)
tmp->total += delta;
meth = site->method;
if (meth) {
meth->direct += delta;
comp = meth->up;
if (comp) {
comp->direct += delta;
lib = comp->up;
if (lib)
lib->direct += delta;
}
}
break;
}
case 'F':
break;
}
}
walk_callsite_tree(&calltree_root, 0, 0, fp);
dump_graph(libraries, "Library", fp);
fputs("<hr>\n", fp);
dump_graph(components, "Component", fp);
#if 0
fputs("<hr>\n", fp);
dump_graph(methods, "Method", fp);
#endif
fclose(fp);
return 0;
}

739
xpcom/base/nsTraceMalloc.c Normal file
Просмотреть файл

@ -0,0 +1,739 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express oqr
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is nsTraceMalloc.c/bloatblame.c code, released
* April 19, 2000.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 2000 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Brendan Eich, 14-April-2000
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License (the "GPL"), in which case the
* provisions of the GPL are applicable instead of those above.
* If you wish to allow use of your version of this file only
* under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*/
#if defined NS_TRACE_MALLOC
/*
* TODO:
* - #ifdef __linux__/x86 and port to other platforms
* - unify calltree with gc/boehm somehow (common utility libs)
* - provide NS_DumpTraceMallocStats() and hook up to some xul kbd event
* - provide NS_TraceMallocTimestamp() or do it internally
*/
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>
#include <unistd.h>
#include "plhash.h"
#include "prlog.h"
#include "prmon.h"
#include "prprf.h"
#include "nsTraceMalloc.h"
#define __USE_GNU 1
#include <dlfcn.h>
/* From libiberty, why isn't this in <libiberty.h> ? */
extern char *cplus_demangle(const char *, int);
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void __libc_free(__ptr_t);
static int logfd = -1;
static uint32 logsize = 0;
static uint32 simlogsize = 0;
static char buffer[16*1024];
static int bufpos = 0;
static PRMonitor *tmmon = NULL;
static void flush_log_buffer()
{
int len, cnt;
char *bp;
len = bufpos;
if (len == 0)
return;
if (logfd >= 0) {
bp = buffer;
do {
cnt = write(logfd, bp, len);
if (cnt <= 0) {
printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
return;
}
bp += cnt;
len -= cnt;
} while (len > 0);
logsize += len;
}
simlogsize += len;
bufpos = 0;
}
static void log_byte(char byte)
{
if (bufpos == sizeof buffer)
flush_log_buffer();
buffer[bufpos++] = byte;
}
static void log_string(const char *str)
{
int len, rem, cnt;
len = strlen(str);
while ((rem = bufpos + len - sizeof buffer) > 0) {
cnt = len - rem;
strncpy(&buffer[bufpos], str, cnt);
str += cnt;
bufpos += cnt;
flush_log_buffer();
len = rem;
}
strncpy(&buffer[bufpos], str, len);
bufpos += len;
/* Terminate the string. */
log_byte('\0');
}
static void log_uint32(uint32 ival)
{
if (ival < 0x80) {
/* 0xxx xxxx */
log_byte((char) ival);
} else if (ival < 0x4000) {
/* 10xx xxxx xxxx xxxx */
log_byte((char) ((ival >> 8) | 0x80));
log_byte((char) (ival & 0xff));
} else if (ival < 0x200000) {
/* 110x xxxx xxxx xxxx xxxx xxxx */
log_byte((char) ((ival >> 16) | 0xc0));
log_byte((char) ((ival >> 8) & 0xff));
log_byte((char) (ival & 0xff));
} else if (ival < 0x10000000) {
/* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
log_byte((char) ((ival >> 24) | 0xe0));
log_byte((char) ((ival >> 16) & 0xff));
log_byte((char) ((ival >> 8) & 0xff));
log_byte((char) (ival & 0xff));
} else {
/* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
log_byte((char) 0xf0);
log_byte((char) ((ival >> 24) & 0xff));
log_byte((char) ((ival >> 16) & 0xff));
log_byte((char) ((ival >> 8) & 0xff));
log_byte((char) (ival & 0xff));
}
}
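/*
 * Worked examples of this variable-length encoding (illustrative values;
 * the reader in bloatblame.c performs the inverse):
 *            5  ->  05
 *          300  ->  81 2c
 *     0x123456  ->  d2 34 56
 *   0x12345678  ->  f0 12 34 56 78
 */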
static void log_event1(char event, uint32 serial)
{
log_byte(event);
log_uint32((uint32) serial);
}
static void log_event2(char event, uint32 serial, size_t size)
{
log_event1(event, serial);
log_uint32((uint32) size);
}
static void log_event3(char event, uint32 serial, size_t oldsize, size_t size)
{
log_event2(event, serial, oldsize);
log_uint32((uint32) size);
}
static void log_event4(char event, uint32 serial, uint32 ui2, uint32 ui3,
uint32 ui4)
{
log_event3(event, serial, ui2, ui3);
log_uint32(ui4);
}
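/*
 * For example (illustrative serial and size), a malloc of 300 bytes at the
 * callsite with serial 5 is logged by log_event2('M', 5, 300) as the bytes
 * 4d 05 81 2c: the event letter followed by each operand in the encoding
 * above.
 */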
typedef struct callsite callsite;
struct callsite {
uint32 pc;
uint32 serial;
char *name;
callsite *parent;
callsite *siblings;
callsite *kids;
};
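/*
 * Callsites form a first-child/next-sibling tree: 'kids' points at the most
 * recently used child, 'siblings' links the rest, and 'parent' points back
 * toward the root.  Each distinct backtrace therefore corresponds to one
 * root-to-leaf path, and each node is logged exactly once as an 'S' event.
 */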
static uint32 suppress_tracing = 0;
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static callsite calltree_root = {0, 0, NULL, NULL, NULL, NULL};
static struct tmstats {
uint32 calltree_maxstack;
uint32 calltree_maxdepth;
uint32 calltree_parents;
uint32 calltree_maxkids;
uint32 calltree_kidhits;
uint32 calltree_kidmisses;
uint32 calltree_kidsteps;
uint32 callsite_recurrences;
uint32 backtrace_calls;
uint32 backtrace_failures;
uint32 btmalloc_failures;
uint32 dladdr_failures;
uint32 malloc_calls;
uint32 malloc_failures;
uint32 calloc_calls;
uint32 calloc_failures;
uint32 realloc_calls;
uint32 realloc_failures;
uint32 free_calls;
uint32 null_free_calls;
} tmstats;
/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;
/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;
/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;
/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;
/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;
static callsite *calltree(uint32 *bp)
{
uint32 depth, nkids;
uint32 *bpup, *bpdown, pc;
callsite *parent, *site, **csp, *tmp;
Dl_info info;
int ok, len, maxstack, offset;
uint32 library_serial, method_serial;
const char *library, *symbol;
char *method;
PLHashNumber hash;
PLHashEntry **hep, *he;
/* Reverse the stack frame list to avoid recursion. */
bpup = NULL;
for (depth = 0; ; depth++) {
bpdown = (uint32*) bp[0];
bp[0] = (uint32) bpup;
pc = bp[1];
if (pc < 0x08000000 || pc > 0x7fffffff || bpdown < bp)
break;
bpup = bp;
bp = bpdown;
}
maxstack = (depth > tmstats.calltree_maxstack);
if (maxstack)
tmstats.calltree_maxstack = depth;
/* Reverse the stack again, finding and building a path in the tree. */
parent = &calltree_root;
do {
bpup = (uint32*) bp[0];
bp[0] = (uint32) bpdown;
pc = bp[1];
csp = &parent->kids;
while ((site = *csp) != NULL) {
if (site->pc == pc) {
tmstats.calltree_kidhits++;
/* Put the most recently used site at the front of siblings. */
*csp = site->siblings;
site->siblings = parent->kids;
parent->kids = site;
goto upward;
}
tmstats.calltree_kidsteps++;
csp = &site->siblings;
}
tmstats.calltree_kidmisses++;
/* Check for recursion: see if pc is on our ancestor line. */
for (site = parent; site; site = site->parent) {
if (site->pc == pc) {
tmstats.callsite_recurrences++;
last_callsite_recurrence = site;
goto upward;
}
}
/* Not in tree, let's find our symbolic callsite info. */
info.dli_fname = info.dli_sname = NULL;
ok = dladdr((void*) pc, &info);
if (!ok) {  /* dladdr returns 0 on failure, non-zero on success */
tmstats.dladdr_failures++;
return NULL;
}
/* Check whether we need to emit a library trace record. */
library_serial = 0;
library = info.dli_fname;
if (library) {
if (!libraries) {
libraries = PL_NewHashTable(100, PL_HashString,
PL_CompareStrings, PL_CompareValues,
NULL, NULL);
if (!libraries) {
printf("OINK 1\n");
tmstats.btmalloc_failures++;
return NULL;
}
}
hash = PL_HashString(library);
hep = PL_HashTableRawLookup(libraries, hash, library);
he = *hep;
if (he) {
library_serial = (uint32) he->value;
} else {
library = strdup(library);
if (library) {
library_serial = ++library_serial_generator;
he = PL_HashTableRawAdd(libraries, hep, hash, library,
(void*) library_serial);
if (he) {
char *slash = strrchr(library, '/');
if (slash)
library = slash + 1;
log_event1('L', library_serial);
log_string(library);
}
}
if (!he) {
printf("OINK 2\n");
tmstats.btmalloc_failures++;
return NULL;
}
}
}
/* Now find the demangled method name and pc offset in it. */
symbol = info.dli_sname;
offset = (char*)pc - (char*)info.dli_saddr;
method = NULL;
if (symbol && (len = strlen(symbol)) != 0) {
/*
* Attempt to demangle symbol in case it's a C++ mangled name.
* The magic 3 passed here specifies DMGL_PARAMS | DMGL_ANSI.
*/
method = cplus_demangle(symbol, 3);
}
if (! method) {
method = symbol
? strdup(symbol)
: PR_smprintf("%s+%X",
info.dli_fname ? info.dli_fname : "main",
offset);
}
if (! method) {
printf("OINK 3\n");
tmstats.btmalloc_failures++;
return NULL;
}
/* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
method_serial = 0;
if (!methods) {
methods = PL_NewHashTable(10000, PL_HashString,
PL_CompareStrings, PL_CompareValues,
NULL, NULL);
if (!methods) {
printf("OINK 4\n");
tmstats.btmalloc_failures++;
free((void*) method);
return NULL;
}
}
hash = PL_HashString(method);
hep = PL_HashTableRawLookup(methods, hash, method);
he = *hep;
if (he) {
method_serial = (uint32) he->value;
free((void*) method);
method = (char *) he->key;
} else {
method_serial = ++method_serial_generator;
he = PL_HashTableRawAdd(methods, hep, hash, method,
(void*) method_serial);
if (!he) {
printf("OINK 5\n");
tmstats.btmalloc_failures++;
free((void*) method);
return NULL;
}
log_event2('N', method_serial, library_serial);
log_string(method);
}
/* Create a new callsite record. */
site = __libc_malloc(sizeof(callsite));
if (!site) {
printf("OINK 6\n");
tmstats.btmalloc_failures++;
return NULL;
}
/* Update parent and max-kids-per-parent stats. */
if (!parent->kids)
tmstats.calltree_parents++;
nkids = 1;
for (tmp = parent->kids; tmp; tmp = tmp->siblings)
nkids++;
if (nkids > tmstats.calltree_maxkids) {
tmstats.calltree_maxkids = nkids;
calltree_maxkids_parent = parent;
}
/* Insert the new site into the tree. */
site->pc = pc;
site->serial = ++callsite_serial_generator;
site->name = method;
site->parent = parent;
site->siblings = parent->kids;
parent->kids = site;
site->kids = NULL;
/* Log the site with its parent, method, and offset. */
log_event4('S', site->serial, parent->serial, method_serial, offset);
upward:
parent = site;
bpdown = bp;
bp = bpup;
} while (bp);
if (maxstack)
calltree_maxstack_top = site;
depth = 0;
for (tmp = site; tmp; tmp = tmp->parent)
depth++;
if (depth > tmstats.calltree_maxdepth)
tmstats.calltree_maxdepth = depth;
return site;
}
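/*
 * Note that 'L', 'N', and 'S' records are emitted only the first time a
 * library, method, or callsite is encountered; later allocation events refer
 * to callsites by serial number alone, which keeps the log compact.
 */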
callsite *
backtrace(int skip)
{
jmp_buf jb;
uint32 *bp, *bpdown, pc;
callsite *site, **key;
PLHashNumber hash;
PLHashEntry **hep, *he;
int i, n;
tmstats.backtrace_calls++;
suppress_tracing++;
setjmp(jb);
/* Stack walking code adapted from Kipp's "leaky". */
bp = (uint32*) jb[0].__jmpbuf[JB_BP];
while (--skip >= 0) {
bpdown = (uint32*) *bp++;
pc = *bp;
if (pc < 0x08000000 || pc > 0x7fffffff || bpdown < bp)
break;
bp = bpdown;
}
site = calltree(bp);
if (!site) {
tmstats.backtrace_failures++;
PR_ASSERT(tmstats.backtrace_failures < 100);
}
suppress_tracing--;
return site;
}
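/*
 * The malloc, calloc, and realloc wrappers below call backtrace(2) so that
 * the wrapper's own frame and backtrace's frame are skipped and the logged
 * callsite chain starts at the wrapper's caller.
 */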
typedef struct allocation {
PLHashEntry entry;
size_t size;
} allocation;
#define ALLOC_HEAP_SIZE 150000
static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;
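/*
 * The allocations table tracks live blocks so 'R' and 'F' events can report
 * sizes.  Its hash-table bookkeeping is served from the static freelist
 * above (falling back to __libc_malloc), so growing or populating the table
 * does not recurse into the traced malloc() wrapper.
 */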
static void *a2f_alloctable(void *pool, PRSize size)
{
return __libc_malloc(size);
}
static void a2f_freetable(void *pool, void *item)
{
__libc_free(item);
}
static PLHashEntry *a2f_allocentry(void *pool, const void *key)
{
allocation **afp, *alloc;
int n;
if (!alloc_heap_initialized) {
n = ALLOC_HEAP_SIZE;
afp = &alloc_freelist;
for (alloc = alloc_heap; --n >= 0; alloc++) {
*afp = alloc;
afp = (allocation**) &alloc->entry.next;
}
*afp = NULL;
alloc_heap_initialized = 1;
}
afp = &alloc_freelist;
alloc = *afp;
if (!alloc)
return __libc_malloc(sizeof(allocation));
*afp = (allocation*) alloc->entry.next;
return &alloc->entry;
}
static void a2f_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
allocation *alloc;
if (flag != HT_FREE_ENTRY)
return;
alloc = (allocation*) he;
if (&alloc_heap[0] <= alloc && alloc < &alloc_heap[ALLOC_HEAP_SIZE]) {
alloc->entry.next = &alloc_freelist->entry;
alloc_freelist = alloc;
} else {
__libc_free((void*) alloc);
}
}
static PLHashAllocOps a2f_hashallocops = {
a2f_alloctable, a2f_freetable,
a2f_allocentry, a2f_freeentry
};
static PLHashNumber hash_pointer(const void *key)
{
return (PLHashNumber) key;
}
static PLHashTable *allocations = NULL;
static PLHashTable *new_allocations()
{
allocations = PL_NewHashTable(200000, hash_pointer,
PL_CompareValues, PL_CompareValues,
&a2f_hashallocops, NULL);
return allocations;
}
#define get_allocations() (allocations ? allocations : new_allocations())
__ptr_t malloc(size_t size)
{
__ptr_t *ptr;
callsite *site;
PLHashEntry *he;
allocation *alloc;
ptr = __libc_malloc(size);
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.malloc_calls++;
if (!ptr) {
tmstats.malloc_failures++;
} else if (suppress_tracing == 0) {
site = backtrace(2);
if (site)
log_event2('M', site->serial, size);
if (get_allocations()) {
suppress_tracing++;
he = PL_HashTableAdd(allocations, ptr, site);
suppress_tracing--;
if (he) {
alloc = (allocation*) he;
alloc->size = size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
return ptr;
}
__ptr_t calloc(size_t count, size_t size)
{
__ptr_t *ptr;
callsite *site;
PLHashEntry *he;
allocation *alloc;
ptr = __libc_calloc(count, size);
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.calloc_calls++;
if (!ptr) {
tmstats.calloc_failures++;
} else if (suppress_tracing == 0) {
site = backtrace(2);
size *= count;
if (site)
log_event2('C', site->serial, size);
if (get_allocations()) {
suppress_tracing++;
he = PL_HashTableAdd(allocations, ptr, site);
suppress_tracing--;
if (he) {
alloc = (allocation*) he;
alloc->size = size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
return ptr;
}
__ptr_t realloc(__ptr_t ptr, size_t size)
{
size_t oldsize;
PLHashNumber hash;
PLHashEntry *he;
allocation *alloc;
callsite *site;
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.realloc_calls++;
if (suppress_tracing == 0) {
oldsize = 0;
if (ptr && get_allocations()) {
hash = hash_pointer(ptr);
he = *PL_HashTableRawLookup(allocations, hash, ptr);
if (he) {
alloc = (allocation*) he;
oldsize = alloc->size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
ptr = __libc_realloc(ptr, size);
if (tmmon)
PR_EnterMonitor(tmmon);
if (!ptr && size) {
tmstats.realloc_failures++;
} else if (suppress_tracing == 0) {
site = backtrace(2);
if (site)
log_event3('R', site->serial, oldsize, size);
if (ptr && allocations) {
suppress_tracing++;
he = PL_HashTableAdd(allocations, ptr, site);
suppress_tracing--;
if (he) {
alloc = (allocation*) he;
alloc->size = size;
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
return ptr;
}
void free(__ptr_t ptr)
{
PLHashEntry **hep, *he;
callsite *site;
allocation *alloc;
if (tmmon)
PR_EnterMonitor(tmmon);
tmstats.free_calls++;
if (!ptr) {
tmstats.null_free_calls++;
} else if (suppress_tracing == 0 && get_allocations()) {
hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
he = *hep;
if (he) {
site = (callsite*) he->value;
if (site) {
alloc = (allocation*) he;
log_event2('F', site->serial, alloc->size);
}
PL_HashTableRawRemove(allocations, hep, he);
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
__libc_free(ptr);
}
static void cleanup(void)
{
if (tmstats.backtrace_failures) {
fprintf(stderr,
"TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
(unsigned long) tmstats.backtrace_failures,
(unsigned long) tmstats.btmalloc_failures,
(unsigned long) tmstats.dladdr_failures);
}
if (bufpos != 0)
flush_log_buffer();
if (logfd >= 0)
close(logfd);
if (tmmon)
PR_DestroyMonitor(tmmon);
}
static const char magic[] = NS_TRACE_MALLOC_LOGFILE_MAGIC;
PR_IMPLEMENT(void) NS_TraceMalloc(int fd)
{
/* We must be running on the primordial thread. */
PR_ASSERT(suppress_tracing == 0);
suppress_tracing = 1;
logfd = fd;
(void) write(fd, magic, sizeof magic - 1);  /* 16 bytes, without the NUL */
atexit(cleanup);
tmmon = PR_NewMonitor();
if (bufpos != 0)
flush_log_buffer();
suppress_tracing = 0;
}
#endif /* defined NS_TRACE_MALLOC */

xpcom/base/nsTraceMalloc.h Normal file

@@ -0,0 +1,70 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express oqr
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is nsTraceMalloc.c/bloatblame.c code, released
* April 19, 2000.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 2000 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Brendan Eich, 14-April-2000
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License (the "GPL"), in which case the
* provisions of the GPL are applicable instead of those above.
* If you wish to allow use of your version of this file only
* under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*/
#ifndef nsTraceMalloc_h___
#define nsTraceMalloc_h___
#include "prtypes.h"
PR_BEGIN_EXTERN_C
#define NS_TRACE_MALLOC_LOGFILE_MAGIC "XPCOM\nTMLog01\r\n\032"
/**
* Call NS_TraceMalloc with a valid log file descriptor to enable logging
* of compressed malloc traces, including callsite chains. Integers are
* unsigned, at most 32 bits, and encoded as follows:
* 0-127 0xxxxxxx (binary, one byte)
* 128-16383 10xxxxxx xxxxxxxx
* 16384-0x1fffff 110xxxxx xxxxxxxx xxxxxxxx
* 0x200000-0xfffffff 1110xxxx xxxxxxxx xxxxxxxx xxxxxxxx
* 0x10000000-0xffffffff 11110000 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* Strings are NUL-terminated ASCII.
*
* Event Operands
* 'L' library_serial shared_object_filename_string
* 'N' method_serial library_serial demangled_name_string
* 'S' site_serial parent_serial method_serial calling_pc_offset
* 'M' site_serial malloc_size
* 'C' site_serial calloc_size
* 'R' site_serial realloc_oldsize realloc_size
* 'F' site_serial free_size
*
* See xpcom/base/bloatblame.c for an example log-file reader.
*/
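/*
 * For instance (illustrative values), growing the block allocated at callsite
 * serial 130 from 64 to 128 bytes would be logged as the bytes
 *     52 80 82 40 80 80
 * i.e. 'R', then 130, 64, and 128 in the integer encoding described above.
 */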
PR_EXTERN(void) NS_TraceMalloc(int fd);
PR_END_EXTERN_C
#endif /* nsTraceMalloc_h___ */
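/*
 * Illustrative usage sketch.  The helper below is hypothetical (its name,
 * the log-file path handling, and the open() flags are assumptions); only
 * NS_TraceMalloc(int fd) itself is declared by this header.
 */
#if 0
#include <fcntl.h>
#include "nsTraceMalloc.h"

static void start_trace_malloc(const char *logpath)
{
    /* Hand a freshly opened log-file descriptor to the tracer. */
    int fd = open(logpath, O_CREAT | O_WRONLY | O_TRUNC, 0644);

    if (fd >= 0)
        NS_TraceMalloc(fd);
}
#endif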