Mirror of https://github.com/mozilla/pjs.git

First Checked In.

Parent: feddb2d6d7
Commit: 1328765f22
@@ -0,0 +1,6 @@
#
# This is a list of local files which get copied to the mozilla:dist:gc directory
#

gc.h
generic_threads.h
@@ -0,0 +1,60 @@
# $Id: alpha_mach_dep.s,v 1.1 1999-09-30 02:39:45 beard%netscape.com Exp $

# define call_push(x) \
    lda $16, 0(x); /* copy x to first argument register */ \
    jsr $26, GC_push_one; /* call GC_push_one, ret addr in $26 */ \
    ldgp $gp, 0($26) /* restore $gp register from $ra */

    .text
    .align 4
    .globl GC_push_regs
    .ent GC_push_regs 2
GC_push_regs:
    ldgp $gp, 0($27)    # set gp from the procedure value reg
    lda $sp, -32($sp)   # make stack frame
    stq $26, 8($sp)     # save return address
    .mask 0x04000000, -8
    .frame $sp, 16, $26, 0

 # call_push($0)        # expression eval and int func result

 # call_push($1)        # temp regs - not preserved cross calls
 # call_push($2)
 # call_push($3)
 # call_push($4)
 # call_push($5)
 # call_push($6)
 # call_push($7)
 # call_push($8)

    call_push($9)       # Saved regs
    call_push($10)
    call_push($11)
    call_push($12)
    call_push($13)
    call_push($14)

    call_push($15)      # frame ptr or saved reg

 # call_push($16)       # argument regs - not preserved cross calls
 # call_push($17)
 # call_push($18)
 # call_push($19)
 # call_push($20)
 # call_push($21)

 # call_push($22)       # temp regs - not preserved cross calls
 # call_push($23)
 # call_push($24)
 # call_push($25)

 # call_push($26)       # return address - expression eval
 # call_push($27)       # procedure value or temporary reg
 # call_push($28)       # assembler temp - not preserved
    call_push($29)      # Global Pointer
 # call_push($30)       # Stack Pointer

    ldq $26, 8($sp)     # restore return address
    lda $sp, 32($sp)    # pop stack frame
    ret $31, ($26), 1   # return ($31 == hardwired zero)
    .end GC_push_regs
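GC_push_regs exists because a callee-saved register may hold the only live reference to an object, so the collector must treat every such register as a root. A rough C rendering of what the call_push macro expands to, purely for orientation (GC_push_one is the collector's real entry point; the saved[] array is illustrative, since machine registers cannot be addressed from C):

void GC_push_one(unsigned long p);  /* marks p's referent, if p looks like a heap pointer */

static void push_saved_regs_sketch(unsigned long saved[], int n)
{
    int i;
    for (i = 0; i < n; i++)
        GC_push_one(saved[i]);      /* one call_push(x) per saved register */
}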
@@ -0,0 +1,676 @@
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, October 9, 1995 1:16 pm PDT */
# include "gc_priv.h"

void GC_default_print_heap_obj_proc();
GC_API void GC_register_finalizer_no_order
    GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
              GC_finalization_proc *ofn, GC_PTR *ocd));

/* Do we want to and know how to save the call stack at the time of */
/* an allocation?  How much space do we want to use in each object? */

# define START_FLAG ((word)0xfedcedcb)
# define END_FLAG ((word)0xbcdecdef)
    /* Stored both one past the end of user object, and one before */
    /* the end of the object as seen by the allocator.             */


/* Object header */
typedef struct {
    char * oh_string;       /* object descriptor string  */
    word oh_int;            /* object descriptor integers */
# ifdef NEED_CALLINFO
    struct callinfo oh_ci[NFRAMES];
# endif
    word oh_sz;             /* Original malloc arg. */
    word oh_sf;             /* start flag */
} oh;
/* The size of the above structure is assumed not to dealign things, */
/* and to be a multiple of the word length.                          */

#define DEBUG_BYTES (sizeof (oh) + sizeof (word))
#undef ROUNDED_UP_WORDS
#define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)


#ifdef SAVE_CALL_CHAIN
#   define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
#   define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
#else
# ifdef GC_ADD_CALLER
#   define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
#   define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
# else
#   define ADD_CALL_CHAIN(base, ra)
#   define PRINT_CALL_CHAIN(base)
# endif
#endif

/* Check whether object with base pointer p has debugging info */
/* p is assumed to point to a legitimate object in our part    */
/* of the heap.                                                */
GC_bool GC_has_debug_info(p)
ptr_t p;
{
    register oh * ohdr = (oh *)p;
    register ptr_t body = (ptr_t)(ohdr + 1);
    register word sz = GC_size((ptr_t) ohdr);

    if (HBLKPTR((ptr_t)ohdr) != HBLKPTR((ptr_t)body)
        || sz < sizeof (oh)) {
        return(FALSE);
    }
    if (ohdr -> oh_sz == sz) {
        /* Object may have had debug info, but has been deallocated */
        return(FALSE);
    }
    if (ohdr -> oh_sf == (START_FLAG ^ (word)body)) return(TRUE);
    if (((word *)ohdr)[BYTES_TO_WORDS(sz)-1] == (END_FLAG ^ (word)body)) {
        return(TRUE);
    }
    return(FALSE);
}

/* Store debugging info into p.  Return displaced pointer. */
/* Assumes we don't hold allocation lock.                  */
ptr_t GC_store_debug_info(p, sz, string, integer)
register ptr_t p;   /* base pointer */
word sz;    /* bytes */
char * string;
word integer;
{
    register oh * ohdr = (oh *)p;
    register word * result = (word *)(ohdr + 1);
    DCL_LOCK_STATE;

    /* There is some argument that we should disable signals here.  */
    /* But that's expensive.  And this way things should only appear */
    /* inconsistent while we're in the handler.                     */
    LOCK();
    ohdr -> oh_string = string;
    ohdr -> oh_int = integer;
    ohdr -> oh_sz = sz;
    ohdr -> oh_sf = START_FLAG ^ (word)result;
    ((word *)p)[BYTES_TO_WORDS(GC_size(p))-1] =
         result[ROUNDED_UP_WORDS(sz)] = END_FLAG ^ (word)result;
    UNLOCK();
    return((ptr_t)result);
}
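The xor-sealed flags give each debug object a canary at both ends: any overwrite of the header or of the word just past the body changes a value the collector can recompute. A self-contained sketch of the same technique with plain malloc, compilable on its own (hdr, my_alloc, and is_smashed are illustrative names, not collector API):

#include <assert.h>
#include <stdlib.h>

typedef unsigned long word;
#define START_FLAG ((word)0xfedcedcb)
#define END_FLAG   ((word)0xbcdecdef)

typedef struct { word sz; word sf; } hdr;   /* cut-down "oh" header */

static void *my_alloc(size_t lb)
{
    size_t words = (lb + sizeof(word) - 1) / sizeof(word);
    hdr *h = malloc(sizeof(hdr) + (words + 1) * sizeof(word));
    word *body = (word *)(h + 1);
    h->sz = lb;
    h->sf = START_FLAG ^ (word)body;        /* seal the front */
    body[words] = END_FLAG ^ (word)body;    /* seal the back  */
    return body;
}

static int is_smashed(void *p)
{
    hdr *h = (hdr *)p - 1;
    word *body = (word *)p;
    size_t words = (h->sz + sizeof(word) - 1) / sizeof(word);
    return h->sf != (START_FLAG ^ (word)body)
        || body[words] != (END_FLAG ^ (word)body);
}

int main(void)
{
    size_t lb = 10;
    size_t words = (lb + sizeof(word) - 1) / sizeof(word);
    char *p = my_alloc(lb);
    assert(!is_smashed(p));
    ((word *)p)[words] ^= 1;    /* simulate an overrun into the canary */
    assert(is_smashed(p));
    return 0;
}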

/* Check the object with debugging info at p     */
/* return NIL if it's OK.  Else return clobbered */
/* address.                                      */
ptr_t GC_check_annotated_obj(ohdr)
register oh * ohdr;
{
    register ptr_t body = (ptr_t)(ohdr + 1);
    register word gc_sz = GC_size((ptr_t)ohdr);
    if (ohdr -> oh_sz + DEBUG_BYTES > gc_sz) {
        return((ptr_t)(&(ohdr -> oh_sz)));
    }
    if (ohdr -> oh_sf != (START_FLAG ^ (word)body)) {
        return((ptr_t)(&(ohdr -> oh_sf)));
    }
    if (((word *)ohdr)[BYTES_TO_WORDS(gc_sz)-1] != (END_FLAG ^ (word)body)) {
        return((ptr_t)((word *)ohdr + BYTES_TO_WORDS(gc_sz)-1));
    }
    if (((word *)body)[ROUNDED_UP_WORDS(ohdr -> oh_sz)]
        != (END_FLAG ^ (word)body)) {
        return((ptr_t)((word *)body + ROUNDED_UP_WORDS(ohdr -> oh_sz)));
    }
    return(0);
}

extern const char* getTypeName(void* ptr);

void GC_print_obj(p)
ptr_t p;
{
    register oh * ohdr = (oh *)GC_base(p);
    register word *wp, *wend;

    wp = (word*)((unsigned long)ohdr + sizeof(oh));

#ifdef MACOS
    GC_err_printf3("0x%08lX <%s> (sz=%ld)\n", wp, getTypeName(wp),
                   (unsigned long)(ohdr -> oh_sz));
#else
    GC_err_printf1("0x%08lX (", wp);
    GC_err_puts(ohdr->oh_string);
    GC_err_printf2(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
                   (unsigned long)(ohdr -> oh_sz));
#endif

    /* print all potential references held by this object. */
    wend = (word*)((unsigned long)wp + ohdr -> oh_sz);
    while (wp < wend) GC_err_printf1("\t0x%08lX\n", *wp++);

    PRINT_CALL_CHAIN(ohdr);
}

void GC_debug_print_heap_obj_proc(p)
ptr_t p;
{
    if (GC_has_debug_info(p)) {
        GC_print_obj(p);
    } else {
        GC_default_print_heap_obj_proc(p);
    }
}

void GC_print_smashed_obj(p, clobbered_addr)
ptr_t p, clobbered_addr;
{
    register oh * ohdr = (oh *)GC_base(p);

    GC_err_printf2("0x%lx in object at 0x%lx(", (unsigned long)clobbered_addr,
                   (unsigned long)p);
    if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
        || ohdr -> oh_string == 0) {
        GC_err_printf1("<smashed>, appr. sz = %ld)\n",
                       (GC_size((ptr_t)ohdr) - DEBUG_BYTES));
    } else {
        if (ohdr -> oh_string[0] == '\0') {
            GC_err_puts("EMPTY(smashed?)");
        } else {
            GC_err_puts(ohdr -> oh_string);
        }
        GC_err_printf2(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
                       (unsigned long)(ohdr -> oh_sz));
        PRINT_CALL_CHAIN(ohdr);
    }
}

void GC_check_heap_proc();

void GC_start_debugging()
{
    GC_check_heap = GC_check_heap_proc;
    GC_print_heap_obj = GC_debug_print_heap_obj_proc;
    GC_debugging_started = TRUE;
    GC_register_displacement((word)sizeof(oh));
}

# if defined(__STDC__) || defined(__cplusplus)
    void GC_debug_register_displacement(GC_word offset)
# else
    void GC_debug_register_displacement(offset)
    GC_word offset;
# endif
{
    GC_register_displacement(offset);
    GC_register_displacement((word)sizeof(oh) + offset);
}

# ifdef GC_ADD_CALLER
#   define EXTRA_ARGS word ra, char * s, int i
#   define OPT_RA ra,
# else
#   define EXTRA_ARGS char * s, int i
#   define OPT_RA
# endif

# ifdef __STDC__
    GC_PTR GC_debug_malloc(size_t lb, EXTRA_ARGS)
# else
    GC_PTR GC_debug_malloc(lb, s, i)
    size_t lb;
    char * s;
    int i;
#   ifdef GC_ADD_CALLER
        --> GC_ADD_CALLER not implemented for K&R C
#   endif
# endif
{
    GC_PTR result = GC_malloc(lb + DEBUG_BYTES);

    if (result == 0) {
        GC_err_printf1("GC_debug_malloc(%ld) returning NIL (",
                       (unsigned long) lb);
        GC_err_puts(s);
        GC_err_printf1(":%ld)\n", (unsigned long)i);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, (word)i));
}

#ifdef STUBBORN_ALLOC
# ifdef __STDC__
    GC_PTR GC_debug_malloc_stubborn(size_t lb, EXTRA_ARGS)
# else
    GC_PTR GC_debug_malloc_stubborn(lb, s, i)
    size_t lb;
    char * s;
    int i;
# endif
{
    GC_PTR result = GC_malloc_stubborn(lb + DEBUG_BYTES);

    if (result == 0) {
        GC_err_printf1("GC_debug_malloc_stubborn(%ld) returning NIL (",
                       (unsigned long) lb);
        GC_err_puts(s);
        GC_err_printf1(":%ld)\n", (unsigned long)i);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, (word)i));
}

void GC_debug_change_stubborn(p)
GC_PTR p;
{
    register GC_PTR q = GC_base(p);
    register hdr * hhdr;

    if (q == 0) {
        GC_err_printf1("Bad argument: 0x%lx to GC_debug_change_stubborn\n",
                       (unsigned long) p);
        ABORT("GC_debug_change_stubborn: bad arg");
    }
    hhdr = HDR(q);
    if (hhdr -> hb_obj_kind != STUBBORN) {
        GC_err_printf1("GC_debug_change_stubborn arg not stubborn: 0x%lx\n",
                       (unsigned long) p);
        ABORT("GC_debug_change_stubborn: arg not stubborn");
    }
    GC_change_stubborn(q);
}

void GC_debug_end_stubborn_change(p)
GC_PTR p;
{
    register GC_PTR q = GC_base(p);
    register hdr * hhdr;

    if (q == 0) {
        GC_err_printf1("Bad argument: 0x%lx to GC_debug_end_stubborn_change\n",
                       (unsigned long) p);
        ABORT("GC_debug_end_stubborn_change: bad arg");
    }
    hhdr = HDR(q);
    if (hhdr -> hb_obj_kind != STUBBORN) {
        GC_err_printf1("debug_end_stubborn_change arg not stubborn: 0x%lx\n",
                       (unsigned long) p);
        ABORT("GC_debug_end_stubborn_change: arg not stubborn");
    }
    GC_end_stubborn_change(q);
}

#endif /* STUBBORN_ALLOC */

# ifdef __STDC__
    GC_PTR GC_debug_malloc_atomic(size_t lb, EXTRA_ARGS)
# else
    GC_PTR GC_debug_malloc_atomic(lb, s, i)
    size_t lb;
    char * s;
    int i;
# endif
{
    GC_PTR result = GC_malloc_atomic(lb + DEBUG_BYTES);

    if (result == 0) {
        GC_err_printf1("GC_debug_malloc_atomic(%ld) returning NIL (",
                       (unsigned long) lb);
        GC_err_puts(s);
        GC_err_printf1(":%ld)\n", (unsigned long)i);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, (word)i));
}

# ifdef __STDC__
    GC_PTR GC_debug_malloc_uncollectable(size_t lb, EXTRA_ARGS)
# else
    GC_PTR GC_debug_malloc_uncollectable(lb, s, i)
    size_t lb;
    char * s;
    int i;
# endif
{
    GC_PTR result = GC_malloc_uncollectable(lb + DEBUG_BYTES);

    if (result == 0) {
        GC_err_printf1("GC_debug_malloc_uncollectable(%ld) returning NIL (",
                       (unsigned long) lb);
        GC_err_puts(s);
        GC_err_printf1(":%ld)\n", (unsigned long)i);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, (word)i));
}

#ifdef ATOMIC_UNCOLLECTABLE
# ifdef __STDC__
    GC_PTR GC_debug_malloc_atomic_uncollectable(size_t lb, EXTRA_ARGS)
# else
    GC_PTR GC_debug_malloc_atomic_uncollectable(lb, s, i)
    size_t lb;
    char * s;
    int i;
# endif
{
    GC_PTR result = GC_malloc_atomic_uncollectable(lb + DEBUG_BYTES);

    if (result == 0) {
        GC_err_printf1(
            "GC_debug_malloc_atomic_uncollectable(%ld) returning NIL (",
            (unsigned long) lb);
        GC_err_puts(s);
        GC_err_printf1(":%ld)\n", (unsigned long)i);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
#endif /* ATOMIC_UNCOLLECTABLE */

# ifdef __STDC__
    void GC_debug_free(GC_PTR p)
# else
    void GC_debug_free(p)
    GC_PTR p;
# endif
{
    register GC_PTR base = GC_base(p);
    register ptr_t clobbered;

    if (base == 0) {
        GC_err_printf1("Attempt to free invalid pointer %lx\n",
                       (unsigned long)p);
        if (p != 0) ABORT("free(invalid pointer)");
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
        GC_err_printf1(
            "GC_debug_free called on pointer %lx wo debugging info\n",
            (unsigned long)p);
    } else {
        oh * ohdr = (oh *)base;
        clobbered = GC_check_annotated_obj(ohdr);
        if (clobbered != 0) {
            if (ohdr -> oh_sz == GC_size(base)) {
                GC_err_printf0(
                    "GC_debug_free: found previously deallocated (?) object at ");
            } else {
                GC_err_printf0("GC_debug_free: found smashed object at ");
            }
            GC_print_smashed_obj(p, clobbered);
        }
        /* Invalidate size */
        ohdr -> oh_sz = GC_size(base);
    }
#   ifdef FIND_LEAK
        GC_free(base);
#   else
    {
        register hdr * hhdr = HDR(p);
        GC_bool uncollectable = FALSE;

        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
            uncollectable = TRUE;
        }
#       ifdef ATOMIC_UNCOLLECTABLE
            if (hhdr -> hb_obj_kind == AUNCOLLECTABLE) {
                uncollectable = TRUE;
            }
#       endif
        if (uncollectable) GC_free(base);
    }
#   endif
}

# ifdef __STDC__
    GC_PTR GC_debug_realloc(GC_PTR p, size_t lb, EXTRA_ARGS)
# else
    GC_PTR GC_debug_realloc(p, lb, s, i)
    GC_PTR p;
    size_t lb;
    char *s;
    int i;
# endif
{
    register GC_PTR base = GC_base(p);
    register ptr_t clobbered;
    register GC_PTR result;
    register size_t copy_sz = lb;
    register size_t old_sz;
    register hdr * hhdr;

    if (p == 0) return(GC_debug_malloc(lb, OPT_RA s, i));
    if (base == 0) {
        GC_err_printf1(
            "Attempt to reallocate invalid pointer %lx\n", (unsigned long)p);
        ABORT("realloc(invalid pointer)");
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
        GC_err_printf1(
            "GC_debug_realloc called on pointer %lx wo debugging info\n",
            (unsigned long)p);
        return(GC_realloc(p, lb));
    }
    hhdr = HDR(base);
    switch (hhdr -> hb_obj_kind) {
#   ifdef STUBBORN_ALLOC
      case STUBBORN:
        result = GC_debug_malloc_stubborn(lb, OPT_RA s, i);
        break;
#   endif
      case NORMAL:
        result = GC_debug_malloc(lb, OPT_RA s, i);
        break;
      case PTRFREE:
        result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
        break;
      case UNCOLLECTABLE:
        result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
        break;
#   ifdef ATOMIC_UNCOLLECTABLE
      case AUNCOLLECTABLE:
        result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
        break;
#   endif
      default:
        GC_err_printf0("GC_debug_realloc: encountered bad kind\n");
        ABORT("bad kind");
    }
    clobbered = GC_check_annotated_obj((oh *)base);
    if (clobbered != 0) {
        GC_err_printf0("GC_debug_realloc: found smashed object at ");
        GC_print_smashed_obj(p, clobbered);
    }
    old_sz = ((oh *)base) -> oh_sz;
    if (old_sz < copy_sz) copy_sz = old_sz;
    if (result == 0) return(0);
    BCOPY(p, result, copy_sz);
    GC_debug_free(p);
    return(result);
}

/* Check all marked objects in the given block for validity */
/*ARGSUSED*/
void GC_check_heap_block(hbp, dummy)
register struct hblk *hbp;  /* ptr to current heap block */
word dummy;
{
    register struct hblkhdr * hhdr = HDR(hbp);
    register word sz = hhdr -> hb_sz;
    register int word_no;
    register word *p, *plim;

    p = (word *)(hbp->hb_body);
    word_no = HDR_WORDS;
    if (sz > MAXOBJSZ) {
        plim = p;
    } else {
        plim = (word *)((((word)hbp) + HBLKSIZE) - WORDS_TO_BYTES(sz));
    }
    /* go through all words in block */
    while( p <= plim ) {
        if( mark_bit_from_hdr(hhdr, word_no)
            && GC_has_debug_info((ptr_t)p)) {
            ptr_t clobbered = GC_check_annotated_obj((oh *)p);

            if (clobbered != 0) {
                GC_err_printf0(
                    "GC_check_heap_block: found smashed object at ");
                GC_print_smashed_obj((ptr_t)p, clobbered);
            }
        }
        word_no += sz;
        p += sz;
    }
}


/* This assumes that all accessible objects are marked, and that */
/* I hold the allocation lock.  Normally called by collector.    */
void GC_check_heap_proc()
{
#   ifndef SMALL_CONFIG
        if ((sizeof(oh) & (2 * sizeof(word) - 1)) != 0) {
            ABORT("Alignment problem: object header has inappropriate size\n");
        }
#   endif
    GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
}

struct closure {
    GC_finalization_proc cl_fn;
    GC_PTR cl_data;
};

# ifdef __STDC__
    void * GC_make_closure(GC_finalization_proc fn, void * data)
# else
    GC_PTR GC_make_closure(fn, data)
    GC_finalization_proc fn;
    GC_PTR data;
# endif
{
    struct closure * result =
        (struct closure *) GC_malloc(sizeof (struct closure));

    result -> cl_fn = fn;
    result -> cl_data = data;
    return((GC_PTR)result);
}

# ifdef __STDC__
    void GC_debug_invoke_finalizer(void * obj, void * data)
# else
    void GC_debug_invoke_finalizer(obj, data)
    char * obj;
    char * data;
# endif
{
    register struct closure * cl = (struct closure *) data;

    (*(cl -> cl_fn))((GC_PTR)((char *)obj + sizeof(oh)), cl -> cl_data);
}


# ifdef __STDC__
    void GC_debug_register_finalizer(GC_PTR obj, GC_finalization_proc fn,
                                     GC_PTR cd, GC_finalization_proc *ofn,
                                     GC_PTR *ocd)
# else
    void GC_debug_register_finalizer(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc *ofn;
    GC_PTR *ocd;
# endif
{
    ptr_t base = GC_base(obj);
    if (0 == base || (ptr_t)obj - base != sizeof(oh)) {
        GC_err_printf1(
            "GC_register_finalizer called with non-base-pointer 0x%lx\n",
            obj);
    }
    GC_register_finalizer(base, GC_debug_invoke_finalizer,
                          GC_make_closure(fn,cd), ofn, ocd);
}
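A minimal usage sketch for the machinery above, assuming a build without GC_ADD_CALLER, so GC_debug_malloc takes just the (file, line) pair that the GC_MALLOC macro supplies in debug mode; GC_gcollect and GC_invoke_finalizers are the collector's own entry points from gc.h:

#include <stdio.h>
#include "gc.h"

static void on_reclaim(GC_PTR obj, GC_PTR client_data)
{
    printf("finalizing %p (%s)\n", obj, (char *)client_data);
}

int main(void)
{
    GC_PTR obj = GC_debug_malloc(64, __FILE__, __LINE__);
    GC_finalization_proc old_fn;
    GC_PTR old_cd;

    /* The debug variant translates obj to its base pointer and wraps */
    /* on_reclaim in a closure, so the finalizer is invoked with the  */
    /* displaced (client-visible) pointer, as implemented above.      */
    GC_debug_register_finalizer(obj, on_reclaim, (GC_PTR)"demo",
                                &old_fn, &old_cd);
    obj = 0;
    GC_gcollect();           /* may queue the finalizer */
    GC_invoke_finalizers();  /* run any queued finalizers now */
    return 0;
}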

# ifdef __STDC__
    void GC_debug_register_finalizer_no_order
                                    (GC_PTR obj, GC_finalization_proc fn,
                                     GC_PTR cd, GC_finalization_proc *ofn,
                                     GC_PTR *ocd)
# else
    void GC_debug_register_finalizer_no_order
                                    (obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc *ofn;
    GC_PTR *ocd;
# endif
{
    ptr_t base = GC_base(obj);
    if (0 == base || (ptr_t)obj - base != sizeof(oh)) {
        GC_err_printf1(
            "GC_register_finalizer_no_order called with non-base-pointer 0x%lx\n",
            obj);
    }
    GC_register_finalizer_no_order(base, GC_debug_invoke_finalizer,
                                   GC_make_closure(fn,cd), ofn, ocd);
}

# ifdef __STDC__
    void GC_debug_register_finalizer_ignore_self
                                    (GC_PTR obj, GC_finalization_proc fn,
                                     GC_PTR cd, GC_finalization_proc *ofn,
                                     GC_PTR *ocd)
# else
    void GC_debug_register_finalizer_ignore_self
                                    (obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc *ofn;
    GC_PTR *ocd;
# endif
{
    ptr_t base = GC_base(obj);
    if (0 == base || (ptr_t)obj - base != sizeof(oh)) {
        GC_err_printf1(
            "GC_register_finalizer_ignore_self called with non-base-pointer 0x%lx\n",
            obj);
    }
    GC_register_finalizer_ignore_self(base, GC_debug_invoke_finalizer,
                                      GC_make_closure(fn,cd), ofn, ocd);
}
@@ -0,0 +1,796 @@
/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */

/*
 * This is incredibly OS specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
#ifndef MACOS
#  include <sys/types.h>
#endif
#include "gc_priv.h"

/* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
# if defined(SOLARIS_THREADS) && defined(dlopen)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation.  We first remove    */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif

#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HP_PA) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too
 --> hard to add.
#endif

#include <stdio.h>
#ifdef SUNOS5DL
#   include <sys/elf.h>
#   include <dlfcn.h>
#   include <link.h>
#endif
#ifdef SUNOS4
#   include <dlfcn.h>
#   include <link.h>
#   include <a.out.h>
  /* struct link_map field overrides */
#   define l_next	lm_next
#   define l_addr	lm_addr
#   define l_name	lm_name
#endif


#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    Elf32_Dyn _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern Elf32_Dyn _DYNAMIC;
    Elf32_Dyn *dp;
    struct r_debug *r;
    static struct link_map * cachedResult = 0;
    static Elf32_Dyn *dynStructureAddr = 0;
    		/* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
	/* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
	/* up properly in dynamically linked .so's.  This means we have */
	/* to use its value in the set of original object files loaded  */
	/* at program startup.                                          */
	if( dynStructureAddr == 0 ) {
	    void* startupSyms = dlopen(0, RTLD_LAZY);
	    dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
	}
#   else
	dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

#endif /* SUNOS5DL ... */

#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    struct link_dynamic _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}

/* Return the address of the ld.so allocated common symbol */
/* with the least address, or 0 if none.                   */
static ptr_t GC_first_common()
{
    ptr_t result = 0;
    extern struct link_dynamic _DYNAMIC;
    struct rtc_symb * curr_symbol;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif  /* SUNOS4 ... */

# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set.           */
# if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
#   ifndef SRC_M3
 --> fix mutual exclusion with dlopen
#   endif  /* We assume M3 programs don't call dlopen for now */
# endif

# ifdef SOLARIS_THREADS
  /* Redefine dlopen to guarantee mutual exclusion with  */
  /* GC_register_dynamic_libraries.                      */
  /* assumes that dlopen doesn't need to call GC_malloc  */
  /* and friends.                                        */
# include <thread.h>
# include <synch.h>

void * GC_dlopen(const char *path, int mode)
{
    void * result;

#   ifndef USE_PROC_FOR_LIBRARIES
      mutex_lock(&GC_allocate_ml);
#   endif
    result = dlopen(path, mode);
#   ifndef USE_PROC_FOR_LIBRARIES
      mutex_unlock(&GC_allocate_ml);
#   endif
    return(result);
}
# endif  /* SOLARIS_THREADS */

/* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
# if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
# endif

# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();


  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
	struct exec *e;

	e = (struct exec *) lm->lm_addr;
	GC_add_roots_inner(
		    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
		    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
		    TRUE);
#     endif
#     ifdef SUNOS5DL
	Elf32_Ehdr * e;
	Elf32_Phdr * p;
	unsigned long offset;
	char * start;
	register int i;

	e = (Elf32_Ehdr *) lm->l_addr;
	p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
	offset = ((unsigned long)(lm->l_addr));
	for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
	    switch( p->p_type ) {
	      case PT_LOAD:
		{
		    if( !(p->p_flags & PF_W) ) break;
		    start = ((char *)(p->p_vaddr)) + offset;
		    GC_add_roots_inner(
			start,
			start + p->p_memsz,
			TRUE
		    );
		}
		break;
	      default:
		break;
	    }
	}
#     endif
    }
#   ifdef SUNOS4
      {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
        }
      }
#   endif
}

# endif /* !USE_PROC ... */
# endif /* SUNOS */

#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF)

/* Dynamic loading code for Linux running ELF. Somewhat tested on
 * Linux/x86, untested but hopefully should work on Linux/Alpha.
 * This code was derived from the Solaris/ELF support. Thanks to
 * whatever kind soul wrote that.  - Patrick Bridges */

#include <elf.h>
#include <link.h>

/* Newer versions of Linux/Alpha and Linux/x86 define this macro.  We
 * define it for those older versions that don't.  */
#  ifndef ElfW
#    if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#      define ElfW(type) Elf32_##type
#    else
#      define ElfW(type) Elf64_##type
#    endif
#  endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    struct r_debug *r;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}


void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();


  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
	ElfW(Ehdr) * e;
	ElfW(Phdr) * p;
	unsigned long offset;
	char * start;
	register int i;

	e = (ElfW(Ehdr) *) lm->l_addr;
	p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
	offset = ((unsigned long)(lm->l_addr));
	for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
	    switch( p->p_type ) {
	      case PT_LOAD:
		{
		    if( !(p->p_flags & PF_W) ) break;
		    start = ((char *)(p->p_vaddr)) + offset;
		    GC_add_roots_inner(start, start + p->p_memsz, TRUE);
		}
		break;
	      default:
		break;
	    }
	}
    }
}

#endif
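/* Aside, for comparison only: later glibc versions expose the same     */
/* program-header walk through dl_iterate_phdr(3), avoiding the         */
/* undocumented DT_DEBUG/r_debug chain used above.  A stand-alone       */
/* sketch, not used by this code:                                       */

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int show_load_segs(struct dl_phdr_info *info, size_t size, void *data)
{
    int i;
    for (i = 0; i < info->dlpi_phnum; i++) {
        const ElfW(Phdr) *p = &info->dlpi_phdr[i];
        if (p->p_type == PT_LOAD && (p->p_flags & PF_W))
            printf("%s: %p..%p\n", info->dlpi_name,
                   (void *)(info->dlpi_addr + p->p_vaddr),
                   (void *)(info->dlpi_addr + p->p_vaddr + p->p_memsz));
    }
    return 0;   /* continue iteration */
}

int main(void)
{
    dl_iterate_phdr(show_load_segs, 0);
    return 0;
}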

#if defined(IRIX5) || defined(USE_PROC_FOR_LIBRARIES)

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>

extern void * GC_roots_present();
	/* The type is a lie, since the real type doesn't make sense here, */
	/* and we only test for NULL.                                      */

extern ptr_t GC_scratch_last_end_ptr; /* End of GC_scratch_alloc arena	*/

/* We use /proc to track down all parts of the address space that are  */
/* mapped by the process, and throw out regions we know we shouldn't   */
/* worry about.  This may also work under other SVR4 variants.         */
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;	/* Number of records currently in addr_map */
    static int needed_sz;	/* Required size of addr_map            */
    register int i;
    register long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */

    if (fd < 0) {
      sprintf(buf, "/proc/%d", getpid());
	/* The above generates a lint complaint, since pid_t varies.	*/
	/* It's unclear how to improve this.				*/
      fd = open(buf, O_RDONLY);
      if (fd < 0) {
    	ABORT("/proc open failed");
      }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
	GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
    	ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
        		/* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc((word)
						(current_sz * sizeof(prmap_t)));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                        fd, errno, needed_sz, addr_map);
    	ABORT("/proc PIOCMAP ioctl failed");
    };
    if (GC_n_heap_sects > 0) {
    	heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
    			+ GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
    	if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless.  Other than the */
          /* main data and stack segments, everything appears to be  */
          /* mapped readable, writable, executable, and shared(!!).  */
          /* This makes no sense to me.	- HB                          */
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
        	goto irrelevant;
#	ifdef MMAP_STACKS
	  if (GC_is_thread_stack(start)) goto irrelevant;
#	endif /* MMAP_STACKS */

        limit = start + addr_map[i].pr_size;
	if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
	    /* Discard text segments, i.e. 0-offset mappings against */
	    /* executable files which appear to have ELF headers.    */
	    caddr_t arg;
	    int obj;
#	    define MAP_IRR_SZ 10
	    static ptr_t map_irr[MAP_IRR_SZ];
	    			/* Known irrelevant map entries */
	    static int n_irr = 0;
	    struct stat buf;
	    register int i;

	    for (i = 0; i < n_irr; i++) {
	        if (map_irr[i] == start) goto irrelevant;
	    }
	    arg = (caddr_t)start;
	    obj = ioctl(fd, PIOCOPENM, &arg);
	    if (obj >= 0) {
	        fstat(obj, &buf);
	        close(obj);
	        if ((buf.st_mode & 0111) != 0) {
	            if (n_irr < MAP_IRR_SZ) {
	                map_irr[n_irr++] = start;
	            }
	            goto irrelevant;
	        }
	    }
	}
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep cached descriptor, for now.  Some kernels don't like us */
    /* to keep a /proc file descriptor around during kill -9.             */
    if (close(fd) < 0) ABORT("Couldnt close /proc file");
    fd = -1;
}

# endif /* USE_PROC || IRIX5 */

# ifdef MSWIN32

# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <stdlib.h>

  /* We traverse the entire address space and register all segments  */
  /* that could possibly have been written to.                       */
  DWORD GC_allocation_granularity;

  extern GC_bool GC_is_heap_base (ptr_t p);

# ifdef WIN32_THREADS
    extern void GC_get_next_stack(char *start, char **lo, char **hi);
# endif

  void GC_cond_add_roots(char *base, char * limit)
  {
    char dummy;
    char * stack_top
	   = (char *) ((word)(&dummy) & ~(GC_allocation_granularity-1));
    if (base == limit) return;
#   ifdef WIN32_THREADS
    {
        char * curr_base = base;
        char * next_stack_lo;
        char * next_stack_hi;

        for(;;) {
            GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
            if (next_stack_lo >= limit) break;
            GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
            curr_base = next_stack_hi;
        }
        if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
    }
#   else
        if (limit > stack_top && base < GC_stackbottom) {
            /* Part of the stack; ignore it. */
            return;
        }
        GC_add_roots_inner(base, limit, TRUE);
#   endif
  }

  extern GC_bool GC_win32s;

  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    SYSTEM_INFO sysinfo;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

    if (GC_win32s) return;
    GetSystemInfo(&sysinfo);
    base = limit = p = sysinfo.lpMinimumApplicationAddress;
    GC_allocation_granularity = sysinfo.dwAllocationGranularity;
    while (p < sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf)) {
            ABORT("Weird VirtualQuery result");
        }
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && (protect == PAGE_EXECUTE_READWRITE
                || protect == PAGE_READWRITE)
            && !GC_is_heap_base(buf.AllocationBase)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                GC_cond_add_roots(base, limit);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

#endif /* MSWIN32 */
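The MSWIN32 logic above can be exercised stand-alone; a minimal sketch that walks the address space with VirtualQuery and prints the committed, writable regions the collector would consider (Win32 only, prints instead of registering roots):

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <stdio.h>

int main(void)
{
    SYSTEM_INFO si;
    MEMORY_BASIC_INFORMATION mbi;
    char *p;

    GetSystemInfo(&si);
    p = (char *)si.lpMinimumApplicationAddress;
    while (p < (char *)si.lpMaximumApplicationAddress) {
        if (VirtualQuery(p, &mbi, sizeof mbi) != sizeof mbi) break;
        if (mbi.State == MEM_COMMIT
            && (mbi.Protect == PAGE_READWRITE
                || mbi.Protect == PAGE_EXECUTE_READWRITE))
            printf("%p..%p\n", mbi.BaseAddress,
                   (char *)mbi.BaseAddress + mbi.RegionSize);
        p = (char *)mbi.BaseAddress + mbi.RegionSize;   /* next region */
    }
    return 0;
}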

#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;

  /* Obtain id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);

      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because */
      /* of a bug in the non-shared ldr_next_module stub */
        if (status != 0 ) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }

      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0 )
            ABORT("ldr_inq_module failed");

      /* is module for the main program (i.e. nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */

#     ifdef VERBOSE
        GC_printf("---Module---\n");
        GC_printf("Module ID            = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions     = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("flags for module     = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("pathname of module   = \"%s\"\n", moduleinfo.lmi_name);
#     endif

      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0 )
                ABORT("ldr_inq_region failed");

          /* only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

#         ifdef VERBOSE
            GC_printf("--- Region ---\n");
            GC_printf("Region number    = %16ld\n",
                      regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
            GC_printf("Virtual address  = %16p\n", regioninfo.lri_vaddr);
            GC_printf("Mapped address   = %16p\n", regioninfo.lri_mapaddr);
            GC_printf("Region size      = %16ld\n", regioninfo.lri_size);
            GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);
#         endif

          /* register region as a garbage collection root */
            GC_add_roots_inner (
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);

        }
    }
}
#endif

#if defined(HP_PA)

#include <errno.h>
#include <dl.h>

extern int errno;
extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */

  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
            if (errno == EINVAL) {
                break; /* Moved past end of shared library list --> finished */
            } else {
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long) errno);
                }
                ABORT("shl_get failed");
            }
        }

#     ifdef VERBOSE
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex           = %d\n", index);
        GC_printf1("\thandle          = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);
#     endif

      /* register shared library's data segment as a garbage collection root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}
#endif /* HP_PA */

#ifdef RS6000
#pragma alloca
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
	int len;
	char *ldibuf;
	int ldibuflen;
	struct ld_info *ldi;

	ldibuf = alloca(ldibuflen = 8192);

	while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
		if (errno != ENOMEM) {
			ABORT("loadquery failed");
		}
		ldibuf = alloca(ldibuflen *= 2);
	}

	ldi = (struct ld_info *)ldibuf;
	while (ldi) {
		len = ldi->ldinfo_next;
		GC_add_roots_inner(
				ldi->ldinfo_dataorg,
				(unsigned long)ldi->ldinfo_dataorg
			        + ldi->ldinfo_datasize,
				TRUE);
		ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
	}
}
#endif /* RS6000 */



#else /* !DYNAMIC_LOADING */

#ifdef PCR

#   include "il/PCR_IL.h"
#   include "th/PCR_ThCtl.h"
#   include "mm/PCR_MM.h"

void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules.	*/
    {
        PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
        PCR_IL_LoadedSegment * q;

        /* Skip uncommitted files */
        while (p != NIL && !(p -> lf_commitPoint)) {
            /* The loading of this file has not yet been committed  */
            /* Hence its description could be inconsistent.         */
            /* Furthermore, it hasn't yet been run.  Hence its data */
            /* segments can't possibly reference heap allocated     */
            /* objects.                                             */
            p = p -> lf_prev;
        }
        for (; p != NIL; p = p -> lf_prev) {
          for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
            if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                == PCR_IL_SegFlags_Traced_on) {
              GC_add_roots_inner
              	((char *)(q -> ls_addr),
              	 (char *)(q -> ls_addr) + q -> ls_bytes,
              	 TRUE);
            }
          }
        }
    }
}


#else /* !PCR */

void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

#endif /* !PCR */
#endif /* !DYNAMIC_LOADING */
@@ -0,0 +1,733 @@
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * Note that this defines a large number of tuning hooks, which can
 * safely be ignored in nearly all cases.  For normal use it suffices
 * to call only GC_MALLOC and perhaps GC_REALLOC.
 * For better performance, also look at GC_MALLOC_ATOMIC, and
 * GC_enable_incremental.  If you need an action to be performed
 * immediately before an object is collected, look at GC_register_finalizer.
 * If you are using Solaris threads, look at the end of this file.
 * Everything else is best ignored unless you encounter performance
 * problems.
 */

#ifndef _GC_H

# define _GC_H
# define __GC
# include <stddef.h>

#if defined(__CYGWIN32__) && defined(GC_USE_DLL)
#include "libgc_globals.h"
#endif

#if defined(_MSC_VER) && defined(_DLL)
# ifdef GC_BUILD
#   define GC_API __declspec(dllexport)
# else
#   define GC_API __declspec(dllimport)
# endif
#endif

#if defined(__WATCOMC__) && defined(GC_DLL)
# ifdef GC_BUILD
#   define GC_API extern __declspec(dllexport)
# else
#   define GC_API extern __declspec(dllimport)
# endif
#endif

#ifndef GC_API
#define GC_API extern
#endif

# if defined(__STDC__) || defined(__cplusplus)
#   define GC_PROTO(args) args
    typedef void * GC_PTR;
# else
#   define GC_PROTO(args) ()
    typedef char * GC_PTR;
# endif

# ifdef __cplusplus
    extern "C" {
# endif


/* Define word and signed_word to be unsigned and signed types of the   */
/* same size as char * or void *.  There seems to be no way to do this  */
/* even semi-portably.  The following is probably no better/worse       */
/* than almost anything else.                                           */
/* The ANSI standard suggests that size_t and ptrdiff_t might be        */
/* better choices.  But those appear to have incorrect definitions      */
/* on many systems.  Notably "typedef int size_t" seems to be both      */
/* frequent and WRONG.                                                  */
typedef unsigned long GC_word;
typedef long GC_signed_word;

/* Public read-only variables */

GC_API GC_word GC_gc_no; /* Counter incremented per collection.    */
                         /* Includes empty GCs at startup.          */


/* Public R/W variables */

GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
            /* When there is insufficient memory to satisfy  */
            /* an allocation request, we return              */
            /* (*GC_oom_fn)().  By default this just         */
            /* returns 0.                                    */
            /* If it returns, it must return 0 or a valid    */
            /* pointer to a previously allocated heap        */
            /* object.                                       */
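A short usage sketch for GC_oom_fn; the handler name and the 1 MB cap are illustrative, and GC_set_max_heap_size is only used here to make the failure easy to provoke:

#include <stdio.h>
#include <stdlib.h>
#include "gc.h"

static GC_PTR die_on_oom(size_t bytes_requested)
{
    fprintf(stderr, "out of memory requesting %lu bytes\n",
            (unsigned long)bytes_requested);
    exit(1);        /* never returning is also allowed by the contract */
    return 0;
}

int main(void)
{
    GC_oom_fn = die_on_oom;           /* consulted when allocation fails */
    GC_set_max_heap_size(1 << 20);    /* force failure quickly           */
    for (;;) (void)GC_malloc_uncollectable(4096);
}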

GC_API int GC_quiet;    /* Disable statistics output.  Only matters if   */
                        /* collector has been compiled with statistics   */
                        /* enabled.  This involves a performance cost,   */
                        /* and is thus not the default.                  */

GC_API int GC_dont_gc;  /* Don't collect unless explicitly requested, e.g. */
                        /* because it's not safe.                          */

GC_API int GC_dont_expand;
                        /* Don't expand heap unless explicitly requested */
                        /* or forced to.                                 */

GC_API int GC_full_freq;    /* Number of partial collections between */
                            /* full collections.  Matters only if    */
                            /* GC_incremental is set.                */

GC_API GC_word GC_non_gc_bytes;
            /* Bytes not considered candidates for collection.  */
            /* Used only to control scheduling of collections.  */

GC_API GC_word GC_free_space_divisor;
            /* We try to make sure that we allocate at       */
            /* least N/GC_free_space_divisor bytes between   */
            /* collections, where N is the heap size plus    */
            /* a rough estimate of the root set size.        */
            /* Initially, GC_free_space_divisor = 4.         */
            /* Increasing its value will use less space      */
            /* but more collection time.  Decreasing it      */
            /* will appreciably decrease collection time     */
            /* at the expense of space.                      */
            /* GC_free_space_divisor = 1 will effectively    */
            /* disable collections.                          */

GC_API GC_word GC_max_retries;
            /* The maximum number of GCs attempted before    */
            /* reporting out of memory after heap            */
            /* expansion fails.  Initially 0.                */


GC_API char *GC_stackbottom;    /* Cool end of user stack.           */
                                /* May be set in the client prior to */
                                /* calling any GC_ routines.  This   */
                                /* avoids some overhead, and         */
                                /* potentially some signals that can */
                                /* confuse debuggers.  Otherwise the */
                                /* collector attempts to set it      */
                                /* automatically.                    */
                                /* For multithreaded code, this is the */
                                /* cold end of the stack for the       */
                                /* primordial thread.                  */

/* Public procedures */
/*
 * general purpose allocation routines, with roughly malloc calling conv.
 * The atomic versions promise that no relevant pointers are contained
 * in the object.  The nonatomic versions guarantee that the new object
 * is cleared.  GC_malloc_stubborn promises that no changes to the object
 * will occur after GC_end_stubborn_change has been called on the
 * result of GC_malloc_stubborn.  GC_malloc_uncollectable allocates an object
 * that is scanned for pointers to collectable objects, but is not itself
 * collectable.  GC_malloc_uncollectable and GC_free called on the resulting
 * object implicitly update GC_non_gc_bytes appropriately.
 */
GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));

/* The following is only defined if the library has been suitably    */
/* compiled:                                                         */
GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));

/* Explicitly deallocate an object.  Dangerous if used incorrectly.     */
/* Requires a pointer to the base of an object.                         */
/* If the argument is stubborn, it should not be changeable when freed. */
/* An object should not be enabled for finalization when it is          */
/* explicitly deallocated.                                              */
/* GC_free(0) is a no-op, as required by ANSI C for free.               */
GC_API void GC_free GC_PROTO((GC_PTR object_addr));

/*
 * Stubborn objects may be changed only if the collector is explicitly informed.
 * The collector is implicitly informed of coming change when such
 * an object is first allocated.  The following routines inform the
 * collector that an object will no longer be changed, or that it will
 * once again be changed.  Only nonNIL pointer stores into the object
 * are considered to be changes.  The argument to GC_end_stubborn_change
 * must be exactly the value returned by GC_malloc_stubborn or passed to
 * GC_change_stubborn.  (In the second case it may be an interior pointer
 * within 512 bytes of the beginning of the objects.)
 * There is a performance penalty for allowing more than
 * one stubborn object to be changed at once, but it is acceptable to
 * do so.  The same applies to dropping stubborn objects that are still
 * changeable.
 */
GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
|
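
/* A minimal usage sketch, not part of the original header: allocate a */
/* node as stubborn, initialize it, then freeze it so the collector may */
/* avoid re-scanning it.  The struct and helper here are hypothetical. */
#if 0
struct node { struct node *next; int value; };

struct node *make_node(struct node *next, int value)
{
    struct node *n = (struct node *)GC_malloc_stubborn(sizeof(struct node));
    n->next = next;                     /* changes allowed until ...     */
    n->value = value;
    GC_end_stubborn_change((GC_PTR)n);  /* ... we promise no more writes */
    return n;
}
#endif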

/* Return a pointer to the base (lowest address) of an object given */
/* a pointer to a location within the object. */
/* Return 0 if displaced_pointer doesn't point to within a valid */
/* object. */
GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));

/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
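
/* Illustrative sketch, not part of the original header: recover an */
/* object's base and size from an interior pointer.  Assumes a default */
/* build in which interior pointers are recognized. */
#if 0
#include <assert.h>

void base_and_size_example(void)
{
    char *obj = (char *)GC_malloc(100);
    char *interior = obj + 17;
    assert(GC_base((GC_PTR)interior) == (GC_PTR)obj); /* base recovered  */
    assert(GC_size((GC_PTR)obj) >= 100);              /* may be rounded  */
}
#endif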

/* For compatibility with C library.  This is occasionally faster than */
/* a malloc followed by a bcopy.  But if you rely on that, either here */
/* or with the standard C library, your code is broken.  In my */
/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
/* The resulting object has the same kind as the original. */
/* If the argument is stubborn, the result will have changes enabled. */
/* It is an error to have changes enabled for the original object. */
/* Follows ANSI conventions for NULL old_object. */
GC_API GC_PTR GC_realloc
	GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));

/* Explicitly increase the heap size. */
/* Returns 0 on failure, 1 on success. */
GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));

/* Limit the heap size to n bytes.  Useful when you're debugging, */
/* especially on systems that don't handle running out of memory well. */
/* n == 0 ==> unbounded.  This is the default. */
GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));

/* Inform the collector that a certain section of statically allocated */
/* memory contains no pointers to garbage collected memory.  Thus it */
/* need not be scanned.  This is sometimes important if the application */
/* maps large read/write files into the address space, which could be */
/* mistaken for dynamic library data segments on some systems. */
GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));

/* Clear the set of root segments.  Wizards only. */
GC_API void GC_clear_roots GC_PROTO((void));

/* Add a root segment.  Wizards only. */
GC_API void GC_add_roots GC_PROTO((char * low_address,
				   char * high_address_plus_1));

/* Add a displacement to the set of those considered valid by the */
/* collector.  GC_register_displacement(n) means that if p was returned */
/* by GC_malloc, then (char *)p + n will be considered to be a valid */
/* pointer to p.  N must be small and less than the size of p. */
/* (All pointers to the interior of objects from the stack are */
/* considered valid in any case.  This applies to heap objects and */
/* static data.) */
/* Preferably, this should be called before any other GC procedures. */
/* Calling it later adds to the probability of excess memory */
/* retention. */
/* This is a no-op if the collector was compiled with recognition of */
/* arbitrary interior pointers enabled, which is now the default. */
GC_API void GC_register_displacement GC_PROTO((GC_word n));

/* The following version should be used if any debugging allocation is */
/* being done. */
GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));

/* Explicitly trigger a full, world-stop collection. */
GC_API void GC_gcollect GC_PROTO((void));

/* Trigger a full world-stopped collection.  Abort the collection if */
/* and when stop_func returns a nonzero value.  Stop_func will be */
/* called frequently, and should be reasonably fast.  This works even */
/* if virtual dirty bits, and hence incremental collection, are not */
/* available for this architecture.  Collections can be aborted faster */
/* than normal pause times for incremental collection.  However, */
/* aborted collections do no useful work; the next collection needs */
/* to start from the beginning. */
/* Return 0 if the collection was aborted, 1 if it succeeded. */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));

/* Return the number of bytes in the heap.  Excludes collector private */
/* data structures.  Includes empty blocks and fragmentation loss. */
/* Includes some pages that were allocated but never written. */
GC_API size_t GC_get_heap_size GC_PROTO((void));

/* Return the number of bytes allocated since the last collection. */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));

/* Enable incremental/generational collection. */
/* Not advisable unless dirty bits are */
/* available or most heap objects are */
/* pointer-free (atomic) or immutable. */
/* Don't use in leak finding mode. */
/* Ignored if GC_dont_gc is true. */
GC_API void GC_enable_incremental GC_PROTO((void));

/* Perform some garbage collection work, if appropriate. */
/* Return 0 if there is no more work to be done. */
/* Typically performs an amount of work corresponding roughly */
/* to marking from one page.  May do more work if further */
/* progress requires it, e.g. if incremental collection is */
/* disabled.  It is reasonable to call this in a wait loop */
/* until it returns 0. */
GC_API int GC_collect_a_little GC_PROTO((void));
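
/* Sketch of the wait-loop idiom described above, not part of the */
/* original header: do collection work while the application is idle. */
/* The application_is_idle() predicate is hypothetical. */
#if 0
void on_idle(void)
{
    while (application_is_idle()) {
        if (GC_collect_a_little() == 0) break; /* no GC work pending */
    }
}
#endif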

/* Allocate an object of size lb bytes.  The client guarantees that */
/* as long as the object is live, it will be referenced by a pointer */
/* that points to somewhere within the first 256 bytes of the object. */
/* (This should normally be declared volatile to prevent the compiler */
/* from invalidating this assertion.)  This routine is only useful */
/* if a large array is being allocated.  It reduces the chance of */
/* accidentally retaining such an array as a result of scanning an */
/* integer that happens to be an address inside the array.  (Actually, */
/* it reduces the chance of the allocator not finding space for such */
/* an array, since it will try hard to avoid introducing such a false */
/* reference.)  On a SunOS 4.X or MS Windows system this is recommended */
/* for arrays likely to be larger than 100K or so.  For other systems, */
/* or if the collector is not configured to recognize all interior */
/* pointers, the threshold is normally much higher. */
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));

#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
#   define GC_ADD_CALLER
#   define GC_RETURN_ADDR (GC_word)__return_address
#endif

#ifdef GC_ADD_CALLER
#   define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
#   define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
#else
#   define GC_EXTRAS __FILE__, __LINE__
#   define GC_EXTRA_PARAMS char * descr_string, int descr_int
#endif

/* Debugging (annotated) allocation.  GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
	GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
		  GC_EXTRA_PARAMS));

GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
#   define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
#   define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
							GC_EXTRAS)
#   define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
#   define GC_FREE(p) GC_debug_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
	GC_debug_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
	GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
#   define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
	GC_general_register_disappearing_link(link, GC_base(obj))
#   define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
# else
#   define GC_MALLOC(sz) GC_malloc(sz)
#   define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
#   define GC_REALLOC(old, sz) GC_realloc(old, sz)
#   define GC_FREE(p) GC_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
	GC_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
	GC_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
#   define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
	GC_general_register_disappearing_link(link, obj)
#   define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
# endif
/* The following are included because they are often convenient, and */
/* reduce the chance for a misspecified size argument.  But calls may */
/* expand to something syntactically incorrect if t is a complicated */
/* type expression. */
# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
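
/* Typed-allocation sketch, not part of the original header: GC_NEW */
/* expands to GC_malloc (or GC_debug_malloc under GC_DEBUG) with the */
/* size computed from the type.  The point type is hypothetical. */
#if 0
typedef struct point { double x, y; } point;

point *origin(void)
{
    point *p = GC_NEW(point);   /* zero-filled, collectable allocation */
    p->x = 0.0;
    p->y = 0.0;
    return p;
}
#endif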

/* Finalization.  Some of these primitives are grossly unsafe. */
/* The idea is to make them both cheap, and sufficient to build */
/* a safer layer, closer to PCedar finalization. */
/* The interface represents my conclusions from a long discussion */
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
/* Christian Jacobi, and Russ Atkinson.  It's not perfect, and */
/* probably nobody else agrees with it.  Hans-J. Boehm 3/13/92 */
typedef void (*GC_finalization_proc)
	GC_PROTO((GC_PTR obj, GC_PTR client_data));

GC_API void GC_register_finalizer
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
/* When obj is no longer accessible, invoke */
/* (*fn)(obj, cd).  If a and b are inaccessible, and */
/* a points to b (after disappearing links have been */
/* made to disappear), then only a will be */
/* finalized.  (If this does not create any new */
/* pointers to b, then b will be finalized after the */
/* next collection.)  Any finalizable object that */
/* is reachable from itself by following one or more */
/* pointers will not be finalized (or collected). */
/* Thus cycles involving finalizable objects should */
/* be avoided, or broken by disappearing links. */
/* All but the last finalizer registered for an object */
/* are ignored. */
/* Finalization may be removed by passing 0 as fn. */
/* Finalizers are implicitly unregistered just before */
/* they are invoked. */
/* The old finalizer and client data are stored in */
/* *ofn and *ocd. */
/* Fn is never invoked on an accessible object, */
/* provided hidden pointers are converted to real */
/* pointers only if the allocation lock is held, and */
/* such conversions are not performed by finalization */
/* routines. */
/* If GC_register_finalizer is aborted as a result of */
/* a signal, the object may be left with no */
/* finalization, even if neither the old nor new */
/* finalizer were NULL. */
/* Obj should be the nonNULL starting address of an */
/* object allocated by GC_malloc or friends. */
/* Note that any garbage collectable object referenced */
/* by cd will be considered accessible until the */
/* finalizer is invoked. */
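
/* Registration sketch, not part of the original header: attach a */
/* finalizer that releases an external resource when the wrapper */
/* object becomes unreachable.  The file_wrapper type is hypothetical. */
#if 0
#include <stdio.h>

struct file_wrapper { FILE *fp; };

static void close_file(GC_PTR obj, GC_PTR client_data)
{
    struct file_wrapper *w = (struct file_wrapper *)obj;
    if (w->fp != NULL) fclose(w->fp);
    (void)client_data;
}

struct file_wrapper *wrap(FILE *fp)
{
    struct file_wrapper *w =
        (struct file_wrapper *)GC_MALLOC(sizeof(struct file_wrapper));
    w->fp = fp;
    /* Old finalizer/data are not wanted, so pass 0 for ofn and ocd. */
    GC_REGISTER_FINALIZER((GC_PTR)w, close_file, 0, 0, 0);
    return w;
}
#endif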

/* Another version of the above follows.  It ignores */
/* self-cycles, i.e. pointers from a finalizable object to */
/* itself.  There is a stylistic argument that this is wrong, */
/* but it's unavoidable for C++, since the compiler may */
/* silently introduce these.  It's also benign in that specific */
/* case. */
GC_API void GC_register_finalizer_ignore_self
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_ignore_self
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));

/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
/* objects to be finalized in the correct order.  Standard */
/* use involves calling GC_register_disappearing_link(&p), */
/* where p is a pointer that is not followed by finalization */
/* code, and should not be considered in determining */
/* finalization order. */
GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Link should point to a field of a heap allocated */
/* object obj.  *link will be cleared when obj is */
/* found to be inaccessible.  This happens BEFORE any */
/* finalization code is invoked, and BEFORE any */
/* decisions about finalization order are made. */
/* This is useful in telling the finalizer that */
/* some pointers are not essential for proper */
/* finalization.  This may avoid finalization cycles. */
/* Note that obj may be resurrected by another */
/* finalizer, and thus the clearing of *link may */
/* be visible to non-finalization code. */
/* There's an argument that an arbitrary action should */
/* be allowed here, instead of just clearing a pointer. */
/* But this causes problems if that action alters or */
/* examines connectivity. */
/* Returns 1 if link was already registered, 0 */
/* otherwise. */
/* Only exists for backward compatibility.  See below: */

GC_API int GC_general_register_disappearing_link
	GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
/* A slight generalization of the above.  *link is */
/* cleared when obj first becomes inaccessible.  This */
/* can be used to implement weak pointers easily and */
/* safely.  Typically link will point to a location */
/* holding a disguised pointer to obj.  (A pointer */
/* inside an "atomic" object is effectively */
/* disguised.)  In this way soft */
/* pointers are broken before any object */
/* reachable from them is finalized.  Each link */
/* may be registered only once, i.e. with one obj */
/* value.  This was added after a long email discussion */
/* with John Ellis. */
/* Obj must be a pointer to the first word of an object */
/* we allocated.  It is unsafe to explicitly deallocate */
/* the object containing link.  Explicitly deallocating */
/* obj may or may not cause link to eventually be */
/* cleared. */
GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Returns 0 if link was not actually registered. */
/* Undoes a registration by either of the above two */
/* routines. */
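
/* Weak-cache sketch, not part of the original header.  It assumes the */
/* header was included with I_HIDE_POINTERS defined, so that the */
/* HIDE_POINTER/REVEAL_POINTER macros declared below are available; */
/* the cache itself is hypothetical.  The disguised pointer keeps the */
/* collector from tracing the slot, and the registered link clears it */
/* when the target dies. */
#if 0
static GC_hidden_pointer cached;        /* disguised, so never traced */

void cache_set(GC_PTR obj)
{
    cached = HIDE_POINTER(obj);
    /* Clear the slot when obj becomes inaccessible. */
    GC_general_register_disappearing_link((GC_PTR *)&cached, obj);
}

/* Real code must do the reveal under the allocation lock; see */
/* GC_call_with_alloc_lock below. */
GC_PTR cache_get_unlocked(void)
{
    return cached != 0 ? REVEAL_POINTER(cached) : 0;
}
#endif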

/* Auxiliary fns to make finalization work correctly with displaced */
/* pointers introduced by the debugging allocators. */
GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));

GC_API int GC_invoke_finalizers GC_PROTO((void));
/* Run finalizers for all objects that are ready to */
/* be finalized.  Return the number of finalizers */
/* that were run.  Normally this is also called */
/* implicitly during some allocations.  If */
/* FINALIZE_ON_DEMAND is defined, it must be called */
/* explicitly. */

/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
/* Returns old warning procedure. */
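
/* Redirection sketch, not part of the original header: silence or */
/* reroute collector warnings.  The replacement shown just counts them. */
#if 0
static GC_word warn_count;

static void count_warning(char *msg, GC_word arg)
{
    (void)msg; (void)arg;
    ++warn_count;               /* swallow the message */
}

void install_warn_proc(void)
{
    GC_warn_proc old = GC_set_warn_proc(count_warning);
    (void)old;                  /* could be chained to instead */
}
#endif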

/* The following is intended to be used by a higher level */
/* (e.g. cedar-like) finalization facility.  It is expected */
/* that finalization code will arrange for hidden pointers to */
/* disappear.  Otherwise objects can be accessed after they */
/* have been collected. */
/* Note that putting pointers in atomic objects or in */
/* nonpointer slots of "typed" objects is equivalent to */
/* disguising them in this way, and may have other advantages. */
# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
/* Converting a hidden pointer to a real pointer requires verifying */
/* that the object still exists.  This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
# endif /* I_HIDE_POINTERS */

typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
GC_API GC_PTR GC_call_with_alloc_lock
	GC_PROTO((GC_fn_type fn, GC_PTR client_data));
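
/* Lock-protected reveal sketch, not part of the original header, and */
/* assuming I_HIDE_POINTERS was defined before including gc.h: convert */
/* a hidden pointer back to a real one while holding the allocation */
/* lock, so the collector cannot reclaim the object mid-conversion. */
#if 0
static GC_PTR reveal_cb(GC_PTR client_data)
{
    GC_hidden_pointer h = *(GC_hidden_pointer *)client_data;
    return h != 0 ? REVEAL_POINTER(h) : 0;
}

GC_PTR reveal_safely(GC_hidden_pointer *slot)
{
    return GC_call_with_alloc_lock(reveal_cb, (GC_PTR)slot);
}
#endif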

/* Check that p and q point to the same object. */
/* Fail conspicuously if they don't. */
/* Returns the first argument. */
/* Succeeds if neither p nor q points to the heap. */
/* May succeed if both p and q point between heap objects. */
GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));

/* Checked pointer pre- and post- increment operations.  Note that */
/* the second argument is in units of bytes, not multiples of the */
/* object size.  This should either be invoked from a macro, or the */
/* call should be automatically generated. */
GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));

/* Check that p is visible */
/* to the collector as a possibly pointer containing location. */
/* If it isn't, fail conspicuously. */
/* Returns the argument in all cases.  May erroneously succeed */
/* in hard cases.  (This is intended for debugging use with */
/* untyped allocations.  The idea is that it should be possible, though */
/* slow, to add such a call to all indirect pointer stores.) */
/* Currently useless for multithreaded worlds. */
GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));

/* Check that if p is a pointer to a heap page, then it points to */
/* a valid displacement within a heap object. */
/* Fail conspicuously if this property does not hold. */
/* Uninteresting with ALL_INTERIOR_POINTERS. */
/* Always returns its argument. */
GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));

/* Safer, but slow, pointer addition.  Probably useful mainly with */
/* a preprocessor.  Useful only for heap pointers. */
#ifdef GC_DEBUG
#   define GC_PTR_ADD3(x, n, type_of_result) \
	((type_of_result)GC_same_obj((x)+(n), (x)))
#   define GC_PRE_INCR3(x, n, type_of_result) \
	((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
#   define GC_POST_INCR2(x, type_of_result) \
	((type_of_result)GC_post_incr(&(x), sizeof(*x)))
#   ifdef __GNUC__
#       define GC_PTR_ADD(x, n) \
	    GC_PTR_ADD3(x, n, typeof(x))
#       define GC_PRE_INCR(x, n) \
	    GC_PRE_INCR3(x, n, typeof(x))
#       define GC_POST_INCR(x, n) \
	    GC_POST_INCR2(x, typeof(x))
#   else
	/* We can't do this right without typeof, which ANSI */
	/* decided was not sufficiently useful.  Repeatedly */
	/* mentioning the arguments seems too dangerous to be */
	/* useful.  So does not casting the result. */
#       define GC_PTR_ADD(x, n) ((x)+(n))
#   endif
#else /* !GC_DEBUG */
#   define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
#   define GC_PTR_ADD(x, n) ((x)+(n))
#   define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
#   define GC_PRE_INCR(x, n) ((x) += (n))
#   define GC_POST_INCR2(x, type_of_result) ((x)++)
#   define GC_POST_INCR(x, n) ((x)++)
#endif

/* Safer assignment of a pointer to a nonstack location. */
#ifdef GC_DEBUG
#   ifdef __STDC__
#       define GC_PTR_STORE(p, q) \
	    (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
#   else
#       define GC_PTR_STORE(p, q) \
	    (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
#   endif
#else /* !GC_DEBUG */
#   define GC_PTR_STORE(p, q) (*(p) = (q))
#endif

/* Functions called to report pointer checking errors */
GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));

GC_API void (*GC_is_valid_displacement_print_proc)
	GC_PROTO((GC_PTR p));

GC_API void (*GC_is_visible_print_proc)
	GC_PROTO((GC_PTR p));

#if defined(_SOLARIS_PTHREADS) && !defined(SOLARIS_THREADS)
#   define SOLARIS_THREADS
#endif

#ifdef SOLARIS_THREADS
/* We need to intercept calls to many of the threads primitives, so */
/* that we can locate thread stacks and stop the world. */
/* Note also that the collector cannot see thread specific data. */
/* Thread specific data should generally consist of pointers to */
/* uncollectable objects, which are deallocated using the destructor */
/* facility in thr_keycreate. */
# include <thread.h>
# include <signal.h>
int GC_thr_create(void *stack_base, size_t stack_size,
		  void *(*start_routine)(void *), void *arg, long flags,
		  thread_t *new_thread);
int GC_thr_join(thread_t wait_for, thread_t *departed, void **status);
int GC_thr_suspend(thread_t target_thread);
int GC_thr_continue(thread_t target_thread);
void * GC_dlopen(const char *path, int mode);

# ifdef _SOLARIS_PTHREADS
#   include <pthread.h>
    extern int GC_pthread_create(pthread_t *new_thread,
				 const pthread_attr_t *attr,
				 void * (*thread_execp)(void *), void *arg);
    extern int GC_pthread_join(pthread_t wait_for, void **status);

#   undef thread_t

#   define pthread_join GC_pthread_join
#   define pthread_create GC_pthread_create
#endif

# define thr_create GC_thr_create
# define thr_join GC_thr_join
# define thr_suspend GC_thr_suspend
# define thr_continue GC_thr_continue
# define dlopen GC_dlopen

# endif /* SOLARIS_THREADS */


#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>

int GC_pthread_create(pthread_t *new_thread,
		      const pthread_attr_t *attr,
		      void *(*start_routine)(void *), void *arg);
int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
int GC_pthread_join(pthread_t thread, void **retval);

# define pthread_create GC_pthread_create
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join

#endif /* IRIX_THREADS || LINUX_THREADS */

# if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
	defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
	defined(IRIX_JDK_THREADS)
/* Any flavor of threads except SRC_M3. */
/* This returns a list of objects, linked through their first */
/* word.  Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p))	/* Retrieve the next element */
					/* in returned list. */
extern void GC_thr_init();	/* Needed for Solaris/X86 */
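
/* Batch-allocation sketch, not part of the original header: carve a */
/* list returned by GC_malloc_many into individual objects, touching */
/* the allocation lock only once per batch.  The free_list slot would */
/* be thread-local in real code. */
#if 0
static GC_PTR free_list;

GC_PTR alloc16(void)
{
    GC_PTR p = free_list;
    if (p == 0) {
        p = GC_malloc_many(16);     /* one lock round-trip, many objs */
        if (p == 0) return 0;
    }
    free_list = GC_NEXT(p);         /* unlink the head                */
    GC_NEXT(p) = 0;                 /* don't retain the rest via p    */
    return p;
}
#endif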

#endif /* THREADS && !SRC_M3 */

/*
 * If you are planning on putting
 * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
 * from the statically loaded program section.
 * This circumvents a Solaris 2.X (X<=4) linker bug.
 */
#if defined(sparc) || defined(__sparc)
#   define GC_INIT() { extern end, etext; \
		       GC_noop(&end, &etext); }
#else
# if defined(__CYGWIN32__) && defined(GC_USE_DLL)
    /*
     * Similarly gnu-win32 DLLs need explicit initialization
     */
#   define GC_INIT() { GC_add_roots(DATASTART, DATAEND); }
# else
#   define GC_INIT()
# endif
#endif

#if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
	|| defined(_WIN32)
/* Win32s may not free all resources on process exit. */
/* This explicitly deallocates the heap. */
GC_API void GC_win32_free_heap ();
#endif

#ifdef __cplusplus
}  /* end of extern "C" */
#endif

#endif /* _GC_H */

@ -0,0 +1,60 @@
/*************************************************************************
Copyright (c) 1994 by Xerox Corporation.  All rights reserved.

THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.

Last modified on Sat Nov 19 19:31:14 PST 1994 by ellis
                on Sat Jun  8 15:10:00 PST 1994 by boehm

Permission is hereby granted to copy this code for any purpose,
provided the above notices are retained on all copies.

This implementation module for gc_c++.h provides an implementation of
the global operators "new" and "delete" that calls the Boehm
allocator.  All objects allocated by this implementation will be
non-collectable but part of the root set of the collector.

You should ensure (using implementation-dependent techniques) that the
linker finds this module before the library that defines the default
built-in "new" and "delete".

Authors: John R. Ellis and Jesse Hull

**************************************************************************/
/* Boehm, December 20, 1994 7:26 pm PST */

#include "gc_cpp.h"

void* operator new( size_t size ) {
    return GC_MALLOC_UNCOLLECTABLE( size );}

void operator delete( void* obj ) {
    GC_FREE( obj );}

#ifdef _MSC_VER
// This new operator is used by VC++ in Debug builds.
void* operator new( size_t size,
                    int ,//nBlockUse,
                    const char * szFileName,
                    int nLine
                  ) {
#   ifndef GC_DEBUG
        return GC_malloc_uncollectable( size );
#   else
        return GC_debug_malloc_uncollectable(size, szFileName, nLine);
#   endif
}
#endif

#ifdef OPERATOR_NEW_ARRAY

void* operator new[]( size_t size ) {
    return GC_MALLOC_UNCOLLECTABLE( size );}

void operator delete[]( void* obj ) {
    GC_FREE( obj );}

#endif /* OPERATOR_NEW_ARRAY */

[Diffs for two files not shown because of their size.]
@ -0,0 +1,74 @@
/*
    generic_threads.c

    A module that permits clients of the GC to supply callback functions
    for thread stack scanning.

    by Patrick C. Beard.
 */

#include "generic_threads.h"
#include "gc_priv.h"
#include "gc_mark.h"

static void mark_range(char* begin, char* end)
{
    while (begin < end) {
        GC_PUSH_ONE_STACK(*(word*)begin, 0);
        begin += ALIGNMENT;
    }
}

/*
    Until a client installs a stack marking routine, this will mark the
    current stack.  This is crucial to keep data live during program
    startup.
 */
static void default_mark_all_stacks(GC_mark_range_proc marker)
{
#ifdef STACK_GROWS_DOWN
    mark_range(GC_approx_sp(), GC_get_stack_base());
#else
    mark_range(GC_get_stack_base(), GC_approx_sp());
#endif
}

static void default_proc(void* mutex) {}

GC_generic_mark_all_stacks_proc GC_generic_mark_all_stacks = &default_mark_all_stacks;
void* GC_generic_mutex = NULL;
GC_generic_proc GC_generic_locker = &default_proc;
GC_generic_proc GC_generic_unlocker = &default_proc;
GC_generic_proc GC_generic_stopper = &default_proc;
GC_generic_proc GC_generic_starter = &default_proc;

void GC_generic_init_threads(GC_generic_mark_all_stacks_proc mark_all_stacks,
                             void* mutex,
                             GC_generic_proc locker, GC_generic_proc unlocker,
                             GC_generic_proc stopper, GC_generic_proc starter)
{
    GC_generic_mark_all_stacks = mark_all_stacks;
    GC_generic_mutex = mutex;
    GC_generic_locker = locker;
    GC_generic_unlocker = unlocker;
    GC_generic_stopper = stopper;
    GC_generic_starter = starter;

    GC_dont_expand = 1;
    // GC_set_max_heap_size(20L * 1024L * 1024L);
}
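
/*
    Hookup sketch, not part of this file: a hypothetical client that owns
    its own threads could register callbacks like these through
    GC_generic_init_threads.  The client_* functions below are assumed
    to exist in the client runtime; they are not part of this module.
 */
#if 0
static void client_mark_all_stacks(GC_mark_range_proc marker)
{
    /* Expected to call marker(lo, hi) once per client thread stack. */
    client_for_each_thread_stack(marker);
}

static void client_lock(void* mutex)   { client_mutex_lock(mutex); }
static void client_unlock(void* mutex) { client_mutex_unlock(mutex); }
static void client_stop(void* mutex)   { client_suspend_all_threads(); }
static void client_start(void* mutex)  { client_resume_all_threads(); }

void client_install_gc_hooks(void* mutex)
{
    GC_generic_init_threads(&client_mark_all_stacks, mutex,
                            &client_lock, &client_unlock,
                            &client_stop, &client_start);
}
#endif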

void GC_push_all_stacks()
{
    GC_generic_mark_all_stacks(&mark_range);
}

void GC_stop_world()
{
    GC_generic_stopper(GC_generic_mutex);
}

void GC_start_world()
{
    GC_generic_starter(GC_generic_mutex);
}

@ -0,0 +1,665 @@
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads,
 * (i.e. properties not guaranteed by the Pthread standard):
 *
 * - the function GC_linux_thread_top_of_stack(void)
 *   relies on the way LinuxThreads lays out thread stacks
 *   in the address space.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there too.
 */

/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */
# include "gc_priv.h"

# if defined(LINUX_THREADS)

# include <pthread.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>

#undef pthread_create
#undef pthread_sigmask
#undef pthread_join

void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif

/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and */
/* joins.  We never actually create detached threads.  We allocate all */
/* new thread stacks ourselves.  These allow us to maintain this */
/* data structure. */
/* Protected by GC_thr_lock. */
/* Some of this should be declared volatile, but that's inconsistent */
/* with some library routine declarations. */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads */
                                  /* with a given pthread id come */
                                  /* first.  (All but the first are */
                                  /* guaranteed to be dead, but we may */
                                  /* not yet have registered the join.) */
    pthread_t id;
    word flags;
#   define FINISHED 1       /* Thread has exited. */
#   define DETACHED 2       /* Thread is intended to be detached. */
#   define MAIN_THREAD 4    /* True for the original thread only. */

    ptr_t stack_end;
    ptr_t stack_ptr;        /* Valid only when stopped. */
    int signal;
    void * status;          /* The value returned from the thread. */
                            /* Used only to avoid premature */
                            /* reclamation of any data it might */
                            /* reference. */
} * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);

/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  We can't use SIGSTOP directly, because we need to get the
 * thread to save its stack pointer in the GC thread table before
 * suspending.  So we have to reserve a signal of our own for this.
 * This means we have to intercept client calls to change the signal mask.
 * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
 * so we need to use something else.  I chose SIGPWR.
 * (Perhaps SIGUNUSED would be a better choice.)
 */
#define SIG_SUSPEND SIGPWR

#define SIG_RESTART SIGXCPU

sem_t GC_suspend_ack_sem;

/*
    GC_linux_thread_top_of_stack() relies on implementation details of
    LinuxThreads, namely that thread stacks are allocated on 2M boundaries
    and grow to no more than 2M.
    To make sure that we're using LinuxThreads and not some other thread
    package, we generate a dummy reference to `__pthread_initial_thread_bos',
    which is a symbol defined in LinuxThreads, but (hopefully) not in other
    thread packages.
 */
extern char * __pthread_initial_thread_bos;
char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;

#define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)

static inline ptr_t GC_linux_thread_top_of_stack(void)
{
    char *sp = GC_approx_sp();
    ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
#if DEBUG_THREADS
    GC_printf1("SP = %lx\n", (unsigned long)sp);
    GC_printf1("TOS = %lx\n", (unsigned long)tos);
#endif
    return tos;
}

void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t all_sigs;
    sigset_t old_sigs;
    int i;
    sigset_t mask;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf */
    /* of a thread which holds the allocation lock in order */
    /* to stop the world.  Thus concurrent modification of the */
    /* data structure is impossible. */
    me -> stack_ptr = (ptr_t)(&dummy);
    me -> stack_end = GC_linux_thread_top_of_stack();

    /* Tell the thread that wants to stop the world that this */
    /* thread has been stopped.  Note that sem_post() is */
    /* the only async-signal-safe primitive in LinuxThreads. */
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending */
    /* this thread a SIG_RESTART signal. */
    /* SIG_RESTART should be masked at this point.  Thus there */
    /* is no race. */
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
    do {
        me->signal = 0;
        sigsuspend(&mask);  /* Wait for signal */
    } while (me->signal != SIG_RESTART);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#endif
}

void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf */
    /* of a thread which holds the allocation lock in order */
    /* to stop the world.  Thus concurrent modification of the */
    /* data structure is impossible. */
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}

GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128	/* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock. */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
            GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    return(result);
}

/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.) */
/* Caller holds allocation lock. */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* If a thread has been joined, but we have not yet */
/* been notified, then there may be more than one thread */
/* in the table with the same pthread id. */
/* This is OK, but we need a way to delete a specific one. */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* Return a GC_thread corresponding to a given thread_t. */
/* Returns 0 if it's not there. */
/* Caller holds allocation lock or otherwise inhibits */
/* updates. */
/* If there is more than one thread with the given id we */
/* return the most recent one. */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}

/* Caller holds allocation lock. */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> id != my_thread) {
                if (p -> flags & FINISHED) continue;
                n_live_threads++;
#if DEBUG_THREADS
                GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#endif
                result = pthread_kill(p -> id, SIG_SUSPEND);
                switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
                }
            }
        }
    }
    for (i = 0; i < n_live_threads; i++) {
        sem_wait(&GC_suspend_ack_sem);
    }
#if DEBUG_THREADS
    GC_printf1("World stopped 0x%x\n", pthread_self());
#endif
}

/* Caller holds allocation lock. */
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   if DEBUG_THREADS
    GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> id != my_thread) {
                if (p -> flags & FINISHED) continue;
                n_live_threads++;
#if DEBUG_THREADS
                GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#endif
                result = pthread_kill(p -> id, SIG_RESTART);
                switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
                }
            }
        }
    }
#if DEBUG_THREADS
    GC_printf0("World started\n");
#endif
}

/* We hold allocation lock.  We assume the world is stopped. */
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t lo, hi;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#if DEBUG_THREADS
    GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> flags & FINISHED) continue;
            if (pthread_equal(p -> id, me)) {
                lo = GC_approx_sp();
            } else {
                lo = p -> stack_ptr;
            }
            if ((p -> flags & MAIN_THREAD) == 0) {
                if (pthread_equal(p -> id, me)) {
                    hi = GC_linux_thread_top_of_stack();
                } else {
                    hi = p -> stack_end;
                }
            } else {
                /* The original stack. */
                hi = GC_stackbottom;
            }
#if DEBUG_THREADS
            GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                       (unsigned long) p -> id,
                       (unsigned long) lo, (unsigned long) hi);
#endif
            GC_push_all_stack(lo, hi);
        }
    }
}


/* We hold the allocation lock. */
void GC_thr_init()
{
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
    /* SIG_RESTART is unmasked by the handler when necessary. */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_RESTART handler");
    }

    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = 0;
    t -> flags = DETACHED | MAIN_THREAD;
}

int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;   /* 1 ==> in our thread table, but */
                        /* parent hasn't yet noticed. */
};


void GC_thread_exit_proc(void *arg)
{
    GC_thread me;
    struct start_info * si = arg;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
    UNLOCK();
}

int GC_pthread_join(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads. */
    UNLOCK();
    result = pthread_join(thread, retval);
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return result;
}

void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    me -> stack_end = 0;
    UNLOCK();
    start = si -> start_routine;
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, si);
#   ifdef DEBUG_THREADS
    GC_printf1("Starting thread 0x%lx\n", pthread_self());
    GC_printf1("pid = %ld\n", (long) getpid());
    GC_printf1("sp = 0x%lx\n", (long) &arg);
    GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    result = (*start)(start_arg);
#if DEBUG_THREADS
    GC_printf1("Finishing thread 0x%x\n", pthread_self());
#endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit */
    /* while a collection that thinks we're alive is trying to stop */
    /* us. */
    return(result);
}

int
GC_pthread_create(pthread_t *new_thread,
                  const pthread_attr_t *attr,
                  void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
    /* This is otherwise saved only in an area mmapped by the thread */
    /* library, which isn't visible to the collector. */

    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        stack = 0;
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
    }
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
    result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
    /* Wait until child has been added to the thread table. */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise */
    /* visible to the collector. */
    if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
    sem_destroy(&(si -> registered));
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}

GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and */
                        /* holding the allocation lock for an */
                        /* extended period. */

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this, */
/* but until the POSIX scheduling mess gets straightened out ... */

volatile unsigned int GC_allocate_lock = 0;


void GC_lock()
{
#   define low_spin_max 30      /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000   /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
        /* nanosleep(<= 2ms) just spins under Linux.  We */
        /* want to be careful to avoid that behavior. */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
            /* Don't wait for more than about 60 msecs, even */
            /* under extreme contention. */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}

# endif /* LINUX_THREADS */

@ -0,0 +1,90 @@
|
|||
/*
|
||||
MacOS_config.h
|
||||
|
||||
Configuration flags for Macintosh development systems.
|
||||
|
||||
<Revision History>
|
||||
|
||||
11/16/95 pcb Updated compilation flags to reflect latest 4.6 Makefile.
|
||||
|
||||
by Patrick C. Beard.
|
||||
*/
|
||||
/* Boehm, November 17, 1995 12:10 pm PST */
|
||||
|
||||
#ifdef __MWERKS__
|
||||
|
||||
// for CodeWarrior Pro with Metrowerks Standard Library (MSL).
|
||||
// #define MSL_USE_PRECOMPILED_HEADERS 0
|
||||
#include <ansi_prefix.mac.h>
|
||||
#ifndef __STDC__
|
||||
#define __STDC__ 0
|
||||
#endif
|
||||
|
||||
#endif /* __MWERKS__ */
|
||||
|
||||
// these are defined again in gc_priv.h.
|
||||
#undef TRUE
|
||||
#undef FALSE
|
||||
|
||||
#define ALL_INTERIOR_POINTERS // follows interior pointers.
|
||||
#define SILENT // no collection messages.
|
||||
//#define DONT_ADD_BYTE_AT_END // no padding.
|
||||
//#define SMALL_CONFIG // whether to use a smaller heap.
|
||||
#define NO_SIGNALS // signals aren't real on the Macintosh.
|
||||
#define USE_TEMPORARY_MEMORY // use Macintosh temporary memory.
|
||||
#define FIND_LEAK // use as a leak detector.
|
||||
|
||||
// CFLAGS= -O -DNO_SIGNALS -DSILENT -DALL_INTERIOR_POINTERS
|
||||
//
|
||||
//LIBGC_CFLAGS= -O -DNO_SIGNALS -DSILENT \
|
||||
// -DREDIRECT_MALLOC=GC_malloc_uncollectable \
|
||||
// -DDONT_ADD_BYTE_AT_END -DALL_INTERIOR_POINTERS
|
||||
// Flags for building libgc.a -- the last two are required.
|
||||
//
|
||||
// Setjmp_test may yield overly optimistic results when compiled
|
||||
// without optimization.
|
||||
// -DSILENT disables statistics printing, and improves performance.
|
||||
// -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
|
||||
// altered stubborn objects, at substantial performance cost.
|
||||
// Use only for incremental collector debugging.
|
||||
// -DFIND_LEAK causes the collector to assume that all inaccessible
|
||||
// objects should have been explicitly deallocated, and reports exceptions.
|
||||
// Finalization and the test program are not usable in this mode.
|
||||
// -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
|
||||
// (Clients should also define SOLARIS_THREADS and then include
|
||||
// gc.h before performing thr_ or GC_ operations.)
|
||||
// This is broken on nonSPARC machines.
|
||||
// -DALL_INTERIOR_POINTERS allows all pointers to the interior
|
||||
// of objects to be recognized. (See gc_priv.h for consequences.)
|
||||
// -DSMALL_CONFIG tries to tune the collector for small heap sizes,
|
||||
// usually causing it to use less space in such situations.
|
||||
// Incremental collection no longer works in this case.
|
||||
// -DLARGE_CONFIG tunes the collector for unusually large heaps.
|
||||
// Necessary for heaps larger than about 500 MB on most machines.
|
||||
// Recommended for heaps larger than about 64 MB.
|
||||
// -DDONT_ADD_BYTE_AT_END is meaningful only with
|
||||
// -DALL_INTERIOR_POINTERS. Normally -DALL_INTERIOR_POINTERS
|
||||
// causes all objects to be padded so that pointers just past the end of
|
||||
// an object can be recognized. This can be expensive. (The padding
|
||||
// is normally more than one byte due to alignment constraints.)
|
||||
// -DDONT_ADD_BYTE_AT_END disables the padding.
|
||||
// -DNO_SIGNALS does not disable signals during critical parts of
|
||||
// the GC process. This is no less correct than many malloc
|
||||
// implementations, and it sometimes has a significant performance
|
||||
// impact. However, it is dangerous for many not-quite-ANSI C
|
||||
// programs that call things like printf in asynchronous signal handlers.
|
||||
// -DOPERATOR_NEW_ARRAY declares that the C++ compiler supports the
|
||||
// new syntax "operator new[]" for allocating and deleting arrays.
|
||||
// See gc_cpp.h for details. No effect on the C part of the collector.
|
||||
// This is defined implicitly in a few environments.
|
||||
// -DREDIRECT_MALLOC=X causes malloc, realloc, and free to be defined
|
||||
// as aliases for X, GC_realloc, and GC_free, respectively.
|
||||
// Calloc is redefined in terms of the new malloc. X should
|
||||
// be either GC_malloc or GC_malloc_uncollectable.
|
||||
// The former is occasionally useful for working around leaks in code
|
||||
// you don't want to (or can't) look at. It may not work for
|
||||
// existing code, but it often does. Neither works on all platforms,
|
||||
// since some ports use malloc or calloc to obtain system memory.
|
||||
// (Probably works for UNIX, and win32.)
|
||||
// -DNO_DEBUG removes GC_dump and the debugging routines it calls.
|
||||
// Reduces code size slightly at the expense of debuggability.
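/*
 * Illustrative sketch, not part of this file: what -DREDIRECT_MALLOC
 * means for client code.  In a build with
 * -DREDIRECT_MALLOC=GC_malloc_uncollectable, ordinary allocation calls
 * like the ones below are routed through the collector without source
 * changes; dup_string is a hypothetical example function.
 */
#include <stdlib.h>
#include <string.h>

char *dup_string(const char *s)
{
    /* This malloc becomes GC_malloc_uncollectable in such a build.  */
    char *copy = malloc(strlen(s) + 1);
    if (copy != 0) strcpy(copy, s);
    /* A later free(copy) would map to GC_free (or to a no-op with   */
    /* -DIGNORE_FREE).                                               */
    return copy;
}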
(Binary file not shown.)
(Binary file not shown.)
(Binary file not shown.)

@@ -0,0 +1,435 @@
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */

#include <stdio.h>
#include "gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */

/* Allocate a reclaim list for the given kind:  */
/* Return TRUE on success.                      */
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate lb bytes for an object of the given kind.   */
/* Should not be used directly to allocate objects      */
/* such as STUBBORN objects that require special        */
/* handling on allocation.                              */
/* First a version that assumes we already hold the     */
/* lock:                                                */
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
    register word lw;
    register ptr_t op;
    register ptr_t *opp;

    if( SMALL_OBJ(lb) ) {
        register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
          if (lw == 0) lw = 1;
#       endif
        opp = &(kind -> ok_freelist[lw]);
        if( (op = *opp) == 0 ) {
#           ifdef MERGE_SIZES
              if (GC_size_map[lb] == 0) {
                  if (!GC_is_initialized)  GC_init_inner();
                  if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                  return(GC_generic_malloc_inner(lb, k));
              }
#           else
              if (!GC_is_initialized) {
                  GC_init_inner();
                  return(GC_generic_malloc_inner(lb, k));
              }
#           endif
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lw, k);
            if (op == 0) goto out;
        }
        /* Here everything is in a consistent state.    */
        /* We assume the following assignment is        */
        /* atomic.  If we get aborted                   */
        /* after the assignment, we lose an object,     */
        /* but that's benign.                           */
        /* Volatile declarations may need to be added   */
        /* to prevent the compiler from breaking things.*/
        *opp = obj_link(op);
        obj_link(op) = 0;
    } else {
        register struct hblk * h;
        register word n_blocks = divHBLKSZ(ADD_SLOP(lb)
                                           + HDR_BYTES + HBLKSIZE-1);

        if (!GC_is_initialized) GC_init_inner();
        /* Do our share of marking work */
        if(GC_incremental && !GC_dont_gc)
            GC_collect_a_little_inner((int)n_blocks);
        lw = ROUNDED_UP_WORDS(lb);
        while ((h = GC_allochblk(lw, k, 0)) == 0
               && GC_collect_or_expand(n_blocks, FALSE));
        if (h == 0) {
            op = 0;
        } else {
            op = (ptr_t) (h -> hb_body);
            GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
        }
    }
    GC_words_allocd += lw;

out:
    return((ptr_t)op);
}
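/*
 * Illustrative sketch, not part of this file: the small-object fast path
 * above pops from a per-size free list whose link is kept in the object's
 * first word.  A minimal standalone model of that discipline, with
 * hypothetical names:
 */
#include <stddef.h>

#define MAXSZ 64
static void *freelist[MAXSZ + 1];       /* freelist[lw]: objects of lw words */

static void *pop_object(size_t lw)
{
    void *op = freelist[lw];
    if (op != 0) {
        freelist[lw] = *(void **)op;    /* *opp = obj_link(op)  */
        *(void **)op = 0;               /* obj_link(op) = 0     */
    }
    return op;                  /* 0 means: refill, e.g. via GC_allocobj */
}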
ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = GC_generic_malloc_inner(lb, k);
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}


#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of   */
/* the stack.                                                           */

/* Allocate lb bytes of atomic (pointerfree) data */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic(size_t lb)
# else
    GC_PTR GC_malloc_atomic(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t * opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_aobjfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        /* See above comment on signals. */
        *opp = obj_link(op);
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}

/* Allocate lb bytes of composite (pointerful) data */
# ifdef __STDC__
    GC_PTR GC_malloc(size_t lb)
# else
    GC_PTR GC_malloc(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_objfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        /* See above comment on signals. */
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, NORMAL));
    }
}
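/*
 * Illustrative sketch, not part of this file: contrasting the two public
 * entry points above.  Pointer-free payloads can use GC_malloc_atomic so
 * the collector skips scanning them; linked structures use GC_malloc.
 * make_samples and make_node are hypothetical.
 */
#include <stddef.h>
#include "gc.h"

double *make_samples(size_t n)
{
    /* Atomic: the payload is never scanned for pointers. */
    return (double *)GC_malloc_atomic(n * sizeof(double));
}

struct node { struct node *next; int v; };

struct node *make_node(int v, struct node *next)
{
    /* Normal: the payload is scanned conservatively. */
    struct node *p = (struct node *)GC_malloc(sizeof *p);
    if (p != 0) { p->v = v; p->next = next; }
    return p;
}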
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
{
    /* It might help to manually inline the GC_malloc call here.        */
    /* But any decent compiler should reduce the extra procedure call   */
    /* to at most a jump instruction in this case.                      */
#   if defined(I386) && defined(SOLARIS_THREADS)
      /*
       * Thread initialisation can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && SOLARIS_THREADS */
    return(REDIRECT_MALLOC(lb));
}

# ifdef __STDC__
    GC_PTR calloc(size_t n, size_t lb)
# else
    GC_PTR calloc(n, lb)
    size_t n, lb;
# endif
{
    return(REDIRECT_MALLOC(n*lb));
}
# endif /* REDIRECT_MALLOC */
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
          case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old object.       */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    register struct hblk * h;
    register hdr * hhdr;
    register word sz;        /* Current size in bytes       */
    register word orig_sz;   /* Original sz in bytes        */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz+HDR_BYTES+HBLKSIZE-1)
             & (~HBLKMASK);
        sz -= HDR_BYTES;
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by allochblk. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
            /* Could also return original object.  But this     */
            /* gives the client warning of imminent disaster.   */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
                GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        GC_PTR result =
            GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
            GC_free(p);
#       endif
        return(result);
    }
}
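/*
 * Illustrative usage note, not part of this file: the object may move, so
 * callers must adopt the returned pointer.  A hypothetical wrapper:
 */
#include <stddef.h>
#include "gc.h"

char *grow_buffer(char *old, size_t new_len)
{
    /* old == 0 behaves like GC_malloc(new_len), per the ANSI rule above. */
    return (char *)GC_realloc(old, new_len);
}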
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(GC_realloc(p, lb));
}
# endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p. */
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
       (defined(SOLARIS_THREADS) || defined(LINUX_THREADS))
        /* We have to redirect malloc calls during initialization.  */
        /* Don't try to deallocate that memory.                     */
        if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        GC_mem_freed += sz;
        /* A signal here can make GC_mem_freed and GC_non_gc_bytes */
        /* inconsistent.  We claim this is benign.                 */
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        /* It's unnecessary to clear the mark bit.  If the      */
        /* object is reallocated, it doesn't matter.  O.w. the  */
        /* collector will do it, since it's on a free list.     */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
    } else {
        DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    void free(GC_PTR p)
# else
    void free(p)
    GC_PTR p;
# endif
{
#   ifndef IGNORE_FREE
        GC_free(p);
#   endif
}
# endif /* REDIRECT_MALLOC */
@@ -0,0 +1,367 @@
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

/* Allocate a composite object of size n bytes.  The caller guarantees  */
/* that pointers past the first page are not relevant.  Caller holds    */
/* allocation lock.                                                     */
ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register struct hblk * h;
    register word n_blocks;
    register word lw;
    register ptr_t op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner((word)lb, k));
    n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if(GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    lw = ROUNDED_UP_WORDS(lb);
    while ((h = GC_allochblk(lw, k, IGNORE_OFF_PAGE)) == 0
           && GC_collect_or_expand(n_blocks, TRUE));
    if (h == 0) {
        op = 0;
    } else {
        op = (ptr_t) (h -> hb_body);
        GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
    }
    GC_words_allocd += lw;
    return((ptr_t)op);
}

ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = GC_generic_malloc_inner_ignore_off_page(lb,k);
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
    void * GC_malloc_ignore_off_page(size_t lb)
# else
    char * GC_malloc_ignore_off_page(lb)
    register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
    void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
    char * GC_malloc_atomic_ignore_off_page(lb)
    register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
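/*
 * Illustrative sketch, not part of this file: the _ignore_off_page
 * variants above suit large objects for which the client keeps a pointer
 * to (or near) the beginning; make_big_buffer is hypothetical.
 */
#include <stddef.h>
#include "gc.h"

void *make_big_buffer(size_t n)
{
    /* Keep the returned base pointer reachable: interior pointers far   */
    /* past the first page are deliberately not honored for this object. */
    return GC_malloc_ignore_off_page(n);
}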
/* Increment GC_words_allocd from code that doesn't have direct access  */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed. */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and         */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
#ifdef __STDC__
    ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
    ptr_t GC_generic_malloc_words_small(lw, k)
    register word lw;
    register int k;
#endif
{
    register ptr_t op;
    register ptr_t *opp;
    register struct obj_kind * kind = GC_obj_kinds + k;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* Note that the client should usually clear the link field.            */
ptr_t GC_generic_malloc_many(lb, k)
register word lb;
register int k;
{
    ptr_t op;
    register ptr_t p;
    ptr_t *opp;
    word lw;
    register word my_words_allocd;
    DCL_LOCK_STATE;

    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        return(op);
    }
    lw = ALIGNED_WORDS(lb);
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        op = GC_clear_stack(GC_allocobj(lw, k));
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            op = (*GC_oom_fn)(lb);
            if(0 != op) obj_link(op) = 0;
            return(op);
        }
    }
    *opp = 0;
    my_words_allocd = 0;
    for (p = op; p != 0; p = obj_link(p)) {
        my_words_allocd += lw;
        if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
        }
    }
    GC_words_allocd += my_words_allocd;

out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(op);
}

void * GC_malloc_many(size_t lb)
{
    return(GC_generic_malloc_many(lb, NORMAL));
}

/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
# endif
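/*
 * Illustrative sketch, not part of this file: a consumer of GC_malloc_many
 * as described above, assuming the documented convention that objects are
 * linked through their first word.  local_list would live in thread-local
 * storage in real use; all names here are hypothetical.
 */
#include "gc.h"

static void *local_list;                /* one thread's private free list */

void *local_alloc_8(void)               /* fixed 8-byte objects */
{
    void *p = local_list;
    if (p == 0) {
        p = GC_malloc_many(8);          /* one locked call, many objects */
        if (p == 0) return 0;           /* out of memory */
    }
    local_list = *(void **)p;           /* unlink through the first word */
    *(void **)p = 0;                    /* clear the link, as advised above */
    return p;
}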
/* Allocate lb bytes of pointerful, traced, but not collectable data */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
#         ifdef ADD_BYTE_AT_END
            if (lb != 0) lb--;
                /* We don't need the extra byte, since this won't be  */
                /* collected anyway.                                  */
#         endif
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be   */
            /* cleared only temporarily during a collection, as a   */
            /* result of the normal free list mark bit clearing.    */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised    */
    /* pointer.  We do need to hold the lock while we adjust        */
    /* mark bits.                                                   */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
#         ifdef ADD_BYTE_AT_END
            if (lb != 0) lb--;
                /* We don't need the extra byte, since this won't be  */
                /* collected anyway.                                  */
#         endif
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised    */
    /* pointer.  We do need to hold the lock while we adjust        */
    /* mark bits.                                                   */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */
(Diff not shown: file too large.)

@@ -0,0 +1,484 @@
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, October 9, 1995 1:06 pm PDT */
# include <stdio.h>
# include "gc_priv.h"

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:
struct roots {
        ptr_t r_start;
        ptr_t r_end;
# ifndef MSWIN32
        struct roots * r_next;
# endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

static int n_root_sets = 0;
/* GC_static_roots[0..n_root_sets) contains the valid root sets. */

# if !defined(NO_DEBUGGING)
/* For debugging:       */
void GC_print_static_roots()
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf2("From 0x%lx to 0x%lx ",
                   (unsigned long) GC_static_roots[i].r_start,
                   (unsigned long) GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf0(" (temporary)\n");
        } else {
            GC_printf0("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf1("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf1("GC_root_size incorrect: %ld!!\n",
                   (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */

/* Primarily for debugging support:                     */
/* Is the address p in one of the registered static     */
/* root sections?                                       */
GC_bool GC_is_static_root(p)
ptr_t p;
{
    static int last_root_set = 0;
    register int i;

    if (p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}

#ifndef MSWIN32
/*
# define LOG_RT_SIZE 6
# define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/

static int rt_hash(addr)
char * addr;
{
    word result = (word) addr;
#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}

/* Is a range starting at b already in the table? If so return a       */
/* pointer to it, else NIL.                                             */
struct roots * GC_roots_present(b)
char *b;
{
    register int h = rt_hash(b);
    register struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);  /* NIL, as promised above */
}

/* Add the given root structure to the index. */
static void add_roots_to_index(p)
struct roots *p;
{
    register int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}

# else /* MSWIN32 */

# define add_roots_to_index(p)

# endif


word GC_root_size = 0;

void GC_add_roots(b, e)
char * b; char * e;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_add_roots_inner(b, e, FALSE);
    UNLOCK();
    ENABLE_SIGNALS();
}
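/*
 * Illustrative sketch, not part of this file: registering a region the
 * collector would not otherwise scan, e.g. memory from the system malloc
 * in a build without REDIRECT_MALLOC.  make_scanned_arena is hypothetical.
 */
#include <stdlib.h>
#include "gc.h"

void *make_scanned_arena(size_t n)
{
    char *arena = malloc(n);            /* invisible to the collector */
    if (arena != 0) {
        /* Pointers stored in [arena, arena+n) now keep objects alive. */
        GC_add_roots(arena, arena + n);
    }
    return arena;
}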
/* Add [b,e) to the root set.  Adding the same interval a second time   */
/* is a moderately fast noop, and hence benign.  We do not handle       */
/* different but overlapping intervals efficiently.  (We do handle      */
/* them correctly.)                                                     */
/* Tmp specifies that the interval may be deleted before                */
/* reregistering dynamic libraries.                                     */
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
GC_bool tmp;
{
    struct roots * old;

#   ifdef MSWIN32
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                 */
      /* This could be done faster with e.g. a                  */
      /* balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        register int i;

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                /* Update GC_root_size before moving each boundary, so  */
                /* that the size delta is computed from the old value.  */
                if ((ptr_t)b < old -> r_start) {
                    GC_root_size += (old -> r_start - (ptr_t)b);
                    old -> r_start = (ptr_t)b;
                }
                if ((ptr_t)e > old -> r_end) {
                    GC_root_size += ((ptr_t)e - old -> r_end);
                    old -> r_end = (ptr_t)e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
            /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
                other = GC_static_roots + i;
                b = (char *)(other -> r_start);
                e = (char *)(other -> r_end);
                if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                    if ((ptr_t)b < old -> r_start) {
                        GC_root_size += (old -> r_start - (ptr_t)b);
                        old -> r_start = (ptr_t)b;
                    }
                    if ((ptr_t)e > old -> r_end) {
                        GC_root_size += ((ptr_t)e - old -> r_end);
                        old -> r_end = (ptr_t)e;
                    }
                    old -> r_tmp &= other -> r_tmp;
                    /* Delete this entry by moving the last one into    */
                    /* its slot; the r_tmp flag must move with it.      */
                    GC_root_size -= (other -> r_end - other -> r_start);
                    other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                    other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                    other -> r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
                    n_root_sets--;
                }
            }
            return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
          if ((ptr_t)e <= old -> r_end) /* already there */ return;
          /* else extend */
          GC_root_size += (ptr_t)e - old -> r_end;
          old -> r_end = (ptr_t)e;
          return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   ifndef MSWIN32
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += (ptr_t)e - (ptr_t)b;
    n_root_sets++;
}
void GC_clear_roots GC_PROTO((void))
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    n_root_sets = 0;
    GC_root_size = 0;
#   ifndef MSWIN32
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    }
#   endif
    UNLOCK();
    ENABLE_SIGNALS();
}

/* Internal use only; lock held. */
void GC_remove_tmp_roots()
{
    register int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_root_size -=
                (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
            GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
            GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
            GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
            n_root_sets--;
        } else {
            i++;
        }
    }
#   ifndef MSWIN32
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
        for (i = 0; i < n_root_sets; i++)
            add_roots_to_index(GC_static_roots + i);
    }
#   endif
}

ptr_t GC_approx_sp()
{
    word dummy;

    return((ptr_t)(&dummy));
}

/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/

size_t GC_excl_table_entries = 0;       /* Number of entries in use.    */

/* Return the first exclusion range that includes an address >=        */
/* start_addr.  Assumes the exclusion table contains at least one      */
/* entry (namely the GC data structures).                               */
struct exclusion * GC_next_exclusion(start_addr)
ptr_t start_addr;
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high    */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}

void GC_exclude_static_roots(start, finish)
GC_PTR start;
GC_PTR finish;
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion(start);
    }
    if (0 != next) {
        if ((word)(next -> e_start) < (word) finish) {
            /* incomplete error check. */
            ABORT("exclusion ranges overlap");
        }
        if ((word)(next -> e_start) == (word) finish) {
            /* extend old range backwards   */
            next -> e_start = (ptr_t)start;
            return;
        }
        next_index = next - GC_excl_table;
        for (i = GC_excl_table_entries; i > next_index; --i) {
            GC_excl_table[i] = GC_excl_table[i-1];
        }
    } else {
        next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
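/*
 * Illustrative sketch, not part of this file: excluding a pointer-free
 * static buffer from root scanning, which cuts scan time and false
 * retention.  big_noise is hypothetical.
 */
#include "gc.h"

static char big_noise[1 << 20];         /* pointer-free static data */

void exclude_noise(void)
{
    GC_exclude_static_roots(big_noise, big_noise + sizeof big_noise);
}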
/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}

/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 */
void GC_push_current_stack(cold_gc_frame)
ptr_t cold_gc_frame;
{
#   if defined(THREADS)
        if (0 == cold_gc_frame) return;
#       ifdef STACK_GROWS_DOWN
            GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
#       else
            GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
#       endif
#   else
#       ifdef STACK_GROWS_DOWN
            GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
                                               cold_gc_frame );
#       else
            GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
                                               cold_gc_frame );
#       endif
#   endif /* !THREADS */
}

/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(all, cold_gc_frame)
GC_bool all;
ptr_t cold_gc_frame;
{
    register int i;

    /*
     * push registers - i.e., call GC_push_one(r) for each
     * register contents r.
     */
#   ifdef USE_GENERIC_PUSH_REGS
        GC_generic_push_regs(cold_gc_frame);
#   else
        GC_push_regs(); /* usually defined in machine_dep.c */
#   endif

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Reregister dynamic libraries, in case one got added. */
#   if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(PCR)) \
       && !defined(SRC_M3)
        GC_remove_tmp_roots();
        GC_register_dynamic_libraries();
#   endif
    /* Mark everything in static data areas */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
            GC_static_roots[i].r_start,
            GC_static_roots[i].r_end, all);
    }

    /*
     * Now traverse stacks.
     */
#   if !defined(USE_GENERIC_PUSH_REGS)
        GC_push_current_stack(cold_gc_frame);
        /* In the threads case, this only pushes collector frames.      */
        /* In the USE_GENERIC_PUSH_REGS case, this is done inside       */
        /* GC_push_regs, so that we catch callee-save registers saved   */
        /* inside the GC_push_regs frame.                               */
#   endif
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
}
@@ -0,0 +1,880 @@
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */


#include <stdio.h>
#include <signal.h>

#define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
#include "gc_priv.h"

#ifdef SOLARIS_THREADS
# include <sys/syscall.h>
#endif
#ifdef MSWIN32
# include <windows.h>
#endif

# ifdef THREADS
#   ifdef PCR
#     include "il/PCR_IL.h"
      PCR_Th_ML GC_allocate_ml;
#   else
#     if defined(SRC_M3) || defined(GENERIC_THREADS)
        /* Critical section counter is defined in the M3 runtime        */
        /* That's all we use.                                           */
#     else
#       ifdef SOLARIS_THREADS
          mutex_t GC_allocate_ml;       /* Implicitly initialized.      */
#       else
#         ifdef WIN32_THREADS
            GC_API CRITICAL_SECTION GC_allocate_ml;
#         else
#           if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
               || defined(IRIX_JDK_THREADS)
#             ifdef UNDEFINED
                pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
#             endif
              pthread_t GC_lock_holder = NO_THREAD;
#           else
              --> declare allocator lock here
#           endif
#         endif
#       endif
#     endif
#   endif
# endif

GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;


GC_bool GC_debugging_started = FALSE;
        /* defined here so we don't have to load debug_malloc.o */

void (*GC_check_heap)() = (void (*)())0;

void (*GC_start_call_back)() = (void (*)())0;

ptr_t GC_stackbottom = 0;

GC_bool GC_dont_gc = 0;

GC_bool GC_quiet = 0;

/*ARGSUSED*/
GC_PTR GC_default_oom_fn GC_PROTO((size_t bytes_requested))
{
    return(0);
}

GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested)) = GC_default_oom_fn;

extern signed_word GC_mem_found;

# ifdef MERGE_SIZES
    /* Set things up so that GC_size_map[i] >= words(i),                */
    /* but not too much bigger,                                         */
    /* and so that size_map contains relatively few distinct entries.   */
    /* This is stolen from Russ Atkinson's Cedar quantization           */
    /* algorithm (but we precompute it).                                */


    void GC_init_size_map()
    {
        register unsigned i;

        /* Map size 0 to 1.  This avoids problems at lower levels. */
        GC_size_map[0] = 1;
        /* One word objects don't have to be 2 word aligned. */
        for (i = 1; i < sizeof(word); i++) {
            GC_size_map[i] = 1;
        }
        GC_size_map[sizeof(word)] = ROUNDED_UP_WORDS(sizeof(word));
        for (i = sizeof(word) + 1; i <= 8 * sizeof(word); i++) {
#           ifdef ALIGN_DOUBLE
              GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
#           else
              GC_size_map[i] = ROUNDED_UP_WORDS(i);
#           endif
        }
        for (i = 8*sizeof(word) + 1; i <= 16 * sizeof(word); i++) {
            GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
        }
        /* We leave the rest of the array to be filled in on demand. */
    }
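/*
 * Illustrative check, not part of this file: assuming a 4-byte word and
 * ALIGN_DOUBLE, the loops above yield GC_size_map[1..4] == 1,
 * GC_size_map[5..8] == 2 and GC_size_map[9..16] == 4.  A hypothetical
 * standalone recomputation of one entry:
 */
#include <assert.h>

static unsigned rounded_up_words(unsigned bytes)  /* stand-in for the macro */
{
    return (bytes + sizeof(unsigned) - 1) / sizeof(unsigned);
}

int main(void)
{
    /* An 11-byte request occupies 3 words, padded to 4 for doubleword */
    /* alignment: (ROUNDED_UP_WORDS(i) + 1) & ~1 as in the loop above. */
    unsigned w = rounded_up_words(11);          /* == 3 on such a machine */
    unsigned aligned = (w + 1) & ~1u;           /* == 4 */
    assert(w == 3 && aligned == 4);
    return 0;
}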
    /* Fill in additional entries in GC_size_map, including the ith one.  */
    /* We assume the ith entry is currently 0.                            */
    /* Note that a filled in section of the array ending at n always      */
    /* has length at least n/4.                                           */
    void GC_extend_size_map(i)
    word i;
    {
        word orig_word_sz = ROUNDED_UP_WORDS(i);
        word word_sz = orig_word_sz;
        register word byte_sz = WORDS_TO_BYTES(word_sz);
                                /* The size we try to preserve.         */
                                /* Close to i, unless this would        */
                                /* introduce too many distinct sizes.   */
        word smaller_than_i = byte_sz - (byte_sz >> 3);
        word much_smaller_than_i = byte_sz - (byte_sz >> 2);
        register word low_limit;        /* The lowest indexed entry we  */
                                        /* initialize.                  */
        register word j;

        if (GC_size_map[smaller_than_i] == 0) {
            low_limit = much_smaller_than_i;
            while (GC_size_map[low_limit] != 0) low_limit++;
        } else {
            low_limit = smaller_than_i + 1;
            while (GC_size_map[low_limit] != 0) low_limit++;
            word_sz = ROUNDED_UP_WORDS(low_limit);
            word_sz += word_sz >> 3;
            if (word_sz < orig_word_sz) word_sz = orig_word_sz;
        }
#       ifdef ALIGN_DOUBLE
            word_sz += 1;
            word_sz &= ~1;
#       endif
        if (word_sz > MAXOBJSZ) {
            word_sz = MAXOBJSZ;
        }
        /* If we can fit the same number of larger objects in a block, */
        /* do so.                                                       */
        {
            size_t number_of_objs = BODY_SZ/word_sz;
            word_sz = BODY_SZ/number_of_objs;
#           ifdef ALIGN_DOUBLE
                word_sz &= ~1;
#           endif
        }
        byte_sz = WORDS_TO_BYTES(word_sz);
#       ifdef ADD_BYTE_AT_END
            /* We need one extra byte; don't fill in GC_size_map[byte_sz] */
            byte_sz--;
#       endif

        for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = word_sz;
    }
# endif


/*
 * The following is a gross hack to deal with a problem that can occur
 * on machines that are sloppy about stack frame sizes, notably SPARC.
 * Bogus pointers may be written to the stack and not cleared for
 * a LONG time, because they always fall into holes in stack frames
 * that are not written.  We partially address this by clearing
 * sections of the stack whenever we get control.
 */
word GC_stack_last_cleared = 0; /* GC_no when we last did this */
# ifdef THREADS
#   define CLEAR_SIZE 2048
# else
#   define CLEAR_SIZE 213
# endif
# define DEGRADE_RATE 50

word GC_min_sp;         /* Coolest stack pointer value from which we've */
                        /* already cleared the stack.                   */

# ifdef STACK_GROWS_DOWN
#   define COOLER_THAN >
#   define HOTTER_THAN <
#   define MAKE_COOLER(x,y) if ((word)(x)+(y) > (word)(x)) {(x) += (y);} \
                            else {(x) = (word)ONES;}
#   define MAKE_HOTTER(x,y) (x) -= (y)
# else
#   define COOLER_THAN <
#   define HOTTER_THAN >
#   define MAKE_COOLER(x,y) if ((word)(x)-(y) < (word)(x)) {(x) -= (y);} else {(x) = 0;}
#   define MAKE_HOTTER(x,y) (x) += (y)
# endif

word GC_high_water;
                        /* "hottest" stack pointer value we have seen   */
                        /* recently.  Degrades over time.               */

word GC_words_allocd_at_reset;

#if defined(ASM_CLEAR_CODE) && !defined(THREADS)
  extern ptr_t GC_clear_stack_inner();
#endif

#if !defined(ASM_CLEAR_CODE) && !defined(THREADS)
/* Clear the stack up to about limit.  Return arg. */
/*ARGSUSED*/
ptr_t GC_clear_stack_inner(arg, limit)
ptr_t arg;
word limit;
{
    word dummy[CLEAR_SIZE];

    BZERO(dummy, CLEAR_SIZE*sizeof(word));
    if ((word)(dummy) COOLER_THAN limit) {
        (void) GC_clear_stack_inner(arg, limit);
    }
    /* Make sure the recursive call is not a tail call, and the bzero  */
    /* call is not recognized as dead code.                            */
    GC_noop1((word)dummy);
    return(arg);
}
#endif

/* Clear some of the inaccessible part of the stack.  Returns its      */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame.                                                      */
ptr_t GC_clear_stack(arg)
ptr_t arg;
{
    register word sp = (word)GC_approx_sp();  /* Hotter than actual sp */
#   ifdef THREADS
        word dummy[CLEAR_SIZE];
#   else
        register word limit;
#   endif

#   define SLOP 400
        /* Extra bytes we clear every time.  This clears our own       */
        /* activation record, and should cause more frequent           */
        /* clearing near the cold end of the stack, a good thing.      */
#   define GC_SLOP 4000
        /* We make GC_high_water this much hotter than we really       */
        /* saw it, to cover for GC noise etc. above our current frame. */
#   define CLEAR_THRESHOLD 100000
        /* We restart the clearing process after this many bytes of     */
        /* allocation.  Otherwise very heavily recursive programs       */
        /* with sparse stacks may result in heaps that grow almost      */
        /* without bounds.  As the heap gets larger, collection         */
        /* frequency decreases, thus clearing frequency would decrease, */
        /* thus more junk remains accessible, thus the heap gets        */
        /* larger ...                                                   */
#   ifdef THREADS
        BZERO(dummy, CLEAR_SIZE*sizeof(word));
#   else
        if (GC_gc_no > GC_stack_last_cleared) {
            /* Start things over, so we clear the entire stack again */
            if (GC_stack_last_cleared == 0) GC_high_water = (word) GC_stackbottom;
            GC_min_sp = GC_high_water;
            GC_stack_last_cleared = GC_gc_no;
            GC_words_allocd_at_reset = GC_words_allocd;
        }
        /* Adjust GC_high_water */
        MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
        if (sp HOTTER_THAN GC_high_water) {
            GC_high_water = sp;
        }
        MAKE_HOTTER(GC_high_water, GC_SLOP);
        limit = GC_min_sp;
        MAKE_HOTTER(limit, SLOP);
        if (sp COOLER_THAN limit) {
            limit &= ~0xf;  /* Make it sufficiently aligned for assembly */
                            /* implementations of GC_clear_stack_inner.  */
            GC_min_sp = sp;
            return(GC_clear_stack_inner(arg, limit));
        } else if (WORDS_TO_BYTES(GC_words_allocd - GC_words_allocd_at_reset)
                   > CLEAR_THRESHOLD) {
            /* Restart clearing process, but limit how much clearing we do. */
            GC_min_sp = sp;
            MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
            if (GC_min_sp HOTTER_THAN GC_high_water) GC_min_sp = GC_high_water;
            GC_words_allocd_at_reset = GC_words_allocd;
        }
#   endif
    return(arg);
}
/* Return a pointer to the base address of p, given a pointer to        */
/* an address within an object.  Return 0 o.w.                          */
# ifdef __STDC__
    GC_PTR GC_base(GC_PTR p)
# else
    GC_PTR GC_base(p)
    GC_PTR p;
# endif
{
    register word r;
    register struct hblk *h;
    register bottom_index *bi;
    register hdr *candidate_hdr;
    register word limit;

    r = (word)p;
    if (!GC_is_initialized) return 0;
    h = HBLKPTR(r);
    GET_BI(r, bi);
    candidate_hdr = HDR_FROM_BI(bi, r);
    if (candidate_hdr == 0) return(0);
    /* If it's a pointer to the middle of a large object, move it       */
    /* to the beginning.                                                */
    while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
        h = FORWARDED_ADDR(h,candidate_hdr);
        r = (word)h + HDR_BYTES;
        candidate_hdr = HDR(h);
    }
    if (candidate_hdr -> hb_map == GC_invalid_map) return(0);
    /* Make sure r points to the beginning of the object */
    r &= ~(WORDS_TO_BYTES(1) - 1);
    {
        register int offset = (char *)r - (char *)(HBLKPTR(r));
        register signed_word sz = candidate_hdr -> hb_sz;

#       ifdef ALL_INTERIOR_POINTERS
          register map_entry_type map_entry;

          map_entry = MAP_ENTRY((candidate_hdr -> hb_map), offset);
          if (map_entry == OBJ_INVALID) {
              return(0);
          }
          r -= WORDS_TO_BYTES(map_entry);
          limit = r + WORDS_TO_BYTES(sz);
#       else
          register int correction;

          offset = BYTES_TO_WORDS(offset - HDR_BYTES);
          correction = offset % sz;
          r -= (WORDS_TO_BYTES(correction));
          limit = r + WORDS_TO_BYTES(sz);
          if (limit > (word)(h + 1)
              && sz <= BYTES_TO_WORDS(HBLKSIZE) - HDR_WORDS) {
              return(0);
          }
#       endif
        if ((word)p >= limit) return(0);
    }
    return((GC_PTR)r);
}
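/*
 * Illustrative sketch, not part of this file: GC_base recovers the object
 * base from an interior pointer, or 0 for non-heap addresses; the helper
 * name is hypothetical.
 */
#include "gc.h"

int points_into_gc_heap(void *maybe_interior)
{
    return GC_base(maybe_interior) != 0;
}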
/* Return the size of an object, given a pointer to its base.           */
/* (For small objects this also happens to work from interior pointers, */
/* but that shouldn't be relied upon.)                                   */
# ifdef __STDC__
    size_t GC_size(GC_PTR p)
# else
    size_t GC_size(p)
    GC_PTR p;
# endif
{
    register int sz;
    register hdr * hhdr = HDR(p);

    sz = WORDS_TO_BYTES(hhdr -> hb_sz);
    if (sz < 0) {
        return(-sz);
    } else {
        return(sz);
    }
}

size_t GC_get_heap_size GC_PROTO(())
{
    return ((size_t) GC_heapsize);
}

size_t GC_get_bytes_since_gc GC_PROTO(())
{
    return ((size_t) WORDS_TO_BYTES(GC_words_allocd));
}

GC_bool GC_is_initialized = FALSE;

void GC_init()
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_init_inner();
    UNLOCK();
    ENABLE_SIGNALS();
}
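/*
 * Illustrative sketch, not part of this file: calling the public
 * initializer once before the first allocation.  GC_init is idempotent,
 * since GC_init_inner returns immediately when already initialized.
 */
#include "gc.h"

int main(void)
{
    GC_init();                          /* safe to call more than once */
    char *p = (char *)GC_malloc(64);    /* collected allocation */
    return p == 0;
}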
|
||||
|
||||
#ifdef MSWIN32
  extern void GC_init_win32();
#endif

extern void GC_setpagesize();

void GC_init_inner()
{
#   ifndef THREADS
        word dummy;
#   endif

    if (GC_is_initialized) return;
    GC_setpagesize();
    GC_exclude_static_roots(beginGC_arrays, end_gc_area);
#   ifdef PRINTSTATS
        if ((ptr_t)endGC_arrays != (ptr_t)(&GC_obj_kinds)) {
            GC_printf0("Reordering linker, didn't exclude obj_kinds\n");
        }
#   endif
#   ifdef MSWIN32
        GC_init_win32();
#   endif
#   if defined(LINUX) && defined(POWERPC)
        GC_init_linuxppc();
#   endif
#   if defined(LINUX) && defined(SPARC)
        GC_init_linuxsparc();
#   endif
#   ifdef SOLARIS_THREADS
        GC_thr_init();
        /* We need dirty bits in order to find live stack sections. */
        GC_dirty_init();
#   endif
#   if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
       || defined(IRIX_JDK_THREADS)
        GC_thr_init();
#   endif
#   if !defined(THREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
       || defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(GENERIC_THREADS)
      if (GC_stackbottom == 0) {
          GC_stackbottom = GC_get_stack_base();
      }
#   endif
    if (sizeof (ptr_t) != sizeof(word)) {
        ABORT("sizeof (ptr_t) != sizeof(word)\n");
    }
    if (sizeof (signed_word) != sizeof(word)) {
        ABORT("sizeof (signed_word) != sizeof(word)\n");
    }
    if (sizeof (struct hblk) != HBLKSIZE) {
        ABORT("sizeof (struct hblk) != HBLKSIZE\n");
    }
#   ifndef THREADS
#     if defined(STACK_GROWS_UP) && defined(STACK_GROWS_DOWN)
        ABORT(
          "Only one of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd\n");
#     endif
#     if !defined(STACK_GROWS_UP) && !defined(STACK_GROWS_DOWN)
        ABORT(
          "One of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd\n");
#     endif
#     ifdef STACK_GROWS_DOWN
        if ((word)(&dummy) > (word)GC_stackbottom) {
            GC_err_printf0(
                "STACK_GROWS_DOWN is defd, but stack appears to grow up\n");
#           ifndef UTS4  /* Compiler bug workaround */
                GC_err_printf2("sp = 0x%lx, GC_stackbottom = 0x%lx\n",
                               (unsigned long) (&dummy),
                               (unsigned long) GC_stackbottom);
#           endif
            ABORT("stack direction 3\n");
        }
#     else
        if ((word)(&dummy) < (word)GC_stackbottom) {
            GC_err_printf0(
                "STACK_GROWS_UP is defd, but stack appears to grow down\n");
            GC_err_printf2("sp = 0x%lx, GC_stackbottom = 0x%lx\n",
                           (unsigned long) (&dummy),
                           (unsigned long) GC_stackbottom);
            ABORT("stack direction 4");
        }
#     endif
#   endif
#   if !defined(_AUX_SOURCE) || defined(__GNUC__)
      if ((word)(-1) < (word)0) {
          GC_err_printf0("The type word should be an unsigned integer type\n");
          GC_err_printf0("It appears to be signed\n");
          ABORT("word");
      }
#   endif
    if ((signed_word)(-1) >= (signed_word)0) {
        GC_err_printf0(
            "The type signed_word should be a signed integer type\n");
        GC_err_printf0("It appears to be unsigned\n");
        ABORT("signed_word");
    }

    /* Add initial guess of root sets.  Do this first, since sbrk(0) */
    /* might be used.                                                */
    GC_register_data_segments();
    GC_init_headers();
    GC_bl_init();
    GC_mark_init();
    if (!GC_expand_hp_inner((word)MINHINCR)) {
        GC_err_printf0("Can't start up: not enough memory\n");
        EXIT();
    }
    /* Preallocate large object map.  It's otherwise inconvenient to */
    /* deal with failure.                                            */
    if (!GC_add_map_entry((word)0)) {
        GC_err_printf0("Can't start up: not enough memory\n");
        EXIT();
    }
    GC_register_displacement_inner(0L);
#   ifdef MERGE_SIZES
      GC_init_size_map();
#   endif
#   ifdef PCR
      if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
          != PCR_ERes_okay) {
          ABORT("Can't lock load state\n");
      } else if (PCR_IL_Unlock() != PCR_ERes_okay) {
          ABORT("Can't unlock load state\n");
      }
      PCR_IL_Unlock();
      GC_pcr_install();
#   endif
    /* Get black list set up */
    GC_gcollect_inner();
#   ifdef STUBBORN_ALLOC
      GC_stubborn_init();
#   endif
    GC_is_initialized = TRUE;
    /* Convince lint that some things are used */
#   ifdef LINT
      {
          extern char * GC_copyright[];
          extern int GC_read();
          extern void GC_register_finalizer_no_order();

          GC_noop(GC_copyright, GC_find_header,
                  GC_push_one, GC_call_with_alloc_lock, GC_read,
                  GC_dont_expand,
#                 ifndef NO_DEBUGGING
                    GC_dump,
#                 endif
                  GC_register_finalizer_no_order);
      }
#   endif
}

void GC_enable_incremental GC_PROTO(())
{
# if !defined(FIND_LEAK) && !defined(SMALL_CONFIG)
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    if (GC_incremental) goto out;
    GC_setpagesize();
#   ifdef MSWIN32
      {
          extern GC_bool GC_is_win32s();

          /* VirtualProtect is not functional under win32s. */
          if (GC_is_win32s()) goto out;
      }
#   endif /* MSWIN32 */
#   ifndef SOLARIS_THREADS
        GC_dirty_init();
#   endif
    if (!GC_is_initialized) {
        GC_init_inner();
    }
    if (GC_dont_gc) {
        /* Can't easily do it. */
        UNLOCK();
        ENABLE_SIGNALS();
        return;
    }
    if (GC_words_allocd > 0) {
        /* There may be unmarked reachable objects. */
        GC_gcollect_inner();
    }   /* else we're OK in assuming everything's  */
        /* clean since nothing can point to an     */
        /* unmarked object.                        */
    GC_read_dirty();
    GC_incremental = TRUE;
out:
    UNLOCK();
    ENABLE_SIGNALS();
# endif
}

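#if 0
/* Sketch only (not compiled): enabling incremental collection.  As   */
/* the conditionals above show, the call is a no-op when FIND_LEAK or */
/* SMALL_CONFIG is defined, and it declines silently under win32s or  */
/* when GC_dont_gc is set.                                             */
int main()
{
    GC_init();
    GC_enable_incremental();
    /* ... allocate as usual; collection now proceeds in small steps. */
    return 0;
}
#endif
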
#ifdef MSWIN32
# define LOG_FILE "gc.log"

  HANDLE GC_stdout = 0, GC_stderr;
  int GC_tmp;
  DWORD GC_junk;

  void GC_set_files()
  {
      if (!GC_stdout) {
          GC_stdout = CreateFile(LOG_FILE, GENERIC_WRITE,
                                 FILE_SHARE_READ | FILE_SHARE_WRITE,
                                 NULL, CREATE_ALWAYS, FILE_FLAG_WRITE_THROUGH,
                                 NULL);
          if (INVALID_HANDLE_VALUE == GC_stdout) ABORT("Open of log file failed");
      }
      if (GC_stderr == 0) {
          GC_stderr = GC_stdout;
      }
  }

#endif

#if defined(OS2) || defined(MACOS)
  FILE * GC_stdout = NULL;
  FILE * GC_stderr = NULL;
  int GC_tmp;  /* Should really be local ... */

  void GC_set_files()
  {
      if (GC_stdout == NULL) {
          GC_stdout = stdout;
      }
      if (GC_stderr == NULL) {
          GC_stderr = stderr;
      }
  }
#endif

#if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32)
  int GC_stdout = 1;
  int GC_stderr = 2;
# if !defined(AMIGA)
#   include <unistd.h>
# endif
#endif

#if !defined(MSWIN32) && !defined(OS2) && !defined(MACOS)
int GC_write(fd, buf, len)
int fd;
char *buf;
size_t len;
{
    register int bytes_written = 0;
    register int result;

    /* write(2) may transfer fewer than len bytes; retry until the */
    /* whole buffer has been written or an error occurs.           */
    while (bytes_written < len) {
#       ifdef SOLARIS_THREADS
            result = syscall(SYS_write, fd, buf + bytes_written,
                             len - bytes_written);
#       else
            result = write(fd, buf + bytes_written, len - bytes_written);
#       endif
        if (-1 == result) return(result);
        bytes_written += result;
    }
    return(bytes_written);
}
#endif /* UN*X */

#ifdef MSWIN32
#   define WRITE(f, buf, len) (GC_set_files(), \
                               GC_tmp = WriteFile((f), (buf), \
                                                  (len), &GC_junk, NULL), \
                               (GC_tmp ? 1 : -1))
#else
#   if defined(OS2) || defined(MACOS)
#     define WRITE(f, buf, len) (GC_set_files(), \
                                 GC_tmp = fwrite((buf), 1, (len), (f)), \
                                 fflush(f), GC_tmp)
#   else
#     define WRITE(f, buf, len) GC_write((f), (buf), (len))
#   endif
#endif

/* A version of printf that is unlikely to call malloc, and is thus safer */
/* to call from the collector in case malloc has been bound to GC_malloc. */
/* Assumes that no more than 1023 characters are written at once.         */
/* Assumes that all arguments have been converted to something of the     */
/* same size as long, and that the format conversions expect something    */
/* of that size.                                                          */
void GC_printf(format, a, b, c, d, e, f)
char * format;
long a, b, c, d, e, f;
{
    char buf[1025];

    if (GC_quiet) return;
    buf[1024] = 0x15;   /* Guard byte: catches formats that overflow */
                        /* the 1023-character limit.                 */
    (void) sprintf(buf, format, a, b, c, d, e, f);
    if (buf[1024] != 0x15) ABORT("GC_printf clobbered stack");
    if (WRITE(GC_stdout, buf, strlen(buf)) < 0) ABORT("write to stdout failed");
}

void GC_err_printf(format, a, b, c, d, e, f)
char * format;
long a, b, c, d, e, f;
{
    char buf[1025];

    buf[1024] = 0x15;   /* Same guard-byte check as GC_printf. */
    (void) sprintf(buf, format, a, b, c, d, e, f);
    if (buf[1024] != 0x15) ABORT("GC_err_printf clobbered stack");
    if (WRITE(GC_stderr, buf, strlen(buf)) < 0) ABORT("write to stderr failed");
}

void GC_err_puts(s)
char *s;
{
    if (WRITE(GC_stderr, s, strlen(s)) < 0) ABORT("write to stderr failed");
}

# if defined(__STDC__) || defined(__cplusplus)
    void GC_default_warn_proc(char *msg, GC_word arg)
# else
    void GC_default_warn_proc(msg, arg)
    char *msg;
    GC_word arg;
# endif
{
    GC_err_printf1(msg, (unsigned long)arg);
}

GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;

# if defined(__STDC__) || defined(__cplusplus)
    GC_warn_proc GC_set_warn_proc(GC_warn_proc p)
# else
    GC_warn_proc GC_set_warn_proc(p)
    GC_warn_proc p;
# endif
{
    GC_warn_proc result;

    LOCK();
    result = GC_current_warn_proc;
    GC_current_warn_proc = p;
    UNLOCK();
    return(result);
}

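#if 0
/* Sketch only (not compiled): installing a replacement warning     */
/* handler.  "my_warn_proc" is an example name; a client might use  */
/* this to route collector warnings into its own log instead of     */
/* GC_stderr.                                                        */
static void my_warn_proc(msg, arg)
char *msg;
GC_word arg;
{
    /* msg is a printf-style format expecting the single argument arg. */
}

static void warn_proc_example()
{
    GC_warn_proc old = GC_set_warn_proc(my_warn_proc);
    /* The previous handler is returned, so it can be restored later. */
}
#endif
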
#ifndef PCR
void GC_abort(msg)
char * msg;
{
    GC_err_printf1("%s\n", msg);
    (void) abort();
}
#endif

#ifdef NEED_CALLINFO

#if defined(MACOS) && defined(POWERPC)

/* On classic MacOS, each PowerPC function may be followed by a      */
/* traceback table embedding its name.  The table starts at the zero */
/* word immediately after the function's final blr instruction       */
/* (0x4E800020); pc2name scans forward for that pattern.             */
struct traceback_table {
    long zero;
    long magic;
    long reserved;
    long codeSize;
    short nameLength;
    char name[2];
};
typedef struct traceback_table traceback_table;

static char* pc2name(word pc, char name[], long size)
{
    name[0] = '\0';

    /* Make sure pc is instruction aligned (at least). */
    if (pc == (pc & 0xFFFFFFFC)) {
        long instructionsToLook = 4096;
        long* instruction = (long*)pc;

        /* Look for the traceback table. */
        while (instructionsToLook--) {
            if (instruction[0] == 0x4E800020 && instruction[1] == 0x00000000) {
                traceback_table* tb = (traceback_table*)&instruction[1];
                long nameLength = (tb->nameLength > --size ? size : tb->nameLength);
                memcpy(name, tb->name + 1, --nameLength);
                name[nameLength] = '\0';
                break;
            }
            ++instruction;
        }
    }

    return name;
}

extern void MWUnmangle(const char *mangled_name, char *unmangled_name, size_t buffersize);

void GC_print_callers(struct callinfo info[NFRAMES])
{
    register int i;
    static char name[1024], unmangled_name[1024];

    GC_err_printf0("Callers at location:\n");
    for (i = 0; i < NFRAMES; i++) {
        if (info[i].ci_pc == 0) break;
        pc2name(info[i].ci_pc, name, sizeof(name));
        MWUnmangle(name, unmangled_name, sizeof(unmangled_name));
        GC_err_printf2("%s(0x%08X)\n", unmangled_name, info[i].ci_pc);
    }
}

#else

void GC_print_callers (info)
struct callinfo info[NFRAMES];
{
    register int i;

#   if NFRAMES == 1
      GC_err_printf0("\tCaller at allocation:\n");
#   else
      GC_err_printf0("\tCall chain at allocation:\n");
#   endif
    for (i = 0; i < NFRAMES; i++) {
        if (info[i].ci_pc == 0) break;
#       if NARGS > 0
        {
            int j;

            GC_err_printf0("\t\targs: ");
            for (j = 0; j < NARGS; j++) {
                if (j != 0) GC_err_printf0(", ");
                /* Saved arguments are stored bit-complemented so they */
                /* are not mistaken for pointers and do not retain     */
                /* objects; undo the complement for printing.          */
                GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
                               ~(info[i].ci_arg[j]));
            }
            GC_err_printf0("\n");
        }
#       endif
        GC_err_printf1("\t\t##PC##= 0x%X\n", info[i].ci_pc);
    }
}

#endif /* !(MACOS && POWERPC) */

#endif /* NEED_CALLINFO */

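#if 0
/* Sketch only (not compiled): GC_print_callers is normally reached  */
/* from the debugging allocator, which saves a struct callinfo array */
/* for each object when call-chain saving is configured.  A direct   */
/* caller would look like this ("ci" is a hypothetical name):        */
static void callers_example(struct callinfo ci[NFRAMES])
{
    GC_print_callers(ci);       /* one output line per saved frame */
}
#endif
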
# ifdef SRC_M3
void GC_enable()
{
    GC_dont_gc--;
}

void GC_disable()
{
    GC_dont_gc++;
}
# endif

#if !defined(NO_DEBUGGING)

void GC_dump()
{
    GC_printf0("***Static roots:\n");
    GC_print_static_roots();
    GC_printf0("\n***Heap sections:\n");
    GC_print_heap_sects();
    GC_printf0("\n***Free blocks:\n");
    GC_print_hblkfreelist();
    GC_printf0("\n***Blocks in use:\n");
    GC_print_block_list();
}

# endif /* !NO_DEBUGGING */
